| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
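Each row pairs six metadata columns (repo_name, path, language, license, size, score) with one source file split into three contiguous text columns: a prefix, a short middle span (at most 512 characters), and a suffix, the layout typically used for fill-in-the-middle code corpora. Below is a minimal sketch of rejoining one row into the full file text; the `reassemble` helper and the dict-style row are illustrative assumptions, not part of the dataset itself:

```python
def reassemble(row):
    """Rejoin one dataset row into the original file text.

    Assumes `row` maps the column names in the header above to their
    values; the three text fields are contiguous, so concatenating
    them in column order restores the file.
    """
    return row["prefix"] + row["middle"] + row["suffix"]


# Illustrative usage with a hypothetical, abbreviated row (these strings
# are made up for the example; they are not taken from the rows below):
row = {
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n\nprint(add(2, 3))\n",
}
print(reassemble(row))
```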
| corredD/upy | blender/v280/__init__.py | Python | gpl-3.0 | 755 | 0.001325 |
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/blender/v271/__init__.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
| pizzapanther/Getting-Paid-With-Python | paypal_demo/paypal_demo/settings.py | Python | mit | 2,758 | 0.000725 |
"""
Django settings for paypal_demo project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gt^xy(p!5wcff5@zy#^cnvuz9ry#-#g$59du41x@a!l=#)3q6+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'paypal_demo',
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'paypal_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paypal_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
#PayPal Settings
INSTALLED_APPS.append('paypal.standard.ipn')
PAYPAL_RECEIVER_EMAIL = "sandbox@neutrondrive.com"
| liukaijv/XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels23.py | Python | bsd-2-clause | 1,849 | 0.000541 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_data_labels23.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [45705856, 45740416]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {
'value': 1,
'font': {'name': 'Consolas', 'baseline': 1 * -1, 'pitch_family': 49, 'charset': 0}
},
})
chart.add_series({
'values': '=Sheet1!$B$1:$B$5',
'data_labels': {'value': 1, 'position': 'inside_base'},
})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| janmtl/pypsych | pypsych/data_sources/hrvstitcher.py | Python | bsd-3-clause | 10,502 | 0.000286 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes the HRV stitcher data source class
"""
import pandas as pd
import numpy as np
from scipy.io import loadmat
from scipy.interpolate import UnivariateSpline
from data_source import DataSource
from schema import Schema, Or, Optional
def _val(x, pos, label_bin):
return np.mean(x)
def _std(x, pos, label_bin):
return x.std(axis=0)
def _sem(x, pos, label_bin):
return x.sem(axis=0)
def _var(x, pos, label_bin):
return np.var(x)
class HRVStitcher(DataSource):
def __init__(self, config, schedule):
# Call the parent class init
super(HRVStitcher, self).__init__(config, schedule)
self.panels = {'bpm': {'VAL': _val,
'SEM': _sem},
'rr': {'VAL': _val,
'STD': _std},
'twave': {'VAL': _val,
'SEM': _sem}}
def load(self, file_paths):
"""Override for load method to include .mat compatibility."""
self.data['samples'] = pd.read_csv(file_paths['samples'],
comment="#",
delimiter="\t",
skipinitialspace=True,
header=False,
index_col=False,
names=['bpm', 'rr', 'twave'])
raw_mat = loadmat(file_paths['labels'])
events = raw_mat['events'][:, 0]
self.data['labels'] = pd.DataFrame({'flag': events},
index=np.arange(events.size))
def merge_data(self):
"""
Clean and merge the samples and labels data.
"""
# TODO(janmtl): return an error if the files have not been loaded yet.
# Clean the samples data frame and the labels data frame
self.data['samples'] = self._clean_samples(self.data['samples'])
self.data['labels'] = self._clean_labels(self.data['labels'])
self.label_config = self._label_config_to_df(self.config)
# Combine the labels data with the labels configuration
self.data['labels'] = self._merge_labels_and_config(
labels=self.data['labels'],
config=self.label_config)
def bin_data(self):
"""Makes a dict of dicts of pd.Panels at self.output."""
label_bins = self.create_label_bins(self.data['labels'])
major_axis = label_bins.index.values
minor_axis = label_bins.drop(['Start_Time', 'End_Time'], axis=1).columns
minor_axis = minor_axis.append(pd.Index(['stat']))
raw = self.data['samples']
output = {channel: pd.Panel(items=statistics.keys(),
major_axis=major_axis,
minor_axis=minor_axis)
for channel, statistics in self.panels.iteritems()}
for channel, statistics in self.panels.iteritems():
for stat_name, stat_fun in statistics.iteritems():
new_panel = label_bins.copy(deep=True)
new_panel.drop(['Start_Time', 'End_Time'], axis=1, inplace=True)
new_panel['stat'] = np.nan
cond_lbls = pd.Series(data=zip(label_bins.loc[:, 'Condition'],
label_bins.loc[:, 'Label'])
).unique()
for cond_lbl in cond_lbls:
sel = (label_bins.loc[:, 'Condition'] == cond_lbl[0]) \
& (label_bins.loc[:, 'Label'] == cond_lbl[1])
sel_bins = label_bins.loc[sel, :]
samples = pd.Series(name=channel)
pos = pd.Series(name='pos')
for _, label_bin in sel_bins.iterrows():
selector = (raw.index.values >= label_bin['Start_Time']) \
& (raw.index.values < label_bin['End_Time'])
samples = samples.append(raw.loc[selector, channel])
pos = pos.append(raw.loc[selector, 'pos'])
stat = stat_fun(samples, pos)
new_panel.loc[sel, 'stat'] = stat
output[channel][stat_name] = new_panel.sort('Bin_Order')
self.output = output
@staticmethod
def _label_config_to_df(config):
"""Convert the label configuration dictionary to a data frame."""
labels_list = []
for event_type, label_config in config.iteritems():
pattern = label_config['pattern']
if isinstance(pattern, dict):
for event_group, flag in label_config['pattern'].iteritems():
labels_list.append({
'Label': event_type,
'Condition': event_group,
'Duration': label_config['duration'],
'N_Bins': label_config['bins'],
'Left_Trim': label_config.get('left_trim', 0),
'Right_Trim': label_config.get('right_trim', 0),
'flag': flag})
elif isinstance(pattern, int):
labels_list.append({
'Label': event_type,
'Condition': np.nan,
'Duration': label_config['duration'],
'N_Bins': label_config['bins'],
'Left_Trim': label_config.get('left_trim', 0),
'Right_Trim': label_config.get('right_trim', 0),
'flag': pattern})
else:
raise Exception('Bad Biopac config flag {}'.format(pattern))
return pd.DataFrame(labels_list)
@staticmethod
def _clean_labels(labels):
"""
Turn the Biopac flag channel into a data frame of label flags and start
times.
"""
# TODO(janmtl): finish this docstring
flags = labels['flag'].values
low_offset = np.append(-255, flags)
high_offset = np.append(flags, flags[-1])
event_flags = flags[(low_offset-high_offset) != 0]
start_times = np.where((low_offset-high_offset) != 0)[0]
labels = pd.DataFrame({'flag': event_flags,
'Start_Time': start_times})
labels = labels[(labels['flag'] != 255)]
return labels
@staticmethod
def _clean_samples(samples):
"""
.
"""
scale = 0.55
samples.index = samples.index*100
for col_name, col in samples.iteritems():
x = col.index
y = col.values
spl = UnivariateSpline(x, y, k=5, s=scale*len(x))
samples[col_name] = spl(x)
samples['pos'] = True
return samples
@staticmethod
def _merge_labels_and_config(labels, config):
"""
Merge together the contents of the labels file with the label
configuration dictionary.
"""
labels = pd.merge(labels, config, on='flag')
labels.sort('Start_Time', inplace=True)
return labels
def create_label_bins(self, labels):
"""Replace the N_Bins column with Bin_Index and the Duration column
with End_Time. This procedure grows the number of rows in the labels
data frame."""
total_bins = labels['N_Bins'].sum()
label_bins = pd.DataFrame(columns=['Order', 'ID', 'Label', 'Condition', 'Bin_Order', 'Start_Time', 'End_Time', 'Bin_Index'], index=np.arange(0, total_bins))
idx = 0
for _, label in labels.iterrows():
n_bins = label['N_Bins']
cuts = np.linspace(start=label['Start_Time'] + label['Left_Trim'],
stop=(label['Start_Time']
+ label['Duration']
- label['Right_Trim']),
nu
| AjayMT/nodenet | setup.py | Python | mit | 787 | 0 |
#!/usr/bin/env python
from setuptools import setup
setup(
name='nodenet',
version='0.1.0',
description='an asynchronous node-based UDP networking library',
author='Ajay MT',
author_email='ajaymt@icloud.com',
url='http://github.com/AjayMT/nodenet',
download_url='https://github.com/AjayMT/nodenet/tarball/v0.1.0',
keywords='node network UDP asynchronous',
py_modules=['nodenet'],
requires=[
'pyuv (>1.0.0, <2.0.0)',
'emitter (>=0.0.6)'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
]
)
| opensvn/test | src/study/python/cpp/ch13/roundFloat2.py | Python | gpl-2.0 | 280 | 0 |
#!/usr/bin/env python
class RoundFloatManual(object):
def __init__(self, val):
assert isinstance(val, float), \
"Value must be a float!"
self.value = round(val, 2)
def __str__(self):
return '%.2f' % self.value
__repr__ = __str__
| mpclemens/python-explore | turtle/bloom.py | Python | gpl-3.0 | 728 | 0.002747 |
#!/usr/bin/env python
import turtle
import random
def bloom(radius):
turtle.colormode(255)
for rad in range(40, 10, -5):
for looper in range(360//rad):
turtle.up()
turtle.circle(radius+rad, rad)
turtle.begin_fill()
turtle.fillcolor((200+random.randint(0, rad),
200+random.randint(0, rad),
200+random.randint(0, rad)))
turtle.down()
turtle.circle(-rad)
turtle.end_fill()
def main():
"""Simple flower, using global turtle instance"""
turtle.speed(0)
turtle.colormode(1.0)
bloom(5)
turtle.exitonclick()
###
if __name__ == "__main__":
main()
| satyamz/Tests | pyftpdlib/log.py | Python | gpl-2.0 | 6,094 | 0.000164 |
#!/usr/bin/env python
# ======================================================================
# Copyright (C) 2007-2014 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
Logging support for pyftpdlib, inspired from Tornado's
(http://www.tornadoweb.org/).
This is not supposed to be imported/used directly.
Instead you should use logging.basicConfig before serve_forever().
"""
import logging
import sys
import time
try:
import curses
except ImportError:
curses = None
from pyftpdlib._compat import unicode
# default logger
logger = logging.getLogger('pyftpdlib')
def _stderr_supports_color():
color = False
if curses is not None and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
# configurable options
LEVEL = logging.INFO
PREFIX = '[%(levelname)1.1s %(asctime)s]'
COLOURED = _stderr_supports_color()
TIME_FORMAT = "%y-%m-%d %H:%M:%S"
# taken and adapted from Tornado
class LogFormatter(logging.Formatter):
"""Log formatter used in pyftpdlib.
Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
"""
def __init__(self, *args, **kwargs):
logging.Formatter.__init__(self, *args, **kwargs)
self._coloured = COLOURED and _stderr_supports_color()
if self._coloured:
curses.setupterm()
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or curses.tigetstr("setf")
or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = unicode(fg_color, "ascii")
self._colors = {
# blues
logging.DEBUG: unicode(curses.tparm(fg_color, 4), "ascii"),
# green
logging.INFO: unicode(curses.tparm(fg_color, 2), "ascii"),
# yellow
logging.WARNING: unicode(curses.tparm(fg_color, 3), "ascii"),
# red
logging.ERROR: unicode(curses.tparm(fg_color, 1), "ascii")
}
self._normal = unicode(curses.tigetstr("sgr0"), "ascii")
def format(self, record):
try:
record.message = record.getMessage()
except Exception:
err = sys.exc_info()[1]
record.message = "Bad message (%r): %r" % (err, record.__dict__)
record.asctime = time.strftime(TIME_FORMAT,
self.converter(record.created))
prefix = PREFIX % record.__dict__
if self._coloured:
prefix = (self._colors.get(record.levelno, self._normal) +
prefix + self._normal)
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
# byte strings wherever possible).
try:
message = unicode(record.message)
except UnicodeDecodeError:
message = repr(record.message)
formatted = prefix + " " + message
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
formatted = formatted.rstrip() + "\n" + record.exc_text
return formatted.replace("\n", "\n ")
def _config_logging():
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger = logging.getLogger('pyftpdlib')
logger.setLevel(LEVEL)
logger.addHandler(channel)
| NINAnor/sentinel4nature | Tree canopy cover/regression/GBRT_Luroeykalven_manual_FCLS.py | Python | gpl-2.0 | 8,821 | 0.006122 |
# GBRT for Luroeykalven case study site
# Training data: manually digitized training areas, including water pixels
# Predictors: results of FCLS spectral unmixing
# Authors: Stefan Blumentrath
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.model_selection import GridSearchCV
from grass.pygrass import raster as r
from grass.pygrass.utils import getenv
import grass.script as gs
from cStringIO import StringIO
from subprocess import PIPE
from io import BytesIO
from itertools import combinations
def setParamDict():
params = {}
for p in ['learning_rate', 'max_depth', 'loss', 'subsample',
'min_samples_leaf', 'max_features', 'n_estimators']:
if p in ['max_depth', 'min_samples_leaf', 'n_estimators']:
params[p] = map(int, options[p].split(','))
elif p in ['learning_rate', 'max_features', 'subsample']:
params[p] = map(float, options[p].split(','))
else:
params[p] = options[p].split(',')
return params
def writeMap(name, x,y,z):
result = BytesIO()
np.savetxt(result,
np.column_stack((x,
y,
z)))
result.seek(0)
gs.write_command('r.in.xyz', stdin=result.getvalue(), input='-', output=name,
method='mean', separator=' ', overwrite=True)
# #############################################################################
# Define variables
# List of input maps has to start with Y
# Initial settings for automated model selection
options = {'cores': '20',
'learning_rate': '0.009,0.007,0.005',
'max_depth': '11,13,15',
'min_samples_leaf': '1,2,3',
'max_features': '0.9,0.8,0.7',
'subsample': '0.5',
'loss': 'huber',
'n_estimators': '3000',
'y': 'test_area_luroeykalven_water_grid_25833_10m@p_Sentinel4Nature_S2_Luroeykalven',
'x': 'unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_1,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_2,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_3,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_4,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_5,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_6,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_7,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_8,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_9,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_10',
'deviance': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_deviance.pdf',
'featureimportance': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_featureimportance.pdf',
'partialdependence': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_partial_dependence.pdf',
'crossval': '0.25',
'output': 'ForestCover_Luroeykalven_water_FCLS',
'spatial_term': None
}
cores = int(options['cores'])
spatial_term = options['spatial_term']
output = options['output']
deviance = options['deviance']
featureimportance = options['featureimportance']
partialdependence = options['partialdependence']
crossval = float(options['crossval'])
params = setParamDict()
# #############################################################################
# Load data
maps = [options['y']] + options['x'].rstrip('\n').split(',')
data = np.genfromtxt(BytesIO(gs.read_command('r.stats',
flags='1Ng',
input=maps)), delimiter=" ")
y = 2
if spatial_term:
x = [0,1] + range(3,len(data[0]))
else:
x = range(3,len(data[0]))
# Create a mask for NoData in either x or y
mask_y = np.isnan(data[:,y])
for i in range(3,len(data[0])):
if i == 3:
mask_x = np.isnan(data[:,i])
else:
mask_x = np.logical_or((np.isnan(data[:,i])), mask_x)
all_y_idx = np.where(np.logical_or(mask_x, mask_y)==False)
all_x_idx = np.where(mask_x==False)
# Random shuffle data points with training data, excluding all NoData
all_y = shuffle(data[all_y_idx])
# Training and test set
offset = int(all_y.shape[0] * (1 - crossval))
X_train, y_train, coor_train = all_y[:offset,x], all_y[:offset,y], all_y[:offset,[0,1]]
X_test, y_test, coor_test= all_y[offset:,x], all_y[offset:,y], all_y[offset:,[0,1]]
# Set for predictions
predict, coor_predict = data[all_x_idx][:,x], data[all_x_idx][:,[0,1]]
# Run model selection process if requested
model_selection = False
for k in params.keys():
if len(params[k]) > 1:
model_selection = True
if model_selection:
gs.message('Running model selection ...')
clf = ensemble.GradientBoostingRegressor()
# this may take some minutes
gs_cv = GridSearchCV(clf, params, n_jobs=cores).fit(X_train, y_train)
# best hyperparameter setting
best_params = gs_cv.best_params_
print('Best hyper-parameter set is:')
print(best_params)
else:
best_params = {}
for k in params.keys():
best_params[k] = params[k][0]
# #############################################################################
# Fit regression model
gs.message('Fitting regression model ...')
clf = ensemble.GradientBoostingRegressor(**best_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
r2 = r2_score(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
print("R2: %.4f" % r2)
# #############################################################################
# Generate requested plots
# Plot training deviance
# compute test set deviance
if deviance:
test_score = np.zeros((best_params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.rcParams.update({'figure.autolayout': True})
plt.title('Deviance')
plt.plot(np.arange(best_params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(best_params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.savefig(deviance)
# #############################################################################
# Plot feature importance
if featureimportance:
if spatial_term:
cols = ['x', 'y'] + maps[1:]
else:
cols = maps[1:]
plt.figure(figsize=(12, 12))
plt.rcParams.update({'figure.autolayout': True})
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
#plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, np.array(cols)[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig(featureimportance)
if partialdependence:
if spatial_term:
cols = ['x', 'y'] + maps[1:]
else:
cols = maps[1:]
fig, axs = plot_partial_dependence(clf, X_train, cols, n_jobs=cores, n_cols=2,
feature_names=cols, figsize=(len(cols), len(cols)*2))
fig.savefig(partialdependence)
sorted_idx = np.argsort(clf.feature_importances_)
twoway = list(combinations(list(reversed(sorted_idx[-6:])), 2))
fig, axs = plot_partial_dependence(clf, X_train, twoway, n_jobs=cores, n_co
| 0asa/gittle | examples/diff.py | Python | apache-2.0 | 227 | 0 |
from gittle import Gittle
repo = Gittle('.')
lastest = [
info['sha']
for info in repo.commit_info()[1:3]
]
print(repo.diff(*lastest, diff_type='classic'))
print("""
Last Diff
""")
print(list(repo.diff('HEAD')))
| chippey/gaffer | python/GafferSceneTest/__init__.py | Python | bsd-3-clause | 5,457 | 0.00055 |
##########################################################################
#
# Copyright (c) 2012-2014, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferScene
from _GafferSceneTest import *
from SceneTestCase import SceneTestCase
from ScenePlugTest import ScenePlugTest
from GroupTest import GroupTest
from SceneTimeWarpTest import SceneTimeWarpTest
from SceneProceduralTest import SceneProceduralTest
from CubeTest import CubeTest
from PlaneTest import PlaneTest
from SphereTest import SphereTest
from InstancerTest import InstancerTest
from ObjectToSceneTest import ObjectToSceneTest
from CameraTest import CameraTest
from OutputsTest import OutputsTest
from CustomOptionsTest import CustomOptionsTest
from DeleteOptionsTest import DeleteOptionsTest
from CopyOptionsTest import CopyOptionsTest
from SceneNodeTest import SceneNodeTest
from PathMatcherTest import PathMatcherTest
from PathFilterTest import PathFilterTest
from ShaderAssignmentTest import ShaderAssignmentTest
from CustomAttributesTest import CustomAttributesTest
from AlembicSourceTest import AlembicSourceTest
from DeletePrimitiveVariablesTest import DeletePrimitiveVariablesTest
from SeedsTest import SeedsTest
from SceneContextVariablesTest import SceneContextVariablesTest
from SubTreeTest import SubTreeTest
from OpenGLAttributesTest import OpenGLAttributesTest
from StandardOptionsTest import StandardOptionsTest
from ScenePathTest import ScenePathTest
from PathMatcherDataTest import PathMatcherDataTest
from LightTest import LightTest
from TestRender import TestRender
from RenderTest import RenderTest
from OpenGLShaderTest import OpenGLShaderTest
from OpenGLRenderTest import OpenGLRenderTest
from TransformTest import TransformTest
from AimConstraintTest import AimConstraintTest
from PruneTest import PruneTest
from ShaderTest import ShaderTest
from TextTest import TextTest
from MapProjectionTest import MapProjectionTest
from MapOffsetTest import MapOffsetTest
from PointConstraintTest import PointConstraintTest
from SceneReaderTest import SceneReaderTest
from SceneWriterTest import SceneWriterTest
from IsolateTest import IsolateTest
from DeleteAttributesTest import DeleteAttributesTest
from UnionFilterTest import UnionFilterTest
from SceneSwitchTest import SceneSwitchTest
from ShaderSwitchTest import ShaderSwitchTest
from ParentConstraintTest import ParentConstraintTest
from ParentTest import ParentTest
from StandardAttributesTest import StandardAttributesTest
from PrimitiveVariablesTest import PrimitiveVariablesTest
from DuplicateTest import DuplicateTest
from ModuleTest import ModuleTest
from GridTest import GridTest
from SetTest import SetTest
from FreezeTransformTest import FreezeTransformTest
from SetFilterTest import SetFilterTest
from FilterTest import FilterTest
from SceneAlgoTest import SceneAlgoTest
from CoordinateSystemTest import CoordinateSystemTest
from DeleteOutputsTest import DeleteOutputsTest
from ExternalProceduralTest import ExternalProceduralTest
from ClippingPlaneTest import ClippingPlaneTest
from FilterSwitchTest import FilterSwitchTest
from PointsTypeTest import PointsTypeTest
from ParametersTest import ParametersTest
from SceneFilterPathFilterTest import SceneFilterPathFilterTest
from AttributeVisualiserTest import AttributeVisualiserTest
from SceneLoopTest import SceneLoopTest
from SceneProcessorTest import SceneProcessorTest
from MeshToPointsTest import MeshToPointsTest
from InteractiveRenderTest import InteractiveRenderTest
from FilteredSceneProcessorTest import FilteredSceneProcessorTest
from ShaderBallTest import ShaderBallTest
from LightTweaksTest import LightTweaksTest
from FilterResultsTest import FilterResultsTest
if __name__ == "__main__":
import unittest
unittest.main()
| seankelly/buildbot | master/buildbot/reporters/http.py | Python | gpl-2.0 | 4,489 | 0.001337 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import iteritems
import abc
import copy
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.reporters import utils
from buildbot.util import httpclientservice
from buildbot.util import service
class HttpStatusPushBase(service.BuildbotService):
neededDetails = dict()
def checkConfig(self, *args, **kwargs):
service.BuildbotService.checkConfig(self)
httpclientservice.HTTPClientService.checkAvailable(self.__class__.__name__)
if not isinstance(kwargs.get('builders'), (type(None), list)):
config.error("builders must be a list or None")
@defer.inlineCallbacks
def reconfigService(self, builders=None, debug=None, verify=None, **kwargs):
yield service.BuildbotService.reconfigService(self)
self.debug = debug
self.verify = verify
self.builders = builders
self.neededDetails = copy.copy(self.neededDetails)
for k, v in iteritems(kwargs):
if k.startswith("want"):
self.neededDetails[k] = v
@defer.inlineCallbacks
def startService(self):
yield service.BuildbotService.startService(self)
startConsuming = self.master.mq.startConsuming
self._buildCompleteConsumer = yield startConsuming(
self.buildFinished,
('builds', None, 'finished'))
self._buildStartedConsumer = yield startConsuming(
self.buildStarted,
('builds', None, 'new'))
def stopService(self):
self._buildCompleteConsumer.stopConsuming()
self._buildStartedConsumer.stopConsuming()
def buildStarted(self, key, build):
return self.getMoreInfoAndSend(build)
def buildFinished(self, key, build):
return self.getMoreInfoAndSend(build)
def filterBuilds(self, build):
if self.builders is not None:
return build['builder']['name'] in self.builders
return True
@defer.inlineCallbacks
def getMoreInfoAndSend(self, build):
yield utils.getDetailsForBuild(self.master, build, **self.neededDetails)
if self.filterBuilds(build):
yield self.send(build)
@abc.abstractmethod
def send(self, build):
pass
class HttpStatusPush(HttpStatusPushBase):
name = "HttpStatusPush"
secrets = ['user', 'password', "auth"]
def checkConfig(self, serverUrl, user=None, password=None, auth=None, format_fn=None, **kwargs):
if user is not None and auth is not None:
config.error("Only one of user/password or auth must be given")
if user is not None:
config.warnDeprecated("0.9.1", "user/password is deprecated, use 'auth=(user, password)'")
if (format_fn is not None) and not callable(format_fn):
config.error("format_fn must be a function")
HttpStatusPushBase.checkConfig(self, **kwargs)
@defer.inlineCallbacks
def reconfigService(self, serverUrl, user=None, password=None, auth=None, format_fn=None, **kwargs):
yield HttpStatusPushBase.reconfigService(self, **kwargs)
if user is not None:
auth = (user, password)
if format_fn is None:
self.format_fn = lambda x: x
else:
self.format_fn = format_fn
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, serverUrl, auth=auth)
@defer.inlineCallbacks
def send(self, build):
response = yield self._http.post("", json=self.format_fn(build))
if response.code != 200:
log.msg("%s: unable to upload status: %s" %
(response.code, response.content))
| davelab6/pyfontaine | fontaine/charsets/noto_chars/notosanstamil_regular.py | Python | gpl-3.0 | 7,479 | 0.017917 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansTamil-Regular'
native_name = ''
def glyphs(self):
chars = []
chars.append(0x0000) #uni0000 ????
chars.append(0x200B) #uniFEFF ZERO WIDTH SPACE
chars.append(0x200C) #uni200C ZERO WIDTH NON-JOINER
chars.append(0x000D) #uni000D ????
chars.append(0x2212) #minus MINUS SIGN
chars.append(0x2013) #endash EN DASH
chars.append(0x2014) #emdash EM DASH
chars.append(0x2018) #quoteleft LEFT SINGLE QUOTATION MARK
chars.append(0x2019) #quoteright RIGHT SINGLE QUOTATION MARK
chars.append(0x201C) #quotedblleft LEFT DOUBLE QUOTATION MARK
chars.append(0x201D) #quotedblright RIGHT DOUBLE QUOTATION MARK
chars.append(0x0020) #uni00A0 SPACE
chars.append(0x0021) #exclam EXCLAMATION MARK
chars.append(0x0022) #quotedbl QUOTATION MARK
chars.append(0x0023) #numbersign NUMBER SIGN
chars.append(0x0025) #percent PERCENT SIGN
chars.append(0x2026) #ellipsis HORIZONTAL ELLIPSIS
chars.append(0x0027) #quotesingle APOSTROPHE
chars.append(0x0028) #parenleft LEFT PARENTHESIS
chars.append(0x0029) #parenright RIGHT PARENTHESIS
chars.append(0x002A) #asterisk ASTERISK
chars.append(0x002B) #plus PLUS SIGN
chars.append(0x002C) #comma COMMA
chars.append(0x002D) #hyphen HYPHEN-MINUS
chars.append(0x002E) #period FULL STOP
chars.append(0x002F) #slash SOLIDUS
chars.append(0x0030) #zero DIGIT ZERO
chars.append(0x0031) #one DIGIT ONE
chars.append(0x0032) #two DIGIT TWO
chars.append(0x0033) #three DIGIT THREE
chars.append(0x0034) #four DIGIT FOUR
chars.append(0x0035) #five DIGIT FIVE
chars.append(0x0036) #six DIGIT SIX
chars.append(0x0037) #seven DIGIT SEVEN
chars.append(0x0038) #eight DIGIT EIGHT
chars.append(0x0039) #nine DIGIT NINE
chars.append(0x003A) #colon COLON
chars.append(0x003B) #semicolon SEMICOLON
chars.append(0x003C) #less LESS-THAN SIGN
chars.append(0x003D) #equal EQUALS SIGN
chars.append(0x003E) #greater GREATER-THAN SIGN
chars.append(0x003F) #question QUESTION MARK
chars.append(0x200D) #uni200D ZERO WIDTH JOINER
chars.append(0x005B) #bracketleft LEFT SQUARE BRACKET
chars.append(0x005C) #backslash REVERSE SOLIDUS
chars.append(0x005D) #bracketright RIGHT SQUARE BRACKET
chars.append(0x005E) #asciicircum CIRCUMFLEX ACCENT
chars.append(0x005F) #underscore LOW LINE
chars.append(0x007B) #braceleft LEFT CURLY BRACKET
chars.append(0x007C) #bar VERTICAL LINE
chars.append(0x007D) #braceright RIGHT CURLY BRACKET
chars.append(0x007E) #asciitilde TILDE
chars.append(0x00A0) #uni00A0 NO-BREAK SPACE
chars.append(0x00AD) #uni00AD SOFT HYPHEN
chars.append(0x20B9) #uni20B9 ????
chars.append(0x25CC) #uni25CC DOTTED CIRCLE
chars.append(0x00D7) #multiply MULTIPLICATION SIGN
chars.append(0x00F7) #divide DIVISION SIGN
chars.append(0xFEFF) #uniFEFF ZERO WIDTH NO-BREAK SPACE
chars.append(0x0964) #uni0964 DEVANAGARI DANDA
chars.append(0x0965) #uni0965 DEVANAGARI DOUBLE DANDA
chars.append(0x0B82) #uni0B82 TAMIL SIGN ANUSVARA
chars.append(0x0B83) #uni0B83 TAMIL SIGN VISARGA
chars.append(0x0B85) #uni0B85 TAMIL LETTER A
chars.append(0x0B86) #uni0B86 TAMIL LETTER AA
chars.append(0x0B87) #uni0B87 TAMIL LETTER I
chars.append(0x0B88) #uni0B88 TAMIL LETTER II
chars.append(0x0B89) #uni0B89 TAMIL LETTER U
chars.append(0x0B8A) #uni0B8A TAMIL LETTER UU
chars.append(0x0B8E) #uni0B8E TAMIL LETTER E
chars.append(0x0B8F) #uni0B8F TAMIL LETTER EE
chars.append(0x0B90) #uni0B90 TAMIL LETTER AI
chars.append(0x0B92) #uni0B92 TAMIL LETTER O
chars.append(0x0B93) #uni0B93 TAMIL LETTER OO
chars.append(0x0B94) #uni0B94 TAMIL LETTER AU
chars.append(0x0B95) #uni0B95 TAMIL LETTER KA
chars.append(0x0B99) #uni0B99 TAMIL LETTER NGA
chars.append(0x0B9A) #uni0B9A TAMIL LETTER CA
chars.append(0x0B9C) #uni0B9C TAMIL LETTER JA
chars.append(0x0B9E) #uni0B9E TAMIL LETTER NYA
chars.append(0x0B9F) #uni0B9F TAMIL LETTER TTA
chars.append(0x0BA3) #uni0BA3 TAMIL LETTER NNA
chars.append(0x0BA4) #uni0BA4 TAMIL LETTER TA
chars.append(0x0BA8) #uni0BA8 TAMIL LETTER NA
chars.append(0x0BA9) #uni0BA9 TAMIL LETTER NNNA
chars.append(0x0BAA) #uni0BAA TAMIL LETTER PA
chars.append(0x0BAE) #uni0BAE TAMIL LETTER MA
chars.append(0x0BAF) #uni0BAF TAMIL LETTER YA
chars.append(0x0BB0) #uni0BB0 TAMIL LETTER RA
chars.append(0x0BB1) #uni0BB1 TAMIL LETTER RRA
chars.append(0x0BB2) #uni0BB2 TAMIL LETTER LA
chars.append(0x0BB3) #uni0BB3 TAMIL LETTER LLA
chars.append(0x0BB4) #uni0BB4 TAMIL LETTER LLLA
chars.append(0x0BB5) #uni0BB5 TAMIL LETTER VA
chars.append(0x0BB6) #uni0BB6 TAMIL LETTER SHA
chars.append(0x0BB7) #uni0BB7 TAMIL LETTER SSA
chars.append(0x0BB8) #uni0BB8 TAMIL LETTER SA
chars.append(0x0BB9) #uni0BB9 TAMIL LETTER HA
chars.append(0x0BBE) #uni0BBE TAMIL VOWEL SIGN AA
chars.append(0x0BBF) #uni0BBF TAMIL VOWEL SIGN I
chars.append(0x0BC0) #uni0BC0 TAMIL VOWEL SIGN II
chars.append(0x0BC1) #uni0BC1 TAMIL VOWEL SIGN U
chars.append(0x0BC2) #uni0BC2 TAMIL VOWEL SIGN UU
chars.append(0x0BC6) #uni0BC6 TAMIL VOWEL SIGN E
chars.append(0x0BC7) #uni0BC7 TAMIL VOWEL SIGN EE
chars.append(0x0BC8) #uni0BC8 TAMIL VOWEL SIGN AI
chars.append(0x0BCA) #uni0BCA TAMIL VOWEL SIGN O
chars.append(0x0BCB) #uni0BCB TAMIL VOWEL SIGN OO
chars.append(0x0BCC) #uni0BCC TAMIL VOWEL SIGN AU
chars.append(0x0BCD) #uni0BCD TAMIL SIGN VIRAMA
chars.append(0x0BD0) #uni0BD0 TAMIL OM
chars.append(0x0BD7) #uni0BD7 TAMIL AU LENGTH MARK
chars.append(0x0BE6) #uni0BE6 TAMIL DIGIT ZERO
chars.append(0x0BE7) #uni0BE7 TAMIL DIGIT ONE
chars.append(0x0BE8) #uni0BE8 TAMIL DIGIT TWO
chars.append(0x0BE9) #uni0BE9 TAMIL DIGIT THREE
chars.append(0x0BEA) #uni0BEA TAMIL DIGIT FOUR
chars.append(0x0BEB) #uni0BEB TAMIL DIGIT FIVE
chars.append(0x0BEC) #uni0BEC TAMIL DIGIT SIX
chars.append(0x0BED) #uni0BED TAMIL DIGIT SEVEN
chars.append(0x0BEE) #uni0BEE TAMIL DIGIT EIGHT
chars.append(0x0BEF) #uni0BEF TAMIL DIGIT NINE
chars.append(0x0BF0) #uni0BF0 TAMIL NUMBER TEN
chars.append(0x0BF1) #uni0BF1 TAMIL NUMBER ONE HUNDRED
chars.append(0x0BF2) #uni0BF2 TAMIL NUMBER ONE THOUSAND
chars.append(0x0BF3) #uni0BF3 TAMIL DAY SIGN
chars.append(0x0BF4) #uni0BF4 TAMIL MONTH SIGN
chars.append(0x0BF5) #uni0BF5 TAMIL YEAR SIGN
chars.append(0x0BF6) #uni0BF6 TAMIL DEBIT SIGN
chars.append(0x0BF7) #uni0BF7 TAMIL CREDIT SIGN
chars.append(0x0BF8) #uni0BF8 TAMIL AS ABOVE SIGN
chars.append(0x0BF9) #uni0BF9 TAMIL RUPEE SIGN
chars.append(0x0BFA) #uni0BFA TAMIL NUMBER SIGN
return chars
| nickhand/nbodykit | nbodykit/source/catalog/file.py | Python | gpl-3.0 | 7,330 | 0.004093 |
from nbodykit.base.catalog import CatalogSource
from nbodykit.io.stack import FileStack
from nbodykit import CurrentMPIComm
from nbodykit import io
from nbodykit.extern import docrep
from six import string_types
import textwrap
import os
__all__ = ['FileCatalogFactory', 'FileCatalogBase',
'CSVCatalog', 'BinaryCatalog', 'BigFileCatalog',
'HDFCatalog', 'TPMBinaryCatalog', 'Gadget1Catalog', 'FITSCatalog']
class FileCatalogBase(CatalogSource):
"""
Base class to create a source of particles from a
single file, or multiple files, on disk.
Files of a specific type should be subclasses of this class.
Parameters
----------
filetype : subclass of :class:`~nbodykit.io.base.FileType`
the file-like class used to load the data from file; should be a
subclass of :class:`nbodykit.io.base.FileType`
args : tuple, optional
the arguments to pass to the ``filetype`` class when constructing
each file object
kwargs : dict, optional
the keyword arguments to pass to the ``filetype`` class when
constructing each file object
comm : MPI Communicator, optional
the MPI communicator instance; default (``None``) sets to the
current communicator
"""
@CurrentMPIComm.enable
def __init__(self, filetype, args=(), kwargs={}, comm=None):
self.comm = comm
self.filetype = filetype
# bcast the FileStack
if self.comm.rank == 0:
self._source = FileStack(filetype, *args, **kwargs)
else:
self._source = None
self._source = self.comm.bcast(self._source)
# compute the size; start with full file.
lstart = self.comm.rank * self._source.size // self.comm.size
lend = (self.comm.rank + 1) * self._source.size // self.comm.size
self._size = lend - lstart
self.start = 0
self.end = self._source.size
self._lstart = lstart # offset in the file for this rank
self._lend = lend # offset in the file for this rank
# update the meta-data
self.attrs.update(self._source.attrs)
if self.comm.rank == 0:
self.logger.info("Extra arguments to FileType: %s %s" % (str(args), str(kwargs)))
CatalogSource.__init__(self, comm=comm)
def query_range(self, start, end):
"""
Seek to a range in the file catalog.
Parameters
----------
start : int
start of the file relative to the physical file
end : int
end of the file relative to the physical file
Returns
-------
A new catalog that only accesses the given region of the file.
If the original catalog (self) contains any assigned columns not directly
obtained from the file, then the function will raise ValueError, since
the operation in that case is not well defined.
"""
if len(CatalogSource.hardcolumns.fget(self)) > 0:
raise ValueError("cannot seek if columns have been attached to the FileCatalog")
other = self.copy()
other._lstart = self.start + start + self.comm.rank * (end - start) // self.comm.size
other._lend = self.start + start + (self.comm.rank + 1) * (end - start) // self.comm.size
other._size = other._lend - other._lstart
other.start = start
other.end = end
CatalogSource.__init__(other, comm=self.comm)
return other
def __repr__(self):
path = self._source.path
name = self.__class__.__name__
args = (name, self.size, repr(self._source))
return "%s(size=%d, %s)" % args
@property
def hardcolumns(self):
"""
The union of the columns in the file and any transformed columns.
"""
defaults = CatalogSource.hardcolumns.fget(self)
return list(self._source.dtype.names) + defaults
def get_hardcolumn(self, col):
"""
Return a column from the underlying file source.
Columns are returned as dask arrays.
"""
if col in self._source.dtype.names:
return self._source.get_dask(col)[self._lstart:self._lend]
else:
return CatalogSource.get_hardcolumn(self, col)
def _make_docstring(filetype, examples):
"""
Internal function to generate the doc strings for the built-in
CatalogSource objects that rely on :mod:`nbodykit.io` classes
to read data from disk.
"""
qualname = '%s.%s' %(filetype.__module__, filetype.__name__)
__doc__ = """
A CatalogSource that uses :class:`~{qualname}` to read data from disk.
Multiple files can be read at once by supplying a list of file
names or a glob asterisk pattern as the ``path`` argument. See
:ref:`reading-multiple-files` for examples.
Parameters
----------
%(test.parameters)s
comm : MPI Communicator, optional
the MPI communicator instance; default (``None``) sets to the
current communicator
attrs : dict, optional
dictionary of meta-data to store in :attr:`attrs`
""".format(qualname=qualname)
if examples is not None:
__doc__ += """
Examples
--------
Please see :ref:`the documentation <%s>` for examples.
""" %examples
# get the Parameters from the IO libary class
d = docrep.DocstringProcessor()
d.get_sections(d.dedents(filetype.__doc__), 'test', ['Parameters'])
return d.dedents(__doc__)
def FileCatalogFactory(name, filetype, examples=None):
"""
Factory method to create a :class:`~nbodykit.base.catalog.CatalogSource`
that uses a subclass of :mod:`nbodykit.io.base.FileType` to read
data from disk.
Parameters
----------
name : str
the name of the catalog class to create
filetype : subclass of :class:`nbodykit.io.base.FileType`
the subclass of the FileType that reads a specific type of data
examples : str, optional
if given, a documentation cross-reference link where examples can be
found
Returns
-------
subclass of :class:`FileCatalogBase` :
the ``CatalogSource`` object that reads data using ``filetype``
"""
def __init__(self, *args, **kwargs):
comm = kwargs.pop('comm', None)
attrs = kwargs.pop('attrs', {})
FileCatalogBase.__init__(self, filetype=filetype, args=args, kwargs=kwargs, comm=comm)
self.attrs.update(attrs)
# make the doc string for this class
__doc__ = _make_docstring(filetype, examples)
# make the new class object and return it
newclass = type(name, (FileCatalogBase,),{"__init__": __init__, "__doc__":__doc__})
return newclass
CSVCatalog = FileCatalogFactory("CSVCatalog", io.CSVFile, examples='csv-data')
BinaryCatalog = FileCatalogFactory("BinaryCatalog", io.BinaryFile, examples='binary-data')
BigFileCatalog = FileCatalogFactory("BigFileCatalog", io.BigFile, examples='bigfile-data')
HDFCatalog = FileCatalogFactory("HDFCatalog", io.HDFFile, examples='hdf-data')
TPMBinaryCatalog = FileCatalogFactory("TPMBinaryCatalog", io.TPMBinaryFile)
FITSCatalog = FileCatalogFactory("FITSCatalog", io.FITSFile, examples='fits-data')
Gadget1Catalog = FileCatalogFactory("Gadget1Catalog", io.Gadget1File, examples=None)
| algolia/algoliasearch-client-python | tests/unit/test_insights_client.py | Python | mit | 883 | 0.001133 |
import unittest
from algoliasearch.configs import InsightsConfig
from algoliasearch.exceptions import AlgoliaException
from algoliasearch.insights_client import InsightsClient
class TestInsightsClient(unittest.TestCase):
def test_create(self):
client = InsightsClient.create("foo", "bar")
self.assertIsInstance(client, InsightsClient)
with self.assertRaises(AssertionError) as _:
InsightsClient.create("", "")
def test_create_with_config(self):
config = InsightsConfig("foo", "bar")
self.assertIsInstance(InsightsClient.create_with_config(config), InsightsClient)
def test_region(self):
client = InsightsClient.create("foo", "bar")
self.assertEqual(client._config._region, "us")
client = InsightsClient.create("foo", "bar", "fr")
self.assertEqual(client._config._region, "fr")
| KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_materials/material/extra/technique_sid_target/technique_sid_target.py | Python | mit | 4,063 | 0.007384 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_materials', 'material', 'extra', 'technique']
attrName = 'profile'
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
# if baseline fails, no point in further checking
if (self.status_baseline == False):
self.status_superior = self.status_baseline
return self.status_superior
if ( self.__assistant.CompareRenderedImages(context) ):
self.__assistant.CompareImagesAgainst(context, "_reference_material_extra_element_names")
# Check for preservation of element data
self.__assistant.FullPreservation(context, self.tagList, self.attrName)
self.status_superior = self.__assistant.DeferJudgement(context)
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| anush7/django-article-project | article/urls.py | Python | mit | 197 | 0.005076 |
from django.conf.urls import include, url
from article import views
urlpatterns = [
url(r'^$', views.articles, name='articles'),
url(r'^add/?$', views.add_articles, name='add-articles'),
]
| texastribune/armstrong.base | fabfile/_utils.py | Python | bsd-3-clause | 342 | 0.008772 |
from fabric.api import *
from fabric.decorators import task
import os, sys
sys.path[0:0] = [os.path.join(os.path.realpath('.'), '..'), ]
try:
from d51.django.virtualenv.test_runner import run_tests
except ImportError, e:
import sys
sys.stderr.write("This project requires d51.django.virtualenv.test_runner\n")
sys.exit(-1)
| Tehnix/PyIRCb | src/irc/botDispatcher.py | Python | bsd-3-clause | 2,303 | 0.005211 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IRC bot...
"""
import threading
import src.utilities as util
import src.settings
import src.irc.botObject
class BotDispatcher(threading.Thread):
"""
The BotDispatcher object handles the delegation of the various bots on
the various specified servers.
One Bot object for each server (call name), meaning several bots can be
connected to the same address.
All bots are stored in the botObjects class variable.
"""
botObjects = {}
def __init__(self):
"""Prepare the object and fire off the dispatch method."""
super(BotDispatcher, self).__init__()
self.settingsInstance = src.settings.Settings()
self.dispatch()
def dispatch(self):
"""Create one Bot object for each server and start it in threads."""
servers = self.settingsInstance.settings['servers']
for name, info in servers.items():
self.botObjects[name] = src.irc.botObject.BotObject(
self.settingsInstance.settings,
info
)
thread = threading.Thread(
target=self.botObjects[name].connectToServer
)
thread.start()
def destroyBot(self, botObjName):
"""Gracefully shut down the bot and remove it from self.botObjects."""
try:
self.botObjects[botObjName].destroy()
del self.botObjects[botObjName]
util.write("Bot %s has been destroyed." % botObjName)
except KeyError:
util.write(
"Bot %s does not exist." % botObjName,
outputType="Warning"
)
def reloadBot(self, botObjName):
"""First destroy the Bot object and then reinstantiate it."""
try:
info = self.botObjects[botObjName].info
except KeyError:
info = None
if info is not None:
self.destroyBot(botObjName)
self.botObjects[botObjName] = src.irc.botObject.BotObject(info)
util.write("Bot %s has been reloaded." % botObjName)
else:
util.write(
"Bot %s does not exist." % botObjName,
outputType="Warning"
)
| rtqichen/torchdiffeq | examples/ode_demo.py | Python | mit | 5,643 | 0.002304 |
import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=1000)
parser.add_argument('--batch_time', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
args = parser.parse_args()
if args.adjoint:
from torchdiffeq import odeint_adjoint as odeint
else:
from torchdiffeq import odeint
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
true_y0 = torch.tensor([[2., 0.]]).to(device)
t = torch.linspace(0., 25., args.data_size).to(device)
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]]).to(device)
class Lambda(nn.Module):
def forward(self, t, y):
return torch.mm(y**3, true_A)
with torch.no_grad():
true_y = odeint(Lambda(), true_y0, t, method='dopri5')
def get_batch():
s = torch.from_numpy(np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
batch_y0 = true_y[s] # (M, D)
batch_t = t[:args.batch_time] # (T)
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0) # (T, M, D)
return batch_y0.to(device), batch_t.to(device), batch_y.to(device)
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
if args.viz:
makedirs('png')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 4), facecolor='white')
ax_traj = fig.add_subplot(131, frameon=False)
ax_phase = fig.add_subplot(132, frameon=False)
ax_vecfield = fig.add_subplot(133, frameon=False)
plt.show(block=False)
def visualize(true_y, pred_y, odefunc, itr):
if args.viz:
ax_traj.cla()
ax_traj.set_title('Trajectories')
ax_traj.set_xlabel('t')
ax_traj.set_ylabel('x,y')
ax_traj.plot(t.cpu().numpy(), true_y.cpu().numpy()[:, 0, 0], t.cpu().numpy(), true_y.cpu().numpy()[:, 0, 1], 'g-')
ax_traj.plot(t.cpu().numpy(), pred_y.cpu().numpy()[:, 0, 0], '--', t.cpu().numpy(), pred_y.cpu().numpy()[:, 0, 1], 'b--')
ax_traj.set_xlim(t.cpu().min(), t.cpu().max())
ax_traj.set_ylim(-2, 2)
ax_traj.legend()
ax_phase.cla()
ax_phase.set_title('Phase Portrait')
ax_phase.set_xlabel('x')
ax_phase.set_ylabel('y')
ax_phase.plot(true_y.cpu().numpy()[:, 0, 0], true_y.cpu().numpy()[:, 0, 1], 'g-')
ax_phase.plot(pred_y.cpu().numpy()[:, 0, 0], pred_y.cpu().numpy()[:, 0, 1], 'b--')
ax_phase.set_xlim(-2, 2)
ax_phase.set_ylim(-2, 2)
ax_vecfield.cla()
ax_vecfield.set_title('Learned Vector Field')
        ax_vecfield.set_xlabel('x')
ax_vecfield.set_ylabel('y')
y, x = np.mgrid[-2:2:21j, -2:2:21j]
dydt = odefunc(0, torch.Tensor(np.stack([x, y], -1).reshape(21 * 21, 2)).to(device)).cpu().detach().numpy()
mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
dydt = (dydt / mag)
dydt = dydt.reshape(21, 21, 2)
        ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
ax_vecfield.set_xlim(-2, 2)
ax_vecfield.set_ylim(-2, 2)
fig.tight_layout()
plt.savefig('png/{:03d}'.format(itr))
plt.draw()
plt.pause(0.001)
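# Learned dynamics: a small MLP applied to y**3, trained to match the observed trajectory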
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
self.net = nn.Sequential(
nn.Linear(2, 50),
nn.Tanh(),
nn.Linear(50, 2),
)
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.1)
nn.init.constant_(m.bias, val=0)
def forward(self, t, y):
return self.net(y**3)
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
if __name__ == '__main__':
ii = 0
func = ODEFunc().to(device)
optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
end = time.time()
time_meter = RunningAverageMeter(0.97)
loss_meter = RunningAverageMeter(0.97)
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
batch_y0, batch_t, batch_y = get_batch()
pred_y = odeint(func, batch_y0, batch_t).to(device)
loss = torch.mean(torch.abs(pred_y - batch_y))
loss.backward()
optimizer.step()
time_meter.update(time.time() - end)
loss_meter.update(loss.item())
if itr % args.test_freq == 0:
with torch.no_grad():
pred_y = odeint(func, true_y0, t)
loss = torch.mean(torch.abs(pred_y - true_y))
print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
visualize(true_y, pred_y, func, ii)
ii += 1
end = time.time()
|
Resmin/Resmin
|
resmin/apps/comment/views.py
|
Python
|
gpl-3.0
| 1,460
| 0
|
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, render
from .models import Comment
from .forms import UpdateCommentForm
from django.http.response import JsonResponse
from django.views.decorators.http import require_http_methods
def update_comment(request, cid):
"""
    TODO: This function has code duplication, clean it when you have time.
"""
comment = get_object_or_404(Comment, id=cid, owner=request.user)
if request.method == 'POST':
update_comment_form = UpdateCommentForm(
request.POST, comment=comment)
if update_comment_form.is_valid():
update_comment_form.save()
return render(request, 'comments/comment.html', {
'comment': comment})
else:
        update_comment_form = UpdateCommentForm(comment=comment)
return render(request, 'comments/update_form.html', {
'update_comment_form': update_comment_form})
def get_comment(request, cid):
comment = get_object_or_404(Comment, id=cid, owner=request.user)
return render(request, 'comments/comment.html', {
'comment': comment})
@csrf_exempt
@require_http_methods(['POST', ])
def delete_comment(request, cid):
comment = get_object_or_404(Comment, id=cid, owner=request.user)
comment.status = Comment.DELETED_BY_OWNER
comment.save()
comment.story.update_comment_count(save=True)
return JsonResponse({'status': 'deleted'})
|
lpe234/sanicDemo
|
controller/__init__.py
|
Python
|
gpl-3.0
| 137
| 0
|
# -*- coding: UTF-8 -*-
# bp_v1
from .api_v1 import bp_v1
# bp_v2
from .api_v2 import bp_v2
__author__ = 'lpe234'
"""
Controller
"""
|
zydiig/CKAN.py
|
libckan/fs.py
|
Python
|
gpl-3.0
| 1,487
| 0
|
import logging
import re
from pathlib import Path
def find_by_name(path, name):
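    # Depth-first search for the first entry with the given name under `path`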
if type(path) is str:
path = Path(path)
for child in path.iterdir():
        if child.name == name:
return child
        elif child.is_dir():
ret = find_by_name(child, name)
if ret:
return ret
return None
def find_by_regexp(path, regexp):
if type(path) is str:
path = Path(path)
for child in path.iterdir():
if re.fullmatch(regexp, str(child)):
return child
elif child.is_dir():
ret = find_by_regexp(child, regexp)
if ret:
return ret
return None
def gen_file_list(path: Path, filters=None, toplevel=True):
filters = filters or []
files = [] if toplevel else [path]
for child in path.iterdir():
if all([filter_.check(child) for filter_ in filters]):
if child.is_dir():
files += gen_file_list(child, filters, toplevel=False)
else:
files.append(child)
    files = [file.relative_to(path) for file in files] if toplevel else files
    # Filter in a second pass instead of removing items from the list while iterating over it
    kept = []
    for file in files:
        if all([filter_.check(file) for filter_ in filters]):
            kept.append(file)
        else:
            logging.info("Filtered:{}".format(file))
    return kept
def is_relative_to(this, that):
try:
this.relative_to(that)
except ValueError:
return False
else:
return True
|
aerval/de.rki.proteomic_virus_detection
|
plugin_source/SixFrameTranslation.py
|
Python
|
gpl-2.0
| 2,361
| 0.011012
|
from Bio import SeqIO
from Bio.Alphabet import IUPAC, ProteinAlphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from optparse import OptionParser
def translate_to_six_frame(dnaRecord, translationTable):
'''
translate a Bio.SeqRecord of a DNA sequence via the given translation table into the six possible translations
dnaRecord = Bio.SeqRecord of DNA sequence (or other)
translationTable = the codon table for translating base triplets into amino acids (number between 1 and 25 based on http://www.ncbi.nlm.nih.gov/Taxonomy/taxonomyhome.html/index.cgi?chapter=cgencodes)
'''
translations = []
for frame in range(3):
for direction in ['forward', 'reverse']:
if direction == 'forward':
sequence = dnaRecord.seq[frame:]
else:
sequence = dnaRecord.seq.reverse_complement()[frame:]
aaSeq = Seq(str(sequence.translate(translationTable)), alphabet = ProteinAlphabet)
aaRecord = SeqRecord(aaSeq, dnaRecord.name)
aaRecord.id = '%s_%s%i' % (aaRecord.id, direction[0], frame)
            aaRecord.description = '%s|translation %s frame %i' % (aaRecord.description, direction, frame)
            translations.append(aaRecord)
    return translations
if __name__ == '__main__':
op = OptionParser()
op.add_option('-g','--genomes', dest='genomeFilenames', action='append', default=None, help='the input genome in fasta format')
op.add_option('-o','--outputs', dest='outputFilenames', action='append', default=None, help='the output fasta file with the six frame translation')
op.add_option('-t','--translTable', dest='translationTableNumber', default=1, type='int', help='a translation table number according to http://www.ncbi.nlm.nih.gov/Taxonomy/taxonomyhome.html/index.cgi?chapter=cgencodes')
opts, args = op.parse_args()
for genomeFilename, outputFilename in zip(opts.genomeFilenames, opts.outputFilenames):
translations = []
for dnaRecord in SeqIO.parse(open(genomeFilename), 'fasta'):
translations.extend(translate_to_six_frame(dnaRecord, opts.translationTableNumber))
SeqIO.write(translations, open(outputFilename, 'w+'), 'fasta')
|
davek44/Scimm
|
bin/scimm.py
|
Python
|
artistic-2.0
| 9,432
| 0.004983
|
#!/usr/bin/env python
from optparse import OptionParser, SUPPRESS_HELP
import os, glob, subprocess, sys, math, shutil
import imm_cluster, util
############################################################
# scimm.py
#
# Sequence Clustering with Interpolated Markov Models
#
# Author: David Kelley
############################################################
scimm_bin = "/Users/dk/research/umd/metagenomics_clustering/Scimm/bin"
bin_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + ':' + bin_dir
else:
os.environ['PYTHONPATH'] = bin_dir
############################################################
# main
############################################################
def main():
parser = OptionParser()
# generic options
parser.add_option('-s','-r', dest='readsf', help='Fasta file of sequences')
parser.add_option('-k', dest='k', type='int', help='Number of clusters')
parser.add_option('-p', dest='proc', type='int', default=2, help='Number of processes to run [Default=%default]')
# help='Use a soft assignment of reads to clusters [Default=%default]'
parser.add_option('--em',dest='soft_assign', action='store_true', default=False, help=SUPPRESS_HELP)
# likelybin options
parser.add_option('--ls', dest='lb_starts', type='int', default=1, help='Number of random LikelyBin starts [Default=%default]')
parser.add_option('--ln', dest='lb_numreads', type='int', default=3000, help='Number of reads to sample for LikelyBin [Default=%default]')
parser.add_option('--lt', dest='lb_threads', type='int', default=2, help='Number of LikelyBin threads per start, and CPUs for imm_cluster [Default=%default]')
parser.add_option('--lo', dest='lb_order', type='int', default=3, help='Order of LikelyBin Markov model [Default=%default]')
# compostbin options
parser.add_option('--cs', dest='cb_starts', type='int', default=1, help='Number of random CompostBin starts [Default=%default]')
parser.add_option('--cn', dest='cb_numreads', type='int', default=3000, help='Number of reads to sample for CompostBin [Default=%default]')
parser.add_option('--ct', dest='cb_threads', type='int', default=1, help='Number of CPUs for imm_cluster [Default=%default]')
parser.add_option('--co','--cm', dest='cb_mers', type='int', default=5, help='mers to count in CompostBin [Default=%default]')
(options, args) = parser.parse_args()
options.readsf = os.path.abspath(options.readsf)
total_starts = options.lb_starts + options.cb_starts
if options.soft_assign:
em = '--em'
else:
em = ''
# run initial samples
i = 0
while i < total_starts:
p = []
j = 0
while j < options.proc and i < total_starts:
# LikelyBin
if i < options.lb_starts:
# double check processes
if j + options.lb_threads <= options.proc:
# make a temp dir to compute in and cd to it
temp_dir('tmp.start%d' % i)
p.append(subprocess.Popen('%s/lb_init.py -r %s -n %d -k %d -o %d -p %d %s' % (bin_dir, options.readsf, options.lb_numreads, options.k, options.lb_order, options.lb_threads, em), shell=True))
os.chdir('..')
i += 1
elif j == 0:
print 'Cannot use more lb threads than processes'
exit()
j += options.lb_threads # even if not true, just move things along
# CompostBin
else:
# double check processes
                if j + options.cb_threads <= options.proc:
# make a temp dir to compute in and cd to it
temp_dir('tmp.start%d' % i)
p.append(subprocess.Popen('%s/cb_init.py -r %s -n %d -k %d -m %d -p %d %s' % (bin_dir, options.readsf, options.cb_numreads, options.k, options.cb_mers, options.cb_threads, em), shell=True))
os.chdir('..')
i += 1
elif j == 0:
                    print 'Cannot use more cb threads than processes'
                    exit()
j += options.lb_threads # even if not true, just move things along
# wait for processes to finish
for j in range(len(p)):
os.waitpid(p[j].pid, 0)
# choose best start
#maxlike_clusters(total_starts, options.readsf, options.k, options.soft_assign)
minentropy_clusters(total_starts, options.readsf, options.k, options.soft_assign)
# in case k changed
new_k = determine_k(options.soft_assign, options.k)
# run imm clustering completely
p = subprocess.Popen('%s/imm_cluster.py -k %d -r %s -p %d -i --trained %s &> immc.log' % (bin_dir, new_k, options.readsf, options.proc, em), shell=True)
os.waitpid(p.pid, 0)
############################################################
# temp_dir
#
# Create and change to a temporary directory to do initial
# runs within
############################################################
def temp_dir(tmpdir):
if os.path.isdir(tmpdir):
os.chdir(tmpdir)
for f in glob.glob('*'):
os.remove(f)
else:
os.mkdir(tmpdir)
os.chdir(tmpdir)
############################################################
# maxlike_clusters
#
# Copy the clustering with maximum likelihood to the main
# directory
############################################################
def maxlike_clusters(total_starts, readsf, k, soft_assign):
like = [0]*total_starts
for i in range(total_starts):
os.chdir('tmp.start%d' % i)
if len(glob.glob('cluster-*.fa')) > 0:
# determine likelihood
like[i] = scimm_like(readsf, k, soft_assign)
else:
# something failed
like[i] = ''
os.chdir('..')
# find max likelihood initial partitioning
max_like = min(like) # '' is greater than numbers
for i in range(len(like)):
if like[i] != '' and like[i] >= max_like:
max_like = like[i]
max_clust = i
# get files from max
for c in range(len(glob.glob('cluster-*.fa'))):
shutil.copy('tmp.start%d/cluster-%d.fa' % (max_clust,c), 'cluster-%d.fa' % c)
shutil.copy('tmp.start%d/icm-%dscores.tmp' % (max_clust,c), 'icm-%dscores.tmp' % c)
############################################################
# scimm_like
#
# Calculate the likelihood of the given clustering and IMM
############################################################
def scimm_like(readsf, k, soft_assign):
new_k = determine_k(soft_assign, k)
priors = imm_cluster.update_priors([1.0/new_k]*new_k, readsf, {}, {}, soft_assign)
(likelihood,read_probs) = imm_cluster.get_read_probs(priors, {}, {}, soft_assign)
return likelihood
############################################################
# minentropy_clusters
#
# Copy the clustering with minimum entropy to the main
# directory.
############################################################
def minentropy_clusters(total_starts, readsf, k, soft_assign):
entropy = [0]*total_starts
for i in range(total_starts):
os.chdir('tmp.start%d' % i)
if len(glob.glob('cluster-*.fa')) > 0:
# determine likelihood
entropy[i] = get_entropy(readsf, k, soft_assign)
else:
# something failed
entropy[i] = ''
os.chdir('..')
# find min entropy partitioning ('' is greater than numbers)
(min_entropy, min_clust) = util.min_i(entropy)
# get files from min
for c in range(len(glob.glob('tmp.start%d/cluster-*.fa' % min_clust))):
shutil.copy('tmp.start%d/cluster-%d.fa' % (min_clust,c), 'cluster-%d.fa' % c)
shutil.copy('tmp.start%d/icm-%d.scores.tmp' % (min_clust,c), 'icm-%d.scores.tmp' % c)
############################################################
# get_entropy
#
# Return the entropy of the clusters in the current
# directory.
##########################################################
|
yosuke/OpenHRIVoice
|
openhrivoice/parsecmudict.py
|
Python
|
epl-1.0
| 1,210
| 0.004959
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''CMU dict file parser
Copyright (C) 2010
Yosuke Matsusaka
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the Eclipse Public License -v 1.0 (EPL)
http://www.opensource.org/licenses/eclipse-1.0.txt
'''
class CMUDict:
""" Utility class to parse CMU Pronunciation Dictionaly."""
def __init__(self, fname):
self._fname = fname
self._dict = {}
self.parse(self._fname)
def parse(self, fname):
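        # Skip the header line, then map each entry's word to its list of pronunciations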
        f = open(fname, 'r')
        f.readline()
        for l in f:
            t = l.strip().split(' ', 2)
w = t[0].strip('()"')
v = t[2].replace('(', '').replace(')', '').replace(' 0', '').replace(' 1', '')
try:
self._dict[w].append(v)
except KeyError:
self._dict[w] = [v,]
def lookup(self, w):
try:
return self._dict[w]
except KeyError:
return []
if __name__ == '__main__':
doc = CMUDict('/usr/share/festival/dicts/cmu/cmudict-0.4.out')
print doc.lookup('hello')
|
rosenvladimirov/addons
|
l10n_bg_uic/hooks.py
|
Python
|
agpl-3.0
| 521
| 0.001931
|
# -*- coding: utf-8 -*-
# © 2015 Roberto Lizana (Trey)
# © 2016 Pedro M. Baeza
# © 2017 Rosen Vladimirov
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, SUPERUSER_ID
def post_init_hook(cr, registry):
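    # Backfill res_partner_id_number records from partners that already have a company_registry value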
cr.execute("""
        INSERT INTO res_partner_id_number
(partner_id, name, category_id, status, active)
SELECT id, company_registry, 1, 'open', TRUE
FROM res_partner
WHERE company_registry IS NOT NULL""")
env = api.Environment(cr, SUPERUSER_ID, {})
|
vlegoff/tsunami
|
src/secondaires/navigation/commandes/debarquer/__init__.py
|
Python
|
bsd-3-clause
| 4,221
| 0.000951
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'débarquer'."""
from math import sqrt
from primaires.interpreteur.commande.commande import Commande
from secondaires.navigation.constantes import *
class CmdDebarquer(Commande):
"""Commande 'debarquer'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "debarquer", "debark")
self.nom_categorie = "navire"
self.aide_courte = "débarque du navire"
self.aide_longue = \
"Cette commande permet de débarquer du navire sur lequel " \
"on se trouve. On doit se trouver assez prêt d'une côte " \
"pour débarquer dessus."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
salle = personnage.salle
if not hasattr(salle, "navire") or salle.navire is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
if navire.etendue is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
        personnage.agir("bouger")
# On va chercher la salle la plus proche
        etendue = navire.etendue
        # On cherche la salle de navire la plus proche
d_salle = None # la salle de destination
distance = 2
x, y, z = salle.coords.tuple()
for t_salle in etendue.cotes.values():
if t_salle.coords.z == z:
t_x, t_y, t_z = t_salle.coords.tuple()
t_distance = sqrt((x - t_x) ** 2 + (y - t_y) ** 2)
if t_distance < distance and t_salle.nom_terrain in \
TERRAINS_ACCOSTABLES:
d_salle = t_salle
distance = t_distance
if d_salle is None:
personnage << "|err|Aucun quai n'a pu être trouvé à " \
"proximité.|ff|"
return
personnage.salle = d_salle
personnage << "Vous sautez sur {}.".format(
d_salle.titre.lower())
personnage << d_salle.regarder(personnage)
d_salle.envoyer("{{}} arrive en sautant depuis {}.".format(
navire.nom), personnage)
salle.envoyer("{{}} saute sur {}.".format(
d_salle.titre.lower()), personnage)
importeur.hook["personnage:deplacer"].executer(
personnage, d_salle, None, 0)
if not hasattr(d_salle, "navire") or d_salle.navire is None:
personnage.envoyer_tip("N'oubliez pas d'amarrer votre navire " \
"avec %amarre% %amarre:attacher%.")
|
pragmatux/systemd
|
src/python-systemd/journal.py
|
Python
|
gpl-2.0
| 20,273
| 0.002318
|
# -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012 David Strauss <david@davidstrauss.net>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
# Copyright 2012 Marti Raudsepp <marti@juffo.org>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY, SYSTEM_ONLY,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
    _convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
    'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_')
def _valid_field_name(s):
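    # A field name is valid when every character is an uppercase ASCII letter or underscore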
return not (set(s) - _IDENT_LETTER)
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
def __init__(self, flags=0, path=None, converters=None):
"""Create an instance of Reader, which allows filtering and
return of journal entries.
Argument `flags` sets open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens
journal on local machine only; RUNTIME_ONLY opens only
volatile journal files; and SYSTEM_ONLY opens only
journal files of system services and the kernel.
Argument `path` is the directory of journal files. Note that
`flags` and `path` are exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field
names are used as keys into this dictionary. The values must
be single argument functions, which take a `bytes` object and
return a converted value. When there's no entry for a field
name, then the default UTF-8 decoding will be attempted. If
the conversion fails with a ValueError, unconverted bytes
        object will be returned. (Note that ValueError is a superclass
of UnicodeDecodeError).
Reader implements the context manager protocol: the journal
will be closed when exiting the block.
"""
super(Reader, self).__init__(flags, path)
if _sys.version_info >= (3,3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Part of iterator protocol.
Returns self.
"""
return self
if _sys.version_info >= (3,):
def __next__(self):
"""Part of iterator protocol.
Returns self.get_next().
"""
return self.get_next()
else:
def next(self):
"""Part of iterator protocol.
Returns self.get_next().
"""
return self.get_next()
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
Optional skip value will return the `skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
|
LighthouseUK/jerboa
|
setup.py
|
Python
|
lgpl-3.0
| 659
| 0.004552
|
from distutils.core import setup
setup(
name='jerboa',
packages=['jerboa'], # this must be the same as the name above
version='0.2.1-alpha',
description='',
author='Matt Badger',
author_email='foss@lighthouseuk.net',
    url='https://github.com/LighthouseUK/jerboa',  # use the URL to the github repo
download_url='https://github.com/LighthouseUK/jerboa/tarball/0.2.1-alpha', # I'll explain this in a second
keywords=['gae', 'lighthouse', 'jerboa', 'webapp2'], # arbitrary keywords
classifiers=[],
    requires=['webapp2', 'blinker', 'wtforms', 'jinja2', 'pytz', 'babel', 'pycrypto'],
# tests_require=['WebTest']
)
|
elysiumd/windows-wallet-13.2
|
qa/rpc-tests/fundrawtransaction.py
|
Python
|
mit
| 28,411
| 0.010137
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
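    # Return the first unspent output whose amount matches exactly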
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
raise AssertionError("Accepted invalid option foo")
except JSONRPCException as e:
assert("Unexpected key foo" in e.error['message'])
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx
|
lycantropos/VKApp
|
manage.py
|
Python
|
gpl-3.0
| 882
| 0
|
import unittest
import click
from tests.test_app import TestApp
from tests.test_models import TestModels
from tests.test_utils import TestUtils
@click.group(name='tests', invoke_without_command=False)
def test():
pass
@test.command(name='test_models')
def test_models():
"""Tests implemented models"""
suite = unittest.TestLoader().loadTestsFromTestCase(TestModels)
unittest.TextTestRunner(verbosity=2).run(suite)
@test.command(name='test_utils')
def test_utils():
"""Tests utility functions"""
suite = unittest.TestLoader().loadTestsFromTestCase(TestUtils)
unittest.TextTestRunner(verbosity=2).run(suite)
@test.command(name='test_app')
def test_app():
"""Tests utility functions"""
suite = unittest.TestLoader().loadTestsFromTestCase(TestApp)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
test()
|
Infinidat/pyvmomi
|
pyVim/connect.py
|
Python
|
apache-2.0
| 29,885
| 0.012682
|
# VMware vSphere Python SDK
# Copyright (c) 2008-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## @file connect.py
## @brief Connect to a VMOMI ServiceInstance.
##
## Detailed description (for Doxygen goes here)
"""
Connect to a VMOMI ServiceInstance.
Detailed description (for [e]pydoc goes here).
"""
from six import reraise
import sys
import re
import ssl
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
from six.moves import http_client
import requests
from requests.auth import HTTPBasicAuth
from pyVmomi import vim, vmodl, SoapStubAdapter, SessionOrientedStub
from pyVmomi.SoapAdapter import CONNECTION_POOL_IDLE_TIMEOUT_SEC
from pyVmomi.VmomiSupport import nsMap, versionIdMap, versionMap, IsChildVersion
from pyVmomi.VmomiSupport import GetServiceVersions
"""
Global regular expression for parsing host and port connection
See http://www.ietf.org/rfc/rfc3986.txt sec 3.2.2
"""
_rx = re.compile(r"(^\[.+\]|[^:]+)(:\d+)?$")
_si = None
"""
Global (thread-shared) ServiceInstance
@todo: Get rid of me?
"""
def localSslFixup(host, sslContext):
"""
Connections to 'localhost' do not need SSL verification as a certificate
will never match. The OS provides security by only allowing root to bind
to low-numbered ports.
"""
if not sslContext and host in ['localhost', '127.0.0.1', '::1']:
import ssl
if hasattr(ssl, '_create_unverified_context'):
sslContext = ssl._create_unverified_context()
return sslContext
class closing(object):
"""
Helper class for using closable objects in a 'with' statement,
similar to the one provided by contextlib.
"""
def __init__(self, obj):
self.obj = obj
def __enter__(self):
return self.obj
def __exit__(self, *exc_info):
self.obj.close()
class VimSessionOrientedStub(SessionOrientedStub):
'''A vim-specific SessionOrientedStub. See the SessionOrientedStub class
in pyVmomi/SoapAdapter.py for more information.'''
# The set of exceptions that should trigger a relogin by the session stub.
SESSION_EXCEPTIONS = (
vim.fault.NotAuthenticated,
)
@staticmethod
def makeUserLoginMethod(username, password, locale=None):
'''Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.'''
def _doLogin(soapStub):
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
si.content.sessionManager.Login(username, password, locale)
return _doLogin
@staticmethod
def makeExtensionLoginMethod(extensionKey):
'''Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.'''
def _doLogin(soapStub):
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
si.content.sessionManager.LoginExtensionByCertificate(extensionKey)
return _doLogin
@staticmethod
def makeCertHokTokenLoginMethod(stsUrl, stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a HoK SAML token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(stsUrl)
def _doLogin(soapStub):
from . import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_hok_saml_assertion(cert,key)
def _requestModifier(request):
return sso.add_saml_context(request, samlAssertion, key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
with soapStub.requestModifier(_requestModifier):
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin
@staticmethod
def makeCredBearerTokenLoginMethod(username,
password,
stsUrl,
stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a Bearer token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param username: username of the user/service registered with STS.
@param password: password of the user/service registered with STS.
        @param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(username)
assert(password)
assert(stsUrl)
def _doLogin(soapStub):
from . import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_bearer_saml_assertion(username,
password,
                                                                     cert,
                                                                     key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
if not sm.currentSession:
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin
def Connect(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
version=None, keyFile=None, certFile=None, thumbprint=None,
sslContext=None, b64token=None, mechanism='userpass'):
"""
Connect to the specified server, login and return the service
instance object.
Throws any exception back to caller. The service instance object is
also saved in the library for easy access.
Clients should modify the service parameter only when connecting to
a VMOMI server other than hostd/vpxd. For both of the latter, the
default value is fine.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param namespace: Namespace *** Deprecated: Use version instead ***
@type namespace: string
@param path: Path
@type path: string
@param version: Version
@type version: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
|
jakirkham/dask
|
dask/dataframe/io/tests/test_parquet.py
|
Python
|
bsd-3-clause
| 129,660
| 0.001057
|
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if pa_version.major >= 5 and not SKIP_PYARROW:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
    files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
    assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
|
smerritt/swift
|
swift/common/ring/utils.py
|
Python
|
apache-2.0
| 26,082
| 0.000038
|
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import optparse
import re
import socket
from swift.common import exceptions
from swift.common.utils import expand_ipv6, is_valid_ip, is_valid_ipv4, \
is_valid_ipv6
def tiers_for_dev(dev):
"""
Returns a tuple of tiers for a given device in ascending order by
length.
:returns: tuple of tiers
"""
t1 = dev['region']
t2 = dev['zone']
t3 = dev['ip']
t4 = dev['id']
return ((t1,),
(t1, t2),
(t1, t2, t3),
(t1, t2, t3, t4))
def build_tier_tree(devices):
"""
Construct the tier tree from the zone layout.
The tier tree is a dictionary that maps tiers to their child tiers.
A synthetic root node of () is generated so that there's one tree,
not a forest.
Example:
region 1 -+---- zone 1 -+---- 192.168.101.1 -+---- device id 0
| | |
| | +---- device id 1
| | |
| | +---- device id 2
| |
| +---- 192.168.101.2 -+---- device id 3
| |
| +---- device id 4
| |
| +---- device id 5
|
+---- zone 2 -+---- 192.168.102.1 -+---- device id 6
| |
| +---- device id 7
| |
| +---- device id 8
|
+---- 192.168.102.2 -+---- device id 9
|
+---- device id 10
region 2 -+---- zone 1 -+---- 192.168.201.1 -+---- device id 12
| |
| +---- device id 13
| |
| +---- device id 14
|
+---- 192.168.201.2 -+---- device id 15
                                    |
                                    +---- device id 16
|
+---- device id 17
    The tier tree would look like:
{
(): [(1,), (2,)],
(1,): [(1, 1), (1, 2)],
(2,): [(2, 1)],
(1, 1): [(1, 1, 192.168.101.1),
(1, 1, 192.168.101.2)],
(1, 2): [(1, 2, 192.168.102.1),
(1, 2, 192.168.102.2)],
(2, 1): [(2, 1, 192.168.201.1),
(2, 1, 192.168.201.2)],
(1, 1, 192.168.101.1): [(1, 1, 192.168.101.1, 0),
(1, 1, 192.168.101.1, 1),
(1, 1, 192.168.101.1, 2)],
(1, 1, 192.168.101.2): [(1, 1, 192.168.101.2, 3),
(1, 1, 192.168.101.2, 4),
(1, 1, 192.168.101.2, 5)],
(1, 2, 192.168.102.1): [(1, 2, 192.168.102.1, 6),
(1, 2, 192.168.102.1, 7),
(1, 2, 192.168.102.1, 8)],
(1, 2, 192.168.102.2): [(1, 2, 192.168.102.2, 9),
(1, 2, 192.168.102.2, 10)],
(2, 1, 192.168.201.1): [(2, 1, 192.168.201.1, 12),
(2, 1, 192.168.201.1, 13),
(2, 1, 192.168.201.1, 14)],
(2, 1, 192.168.201.2): [(2, 1, 192.168.201.2, 15),
(2, 1, 192.168.201.2, 16),
(2, 1, 192.168.201.2, 17)],
}
:devices: device dicts from which to generate the tree
:returns: tier tree
"""
tier2children = defaultdict(set)
for dev in devices:
for tier in tiers_for_dev(dev):
if len(tier) > 1:
tier2children[tier[0:-1]].add(tier)
else:
tier2children[()].add(tier)
return tier2children
def validate_and_normalize_ip(ip):
"""
Return normalized ip if the ip is a valid ip.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded.
"""
# first convert to lower case
new_ip = ip.lower()
if is_valid_ipv4(new_ip):
return new_ip
elif is_valid_ipv6(new_ip):
return expand_ipv6(new_ip)
else:
raise ValueError('Invalid ip %s' % ip)
def validate_and_normalize_address(address):
"""
Return normalized address if the address is a valid ip or hostname.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded.
    RFC1123 2.1 Host Names and Numbers
DISCUSSION
This last requirement is not intended to specify the complete
syntactic form for entering a dotted-decimal host number;
that is considered to be a user-interface issue. For
example, a dotted-decimal number must be enclosed within
"[ ]" brackets for SMTP mail (see Section 5.2.17). This
notation could be made universal within a host system,
simplifying the syntactic checking for a dotted-decimal
number.
If a dotted-decimal number can be entered without such
identifying delimiters, then a full syntactic check must be
made, because a segment of a host domain name is now allowed
to begin with a digit and could legally be entirely numeric
(see Section 6.1.2.4). However, a valid host name can never
have the dotted-decimal form #.#.#.#, since at least the
highest-level component label will be alphabetic.
"""
new_address = address.lstrip('[').rstrip(']')
if address.startswith('[') and address.endswith(']'):
return validate_and_normalize_ip(new_address)
new_address = new_address.lower()
if is_valid_ipv4(new_address):
return new_address
elif is_valid_ipv6(new_address):
return expand_ipv6(new_address)
elif is_valid_hostname(new_address):
return new_address
else:
raise ValueError('Invalid address %s' % address)
def is_valid_hostname(hostname):
"""
Return True if the provided hostname is a valid hostname
"""
if len(hostname) < 1 or len(hostname) > 255:
return False
if hostname.endswith('.'):
# strip exactly one dot from the right, if present
hostname = hostname[:-1]
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
def is_local_device(my_ips, my_port, dev_ip, dev_port):
"""
Return True if the provided dev_ip and dev_port are among the IP
addresses specified in my_ips and my_port respectively.
To support accurate locality determination in the server-per-port
deployment, when my_port is None, only IP addresses are used for
determining locality (dev_port is ignored).
If dev_ip is a hostname then it is first translated to an IP
address before checking it against my_ips.
"""
candidate_ips = []
if not is_valid_ip(dev_ip) and is_valid_hostname(dev_ip):
|
cfagiani/crowdcloud
|
build_cloud.py
|
Python
|
apache-2.0
| 7,232
| 0.013689
|
"""
__author__ = 'Christopher Fagiani'
"""
import sys, argparse, json, string, collections
from datetime import datetime
SummaryData = collections.namedtuple('SummaryData', 'wordCounts articles commentCount authors anonCount')
def main(args):
"""This program will output a word cloud as html based on the frequencies of words in a data file
"""
process_data(args.threshold,args.inputFile,args.stopFile, args.outputFile)
def process_data(threshold,dataFile, stopwordsFile, outputFile, interval=None):
with open(dataFile) as in_file:
data = json.load(in_file)
summary_data = build_counts(data,load_stopwords(stopwordsFile))
write_json(summary_data, outputFile,int(threshold),True,interval)
def write_json(summary_data, outputFile, threshold, as_variable=True, interval=None):
"""Writes a json file containing the count data for each word. If asVariable is true (the default), the data is
output as a javascript variable declaration rather than a raw JSON array.
"""
sorted_data = sorted(summary_data.wordCounts.items(),key=lambda x: x[1]['count'], reverse=True)
with open(outputFile,'w') as out_file:
count = 0
if as_variable:
out_file.write("var lastUpdated='"+datetime.now().strftime("%D at %H:%M:%S")+"';\n")
out_file.write("var threshold='"+str(threshold)+"';\n")
out_file.write("var commentCount='"+str(summary_data.commentCount)+"';\n")
out_file.write("var articleCount='"+str(len(summary_data.articles))+"';
|
\n")
out_file.write("var authorCount='"+str(len(summary_data.authors))+"';\n")
out_file.write("var anonCount='"+str(summary_data.anonCount)+"';\n")
if interval is not None:
out_file.write("var intervalHrs='"+interval+"';\n")
else:
out_file.write("var intervalHrs='unknown';\n")
out_file.write("var words = [")
else:
out_file.write("[")
for item in sorted_data:
if(item[1]['count']<threshold):
break
if count > 0:
out_file.write(",")
count += 1
out_file.write(json.dumps(item[1]))
out_file.write("]")
def load_stopwords(filename):
"""loads the stopwords file
"""
words = set()
with open(filename) as stop_file:
for line in stop_file:
words.add(line.strip())
return words
def build_counts(data, stop_words, lemmatize=True):
"""builds a dictionary keyed on the lowercase version of the sanitized string.
The values are a dictionary that contains the raw word, the count of occurrences
and a dictionary of articles (keys = links, values = titles) for each article associated with the word.
"""
words = {}
comment_count = 0
articles = set()
authors = set()
anon_count = 0
spam_count = 0
lm = None
if lemmatize:
lm = initialize_lemmatizer()
for item in data:
if compute_spam_score(item['msg']) >= .7:
spam_count += 1
continue
comment_count += 1
articles.add(item['link'])
text = item['msg']
authors.add(item['author'])
        if item.get('authorId') is None:
anon_count += 1
for word in text.split():
raw_word = sanitize_word(word)
word = raw_word.lower()
if lemmatize:
word = lemmatize_word(lm,word)
if word not in stop_words and all(c in string.printable for c in word):
record = words.get(word,None)
if record is None:
if raw_word == 'us':
print word
record = {'count':1, 'word':raw_word,
'articles':{item['link']:item['title']}}
words[word]=record
else:
record['count']=record['count']+1
record['articles'][item['link']]=item['title']
return SummaryData(words, articles, comment_count, authors, anon_count)
def initialize_lemmatizer():
"""Initializes the wordnet lemmatizer. You must install nltk and download
the wordnet corpus prior to using this method (after downloading nltk, import it and run nltk.download())
"""
from nltk.stem.wordnet import WordNetLemmatizer
return WordNetLemmatizer()
def lemmatize_word(lm,word):
"""Lemmatizes a word using the nltk library.
Since we don't know the part of speech, this method performs 2 lemmatizations (once as a noun and once as a verb)
    The version of the word that differs from the input word is returned.
This is not always guaranteed to generate a correct answer, but it's good enough for our purposes.
"""
candidateN = lm.lemmatize(word,'n')
candidateV = lm.lemmatize(word,'v')
if candidateN == word:
return candidateV
else:
return candidateN
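# Hedged example (not part of the original script): the double lemmatization
# described above picks whichever form actually changed. The sample words are
# assumptions and require the nltk wordnet corpus to be installed.
def _lemmatize_examples():
    lm = initialize_lemmatizer()
    running = lemmatize_word(lm, "running")   # verb form changes -> "run"
    cats = lemmatize_word(lm, "cats")         # noun form changes -> "cat"
    return running, cats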
def sanitize_word(word):
"""returns word after replacing common punctuation with the empty string
"""
word = word.replace(".","").replace(",","").replace("?","").replace(":","")\
.replace("(","").replace(")","").replace("*","").replace(";","").replace('"',"").replace("!","")
word = word.replace("]","").replace("[","")
return word
def compute_spam_score(comment_text):
"""
Computes a "spam score" that is the likelihood that a comment is Spam (expressed as a value between 0 and 1).
:param comment_text: comment to score
:return: value between 0 (definitely not spam) and 1 (definitely spam)
"""
spam_indicator_count = 0
normalized_text = comment_text.lower()
if "getting paid" in normalized_text or "earn" in normalized_text or "earning" in normalized_text:
if "internet" in normalized_text:
spam_indicator_count += 5
if "http://" in normalized_text or "href=" in normalized_text:
spam_indicator_count += 5
if "an hour" in normalized_text or "hourly" in normalized_text or "/hr" in normalized_text or "monthly" in normalized_text:
if "job" in normalized_text:
spam_indicator_count += 3
if "http://" in normalized_text or "href=" in normalized_text:
spam_indicator_count += 5
if "http://" in normalized_text or "href=" in normalized_text:
spam_indicator_count += 2
return spam_indicator_count / 20.0
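# Hedged example (not in the original script; the sample text is made up): a
# comment hitting the "earn ... internet" and link heuristics above scores
# 12/20 = 0.6, below the 0.7 cut-off used in build_counts, while adding an
# hourly-rate job pitch with a link scores 15/20 = 0.75 and is skipped as spam.
def _spam_score_examples():
    borderline = compute_spam_score("Earn money on the internet! http://example.com")
    obvious = compute_spam_score(
        "Earn $50 an hour from home, great job, sign up at http://example.com")
    return borderline, obvious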
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description="Build a tag cloud from json data")
argparser.add_argument("-i","--input", metavar='inputfile',required=True,help='file containing json data',dest='inputFile')
argparser.add_argument("-o","--output", metavar='outputFile',required=True,help='output file',dest='outputFile')
argparser.add_argument("-s","--stopfile", metavar="stopwordFile",default="stopwords.txt",help="stopwords file",dest="stopFile")
argparser.add_argument("-t","--threshold", metavar="countThreshold",default=4,help="count threshold",dest="threshold")
main(argparser.parse_args())
|
coberger/DIRAC
|
DataManagementSystem/Client/FailoverTransfer.py
|
Python
|
gpl-3.0
| 11,229
| 0.027518
|
""" Failover Transfer
The failover transfer client exposes the following methods:
- transferAndRegisterFile()
- transferAndRegisterFileFailover()
Initially these methods were developed inside workflow modules but
have evolved to a generic 'transfer file with failover' client.
The transferAndRegisterFile() method will correctly set registration
requests in case of failure.
The transferAndRegisterFileFailover() method will attempt to upload
a file to a list of alternative SEs and set appropriate replication
to the original target SE as well as the removal request for the
temporary replica.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getRegistrationProtocols
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
class FailoverTransfer( object ):
""" .. class:: FailoverTransfer
"""
#############################################################################
def __init__( self, requestObject = None, log = None, defaultChecksumType = 'ADLER32' ):
""" Constructor function, can specify request object to instantiate
FailoverTransfer or a new request object is created.
"""
self.log = log
if not self.log:
self.log = gLogger.getSubLogger( "FailoverTransfer" )
self.request = requestObject
if not self.request:
self.request = Request()
self.request.RequestName = 'noname_request'
self.request.SourceComponent = 'FailoverTransfer'
self.defaultChecksumType = defaultChecksumType
self.registrationProtocols = getRegistrationProtocols()
#############################################################################
def transferAndRegisterFile( self,
fileName,
localPath,
lfn,
destinationSEList,
fileMetaDict,
fileCatalog = None,
masterCatalogOnly = False ):
"""Performs the transfer and register operation with failover.
"""
errorList = []
fileGUID = fileMetaDict.get( "GUID", None )
for se in destinationSEList:
self.log.info( "Attempting dm.putAndRegister('%s','%s','%s',guid='%s',catalog='%s')" % ( lfn,
localPath,
se,
fileGUID,
fileCatalog ) )
result = DataManager( catalogs = fileCatalog, masterCatalogOnly = masterCatalogOnly ).putAndRegister( lfn, localPath, se, guid = fileGUID )
self.log.verbose( result )
if not result['OK']:
self.log.error( 'dm.putAndRegister failed with message', result['Message'] )
errorList.append( result['Message'] )
continue
if not result['Value']['Failed']:
self.log.info( 'dm.putAndRegister successfully uploaded and registered %s to %s' % ( fileName, se ) )
return S_OK( {'uploadedSE':se, 'lfn':lfn} )
# Now we know something went wrong
self.log.warn( "Didn't manage to do everything, now adding requests for the missing operation" )
errorDict = result['Value']['Failed'][lfn]
if 'register' not in errorDict:
self.log.error( 'dm.putAndRegister failed with unknown error', str( errorDict ) )
errorList.append( 'Unknown error while attempting upload to %s' % se )
continue
# fileDict = errorDict['register']
# Therefore the registration failed but the upload was successful
if not fileCatalog:
fileCatalog = ''
if masterCatalogOnly:
fileCatalog = FileCatalog().getMasterCatalogNames()['Value']
result = self._setRegistrationRequest( lfn, se, fileMetaDict, fileCatalog )
if not result['OK']:
self.log.error( 'Failed to set registration request', 'SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
errorList.append( 'Failed to set registration request for: SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
continue
else:
self.log.info( 'Successfully set registration request for: SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
metadata = {}
metadata['filedict'] = fileMetaDict
metadata['uploadedSE'] = se
metadata['lfn'] = lfn
metadata['registration'] = 'request'
return S_OK( metadata )
self.log.error( 'Failed to upload output data file', 'Encountered %s errors' % len( errorList ) )
return S_ERROR( 'Failed to upload output data file' )
#############################################################################
def transferAndRegisterFileFailover( self,
fileName,
localPath,
lfn,
targetSE,
failoverSEList,
fileMetaDict,
fileCatalog = None,
masterCatalogOnly = False ):
"""Performs the transfer and register operation to failover storage and sets the
necessary replication and removal requests to recover.
"""
failover = self.transferAndRegisterFile( fileName, localPath, lfn, failoverSEList, fileMetaDict, fileCatalog, masterCatalogOnly = masterCatalogOnly )
if not failover['OK']:
self.log.error( 'Could not upload file to failover SEs', failover['Message'] )
return failover
# set removal requests and replication requests
result = self._setFileReplicationRequest( lfn, targetSE, fileMetaDict, sourceSE = failover['Value']['uploadedSE'] )
if not result['OK']:
self.log.error( 'Could not set file replication request', result['Message'] )
return result
lfn = failover['Value']['lfn']
failoverSE = failover['Value']['uploadedSE']
self.log.info( 'Attempting to set replica removal request for LFN %s at failover SE %s' % ( lfn, failoverSE ) )
    result = self._setReplicaRemovalRequest( lfn, failoverSE )
if not result['OK']:
self.log.error( 'Could not set removal request', result['Message'] )
return result
return S_OK( {'uploadedSE':failoverSE, 'lfn':lfn} )
def getRequest( self ):
""" get the accumulated request object
"""
return self.request
def commitRequest( self ):
""" Send request to the R
|
equest Management Service
"""
if self.request.isEmpty():
return S_OK()
isValid = RequestValidator().validate( self.request )
if not isValid["OK"]:
return S_ERROR( "Failover request is not valid: %s" % isValid["Message"] )
else:
requestClient = ReqClient()
result = requestClient.putRequest( self.request )
return result
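  # Hedged usage sketch (not from the original module; SE names, paths and
  # metadata are illustrative only):
  #   ft = FailoverTransfer()
  #   res = ft.transferAndRegisterFile( 'data.root', '/tmp/data.root', '/vo/user/data.root',
  #                                     ['Primary-SE', 'Backup-SE'], {'GUID': None, 'Size': 42} )
  #   if not res['OK']:
  #     res = ft.transferAndRegisterFileFailover( 'data.root', '/tmp/data.root', '/vo/user/data.root',
  #                                               'Primary-SE', ['Failover-SE'], {'GUID': None, 'Size': 42} )
  #   ft.commitRequest()  # push any accumulated recovery requests to the RMS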
#############################################################################
def _setFileReplicationRequest( self, lfn, targetSE, fileMetaDict, sourceSE = '' ):
""" Sets a registration request.
"""
self.log.info( 'Setting ReplicateAndRegister request for %s to %s' % ( lfn, targetSE ) )
transfer = Operation()
transfer.Type = "ReplicateAndRegister"
transfer.TargetSE = targetSE
if sourceSE:
transfer.Sou
|
NERC-CEH/jules-jasmin
|
majic/joj/crowd/client.py
|
Python
|
gpl-2.0
| 12,041
| 0.00108
|
"""
#header
"""
import base64
import datetime
import logging
from joj.crowd.models import UserRequest
import urllib2
import simplejson
from simplejson import JSONDecodeError
log = logging.getLogger(__name__)
class ClientException(Exception):
""" Base exception for Crowd-based errors """
def __init__(self):
pass
class SessionNotFoundException(ClientException):
"""Use this for:
+ User session expiry
+ Nonsense user session etc."""
def __init__(self):
pass
class AuthenticationFailedException(ClientException):
"""If the user has entered incorrect details,
we'll use this exception type"""
def __init__(self):
pass
class UserException(ClientException):
""" Raised if a user already exists on create"""
def __init__(self):
pass
class CrowdCommunicationExcpetion(ClientException):
"""
Raised if crowd can not be contacted
"""
def __init__(self):
pass
class CrowdClient(object):
"""Provides a simple interface to a crowd server"""
# Map the messages we expect back from Crowd in error
# to an exception type we want to raise
_errorMap = {
'INVALID_SSO_TOKEN': SessionNotFoundException,
        'INVALID_USER_AUTHENTICATION': AuthenticationFailedException,
        'INVALID_USER': UserException,
        'USER_NOT_FOUND': UserException
    }
_token_cache = {}
crowd_user = None
crowd_password = None
def __init__(self, api_url=None, app_name=None, app_pwd=None):
"""Constructor function
Params:
api_url: The URL to the Crowd API
app_name: Application login name for Crowd server
app_pwd: Application password for Crowd server
"""
self.crowd_user = app_name
self.crowd_password = app_pwd
self.crowd_api = api_url
self.use_crowd = None
self.external_opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.ProxyHandler({}))
def config(self, config):
"""
Configure the crowd client
:param config:
:return:nothing
"""
self.crowd_user = config['crowd_app_name']
self.crowd_password = config['crowd_app_password']
self.crowd_api = config['crowd_api_url']
self.use_crowd = config['crowd_use_crowd'].lower() != 'false'
try:
self.external_opener = urllib2.build_opener(
urllib2.ProxyHandler({'http': config['external_http_proxy'],
'https': config['external_https_proxy']})
)
log.info("installed proxied external opener for crowd client")
except KeyError:
self.external_opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.ProxyHandler({}))
log.info("installed non-proxied external opener for crowd client")
def check_authenticated(self, user_name, password):
"""
        Checks the supplied credentials against the Crowd system
        :param user_name: Login name of the user to check
        :param password: That user's password
        :return: User information as JSON if the credentials are valid,
            raises exception if not
"""
return self._make_request('authentication?username=%s' % user_name, '{ "value": "%s" }' % password)
def create_user_session(self, user_name, password, remote_addr):
"""
Asks the crowd provider for a user session token
:param user_name: Login name of the user
:param password: Password of the user
:param remote_addr: IP address the user is requesting from
:return: User object in JSON containing a 'token' from Crowd
raises exception if invalid credentials
"""
user = UserRequest()
user.username = user_name
user.password = password
user.remote_address = remote_addr
# Now we have enough information to make
# a request to the Crowd API for a session
return self._make_request('session', user.to_json())
def verify_user_session(self, token):
"""
Checks the supplied token against active Crowd sessions
:param token: The Crowd session ID to verify
:return: User information as JSON if session valid,
raises exception if not
"""
# Look for a user, and last access entry in the
# cache...
try:
user, last_access_time = self._token_cache[token]
time_since_last_access = datetime.datetime.now() - last_access_time
if time_since_last_access.seconds > 20:
del[self._token_cache[token]]
else:
log.debug("Found user in cache - no need to call Crowd")
return {
'user': user,
'token': token
}
except KeyError:
server_credentials = self._make_request('session/' + token)
self._token_cache[token] = (server_credentials['user'], datetime.datetime.now())
return server_credentials
return self._make_request('session/' + token)
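    # Hedged usage sketch (not from the original module; URL, credentials and IP
    # are illustrative): a typical session round-trip against Crowd.
    #   client = CrowdClient(api_url='https://crowd.example.com/rest/usermanagement/1/',
    #                        app_name='my-app', app_pwd='secret')
    #   session = client.create_user_session('alice', 'password', '203.0.113.1')
    #   client.verify_user_session(session['token'])  # repeat calls within ~20s hit the cache
    #   client.delete_session(session['token'])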
def delete_session(self, token):
"""
Invalidates the specified session
:param token: Session identifier to invalidate
:return:Nothing
"""
if token in self._token_cache:
del[self._token_cache[token]]
self._make_request('session/' + token, method='DELETE')
def get_user_info(self, username):
"""
Gets a user object from Crowd
:param username: Name of user to get info for
:return: User JSON
"""
return self._make_request('user?username=%s' % username)
def create_user(self, username, first_name, last_name, email, password):
"""
Asks the client to create a user with the given information
:param username: login name for the user
:param first_name: The name given to the user for use in an informal setting
:param last_name: The name of the user's family
:param email: Email address
:param password: User's desired password
:return: nothing
"""
req = UserRequest()
req.username = username
req.first_name = first_name
req.last_name = last_name
req.email = email
req.password = password
return self._make_request('user', data=req.new_user_json())
def update_user(self, username, first_name, last_name, email, password):
"""
Asks the client to update the user record
:param username: login name for the user
:param first_name: The name given to the user for use in an informal setting
:param last_name: The name of the user's family
:param email: Email address
:param password: User's desired password
:return: nothing
"""
req = UserRequest()
req.username = username
req.first_name = first_name
req.last_name = last_name
req.email = email
req.password = password
return self._make_request('user?username=%s' % username, data=req.new_user_json(), method='PUT')
def update_users_password(self, username, password):
"""
Update a users password
:param username: the username
:param password: the new password
:return:nothing
"""
data = simplejson.dumps(
{
'value': password
})
return self._make_request('user/password?username=%s' % username, data=data, method='PUT')
def delete_user(self, username):
"""
Performs a delete on a user
:param username: The login name of the user to delete
:return: nothing
"""
self._make_request('user?username=%s' % username, method='DELETE')
def _make_request(self, resource, data=None, method=None):
"""
Helper function for making requests to the Crowd REST API
:param resource: The REST resource to access
:param data: Optional JSON payload
:param method: The HTTP v
|
chelnak/BotStats
|
app/__init__.py
|
Python
|
mit
| 466
| 0.01073
|
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import flask.ext.restless
app = Flask(__name__)
app.config.from_object('config')
#flask-sqlalchemy
db = SQLAlchemy(app)
from app import models, views
from app.models import Fact, Log
#API
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
manager.create_api(Fact, methods=['GET', 'POST', 'DELETE'])
manager.create_api(Log, methods=['GET', 'POST', 'PUT', 'DELETE'])
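# Hedged note (not in the original file): with Flask-Restless defaults the two
# calls above expose the models at /api/fact and /api/log respectively.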
|
alsrgv/tensorflow
|
tensorflow/python/keras/layers/recurrent_test.py
|
Python
|
apache-2.0
| 54,154
| 0.003638
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow.python.util import nest
# Used for nested input/output/state RNN test.
NestedInput = collections.namedtuple('NestedInput', ['t1', 't2'])
NestedState = collections.namedtuple('NestedState', ['s1', 's2'])
@keras_parameterized.run_all_keras_modes
class RNNTest(keras_parameterized.TestCase):
def test_minimal_rnn_cell_non_layer(self):
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_non_layer_multiple_states(self):
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16)]
layer = keras.layers.RNN(cells)
self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32)))
self.assertEqual(layer.cell.output_size, 32)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_layer(self):
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(MinimalRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
    x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
    # Test stacking.
cells = [MinimalRNNCell(8),
MinimalRNNCell(12),
MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_minimal_rnn_cell_abstract_rnn_cell(self):
class MinimalRNNCell(keras.layers.AbstractRNNCell):
def __init__(self, units, **kwargs):
self.units = units
super(MinimalRNNCell, self)
|
w1z2g3/crossbar
|
crossbar/controller/test/test_cli.py
|
Python
|
agpl-3.0
| 10,540
| 0.001328
|
#####################################################################################
#
# Copyright (C) Tavendo GmbH
#
# Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
# have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import, division, print_function
from six import StringIO as NativeStringIO
from twisted.internet.selectreactor import SelectReactor
from crossbar.test import TestCase
from crossbar.controller import cli
from crossbar import _logging
from weakref import WeakKeyDictionary
import os
import sys
import platform
import twisted
class CLITestBase(TestCase):
    # the tests here are mostly bogus, as they test for log message content,
# not actual functionality
skip = True
def setUp(self):
self._subprocess_timeout = 15
if platform.python_implementation() == 'PyPy':
self._subprocess_timeout = 30
self.stderr = NativeStringIO()
self.stdout = NativeStringIO()
self.patch(_logging, "_stderr", self.stderr)
self.patch(_logging, "_stdout", self.stdout)
self.patch(_logging, "_loggers", WeakKeyDictionary())
self.patch(_logging, "_loglevel", "info")
return super(CLITestBase, self).setUp()
def tearDown(self):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
class VersionTests(CLITestBase):
"""
Tests for `crossbar version`.
"""
def test_basic(self):
"""
Just running `crossbar version` gets us the versions.
"""
reactor = SelectReactor()
cli.run("crossbar",
["version"],
reactor=reactor)
self.assertIn("Crossbar.io", self.stdout.getvalue())
self.assertIn(
("Twisted : \x1b[33m\x1b[1m" + twisted.version.short() + "-SelectReactor"),
self.stdout.getvalue())
def test_debug(self):
"""
Running `crossbar version` will give us the versions, plus the
locations of some of them.
"""
reactor = SelectReactor()
cli.run("crossbar",
["version", "--loglevel=debug"],
reactor=reactor)
self.assertIn("Crossbar.io", self.stdout.getvalue())
self.assertIn(
("Twisted : \x1b[33m\x1b[1m" + twisted.version.short() + "-SelectReactor"),
self.stdout.getvalue())
self.assertIn(
("[twisted.internet.selectreactor.SelectReactor]"),
self.stdout.getvalue())
class StartTests(CLITestBase):
"""
Tests for `crossbar start`.
"""
def setUp(self):
CLITestBase.setUp(self)
# Set up the configuration directories
self.cbdir = os.path.abspath(self.mktemp())
os.mkdir(self.cbdir)
self.config = os.path.abspath(os.path.join(self.cbdir, "config.json"))
def test_start(self):
"""
A basic start, that doesn't actually enter the reactor.
"""
with open(self.config, "w") as f:
f.write("""{"controller": {}}""")
reactor = SelectReactor()
reactor.run = lambda: False
cli.run("crossbar",
["start", "--cbdir={}".format(self.cbdir),
"--logformat=syslogd"],
reactor=reactor)
self.assertIn("Entering reactor event loop", self.stdout.getvalue())
def test_configValidationFailure(self):
"""
Running `crossbar start` with an invalid config will print a warning.
"""
with open(self.config, "w") as f:
f.write("")
reactor = SelectReactor()
with self.assertRaises(SystemExit) as e:
cli.run("crossbar",
["start", "--cbdir={}".format(self.cbdir),
"--logformat=syslogd"],
reactor=reactor)
# Exit with code 1
self.assertEqual(e.exception.args[0], 1)
# The proper warning should be emitted
self.assertIn("*** Configuration validation failed ***",
self.stderr.getvalue())
self.assertIn(("configuration file does not seem to be proper JSON "),
self.stderr.getvalue())
def test_fileLogging(self):
"""
Running `crossbar start --logtofile` will log to cbdir/node.log.
"""
with open(self.config, "w") as f:
f.write("""{"controller": {}}""")
reactor = SelectReactor()
reactor.run = lambda: None
cli.run("crossbar",
["start", "--cbdir={}".format(self.cbdir), "--logtofile"],
reactor=reactor)
with open(os.path.join(self.cbdir, "node.log"), "r") as f:
logFile = f.read()
self.assertIn("Entering reactor event loop", logFile)
self.assertEqual("", self.stderr.getvalue())
self.assertEqual("", self.stdout.getvalue())
def test_stalePID(self):
with open(self.config, "w") as f:
f.write("""{"controller": {}}""")
with open(os.path.join(self.cbdir, "node.pid"), "w") as f:
f.write("""{"pid": 9999999}""")
reactor = SelectReactor()
reactor.run = lambda: None
cli.run("crossbar",
["start", "--cbdir={}".format(self.cbdir),
"--logformat=syslogd"],
reactor=reactor)
self.assertIn(
("Stale Crossbar.io PID file (pointing to non-existing process "
"with PID {pid}) {fp} removed").format(
fp=os.path.abspath(os.path.join(self.cbdir, "node.pid")),
pid=9999999),
self.stdout.getvalue())
class ConvertTests(CLITestBase):
"""
Tests for `crossbar convert`.
"""
def test_unknown_format(self):
"""
Running `crossbar convert` with an unknown config file produces an
error.
"""
cbdir = self.mktemp()
os.makedirs(cbdir)
config_file = os.path.join(cbdir, "config.blah")
open(config_file, 'wb').close()
with self.assertRaises(SystemExit) as e:
cli.run("crossbar",
["convert", "--config={}".format(config_file)])
self.assertEqual(e.exception.args[0], 1)
self.assertIn(
("Error: configuration file needs to be '.json' or '.yaml'."),
self.stdout.getvalue())
def test_yaml_to_json(self):
"""
Running `crossbar convert` with a YAML config file will convert it to
JSON.
"""
cbdir = self.mktemp()
os.makedirs(cbdir)
config_file = os.path.join(cbdir, "config.yaml")
with open(config_file, 'w') as f:
f.write("""
foo:
bar: spam
baz:
foo: cat
""")
cli.run("crossbar",
["convert", "--config={}".format(config_file)])
self.assertIn(
("JSON formatted configuration wri
|
KevinJMcGrath/Symphony-Ares
|
modules/plugins/PABot/logging.py
|
Python
|
mit
| 796
| 0.002513
|
import logging.handlers
import os
_pabotlog = logging.getLogger('PABot')
_pabotlog.setLevel(logging.DEBUG)
_logPath = os.path.abspath("./logging/pabot.log")
_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
_consoleStreamHandler = logging.StreamHandler()
_consoleStreamHandler.setLevel(logging.DEBUG)
_consoleStreamHandler.setFormatter(_formatter)
_symLogRotFileHandler = logging.handlers.RotatingFileHandler(_logPath, maxBytes=2000000, backupCount=5)
_symLogRotFileHandler.setLevel(logging.DEBUG)
_symLogRotFileHandler.setFormatter(_formatter)
_pabotlog.addHandler(_consoleStreamHandler)
_pabotlog.addHandler(_symLogRotFileHandler)
def LogPABotMessage(message):
_pabotlog.info(message)
def LogPABotError(message):
_pabotlog.error(message)
|
nanchenchen/emoticon-analysis
|
emoticonvis/apps/api/serializers.py
|
Python
|
mit
| 4,406
| 0.004766
|
"""
This module defines serializers for the main API data objects:
.. autosummary::
:nosignatures:
DimensionSerializer
FilterSerializer
MessageSerializer
QuestionSerializer
"""
from django.core.paginator import Paginator
from rest_framework import serializers, pagination
import emoticonvis.apps.corpus.models as corpus_models
import emoticonvis.apps.enhance.models as enhance_models
# Assumed import: the Code* serializers below reference coding_models, which is
# missing from this excerpt; the module path is a guess based on the app layout above.
import emoticonvis.apps.coding.models as coding_models
from django.contrib.auth.models import User
# A simple string field that looks up dimensions on deserialization
class MessageSerializer(serializers.ModelSerializer):
"""
JSON representation of :class:`.Message`
objects for the API.
Messages are provided in a simple format that is useful for displaying
examples:
::
{
"id": 52,
"dataset": 2,
"text": "Some sort of thing or other",
"sender": {
"id": 2,
"dataset": 1
"original_id": 2568434,
"username": "my_name",
"full_name": "My Name"
},
"time": "2010-02-25T00:23:53Z"
}
Additional fields may be added later.
"""
class Meta:
model = corpus_models.Message
fields = ('id', 'dataset', 'text', )
class UserSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
return instance.username
class Meta:
model = User
fields = ('username', )
class FeatureVectorSerializer(serializers.Serializer):
    message = MessageSerializer()
tokens = serializers.ListField()
feature_vector = serializers.ListField(child=serializers.DictField())
class FeatureCodeDistributionSerializer(serializers.Serializer):
feature_index = serializers.IntegerField()
feature_text = serializers.CharField()
distribution = serializers.ListField(child=serializers.DictField())
class SVMResultSerializer(serializers.Serializer):
results = serializers.DictField()
messages = serializers.ListField(child=FeatureVectorSerializer(), required=True)
class FeatureSerializer(serializers.ModelSerializer):
token_list = serializers.ListField(child=serializers.CharField(), required=False)
class Meta:
model = enhance_models.Feature
fields = ('id', 'dictionary', 'index', 'text', 'document_frequency', 'token_list', )
read_only_fields = ('id', 'dictionary', 'index', 'text', 'document_frequency', )
class PaginatedMessageSerializer(pagination.PaginationSerializer):
class Meta:
object_serializer_class = MessageSerializer
class DatasetSerializer(serializers.ModelSerializer):
class Meta:
model = corpus_models.Dataset
fields = ('id', 'name', 'description', 'message_count', )
read_only_fields = ('id', 'name', 'description', 'message_count', )
class DictionarySerializer(serializers.ModelSerializer):
dataset = DatasetSerializer()
class Meta:
model = enhance_models.Dictionary
fields = ('id', 'name', 'time', 'feature_count', 'dataset', )
read_only_fields = ('id', 'name', 'time', 'feature_count', 'dataset', )
class CodeAssignmentSerializer(serializers.ModelSerializer):
class Meta:
model = coding_models.CodeAssignment
fields = ('id', 'source', 'message', 'code', 'is_example', 'is_ambiguous', 'is_saved', )
read_only_fields = ('id', 'source', )
class CodeDefinitionSerializer(serializers.Serializer):
code = serializers.CharField(required=False)
source = UserSerializer(required=False)
text = serializers.CharField()
examples = MessageSerializer(many=True, required=False)
class CodeMessageSerializer(serializers.Serializer):
code = serializers.CharField()
source = UserSerializer()
messages = MessageSerializer(many=True)
class DisagreementIndicatorSerializer(serializers.ModelSerializer):
user_assignment = CodeAssignmentSerializer(required=False)
partner_assignment = CodeAssignmentSerializer(required=False)
class Meta:
model = coding_models.DisagreementIndicator
fields = ('id', 'message', 'user_assignment', 'partner_assignment', 'type', )
read_only_fields = ('id', 'message', 'user_assignment', 'partner_assignment', )
class PairwiseSerializer(serializers.Serializer):
user_code = serializers.CharField()
partner_code = serializers.CharField()
count = serializers.IntegerField()
|
FederatedAI/FATE
|
examples/pipeline/data_transform/pipeline-data-transform-dense.py
|
Python
|
apache-2.0
| 2,334
| 0.003428
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/ext-py/lxml/src/lxml/tests/test_xpathevaluator.py
|
Python
|
gpl-2.0
| 23,244
| 0.00271
|
# -*- coding: utf-8 -*-
"""
Test cases related to XPath evaluation and the XPath class
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, _bytes, BytesIO
from common_imports import doctest, make_doctest
class ETreeXPathTestCase(HelperTestCase):
"""XPath tests etree"""
def test_xpath_boolean(self):
tree = self.parse('<a><b></b><b></b></a>')
self.assert_(tree.xpath('boolean(/a/b)'))
self.assert_(not tree.xpath('boolean(/a/c)'))
def test_xpath_number(self):
tree = self.parse('<a>1</a>')
self.assertEquals(1.,
tree.xpath('number(/a)'))
tree = self.parse('<a>A</a>')
actual = str(tree.xpath('number(/a)'))
expected = ['nan', '1.#qnan', 'nanq']
if not actual.lower() in expected:
self.fail('Expected a NAN value, got %s' % actual)
def test_xpath_string(self):
tree = self.parse('<a>Foo</a>')
self.assertEquals('Foo',
tree.xpath('string(/a/text())'))
def test_xpath_document_root(self):
tree = self.parse('<a><b/></a>')
self.assertEquals([],
tree.xpath('/'))
def test_xpath_namespace(self):
tree = self.parse('<a xmlns="test" xmlns:p="myURI"/>')
self.assert_((None, "test") in tree.xpath('namespace::*'))
self.assert_(('p', 'myURI') in tree.xpath('namespace::*'))
def test_xpath_namespace_empty(self):
tree = self.parse('<a/>')
self.assertEquals([('xml', 'http://www.w3.org/XML/1998/namespace')],
tree.xpath('namespace::*'))
def test_xpath_list_elements(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEquals([root[0], root[1]],
tree.xpath('/a/b'))
def test_xpath_list_nothing(self):
tree = self.parse('<a><b/></a>')
self.assertEquals([],
tree.xpath('/a/c'))
# this seems to pass a different code path, also should return nothing
self.assertEquals([],
tree.xpath('/a/c/text()'))
def test_xpath_list_text(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEquals(['Foo', 'Bar'],
tree.xpath('/a/b/text()'))
def test_xpath_list_text_parent(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()'))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_text_parent_no_smart_strings(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
        self.assertEquals(['FooBar', 'BarFoo'],
                          tree.xpath('/a/b/text()', smart_strings=True))
        self.assertEquals([root[0], root[1]],
                          [r.getparent() for r in
                           tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=False))
self.assertEquals([False, False],
[hasattr(r, 'getparent') for r in
tree.xpath('/a/b/text()', smart_strings=False)])
def test_xpath_list_unicode_text_parent(self):
xml = _bytes('<a><b>FooBar\\u0680\\u3120</b><b>BarFoo\\u0680\\u3120</b></a>').decode("unicode_escape")
tree = self.parse(xml.encode('utf-8'))
root = tree.getroot()
self.assertEquals([_bytes('FooBar\\u0680\\u3120').decode("unicode_escape"),
_bytes('BarFoo\\u0680\\u3120').decode("unicode_escape")],
tree.xpath('/a/b/text()'))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_attribute(self):
tree = self.parse('<a b="B" c="C"/>')
self.assertEquals(['B'],
tree.xpath('/a/@b'))
def test_xpath_list_attribute_parent(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c')
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
def test_xpath_list_attribute_parent_no_smart_strings(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c', smart_strings=True)
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
results = tree.xpath('/a/@c', smart_strings=False)
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(False, hasattr(results[0], 'getparent'))
def test_xpath_list_comment(self):
tree = self.parse('<a><!-- Foo --></a>')
self.assertEquals(['<!-- Foo -->'],
list(map(repr, tree.xpath('/a/node()'))))
def test_rel_xpath_boolean(self):
root = etree.XML('<a><b><c/></b></a>')
el = root[0]
self.assert_(el.xpath('boolean(c)'))
self.assert_(not el.xpath('boolean(d)'))
def test_rel_xpath_list_elements(self):
tree = self.parse('<a><c><b>Foo</b><b>Bar</b></c><c><b>Hey</b></c></a>')
root = tree.getroot()
c = root[0]
self.assertEquals([c[0], c[1]],
c.xpath('b'))
self.assertEquals([c[0], c[1], root[1][0]],
c.xpath('//b'))
def test_xpath_ns(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertEquals(
[root[0]],
tree.xpath('//foo:b', namespaces={'foo': 'uri:a'}))
self.assertEquals(
[],
tree.xpath('//foo:b', namespaces={'foo': 'uri:c'}))
self.assertEquals(
[root[0]],
root.xpath('//baz:b', namespaces={'baz': 'uri:a'}))
def test_xpath_ns_none(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={None: 'uri:a'})
def test_xpath_ns_empty(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={'': 'uri:a'})
def test_xpath_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '\\fad')
def test_xpath_class_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
self.assertRaises(etree.XPathSyntaxError, etree.XPath, '\\fad')
def test_xpath_prefix_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '/fa:d')
def test_xpath_class_prefix_error(self):
tree = self.parse('<a/>')
xpath = etree.XPath("/fa:d")
self.assertRaises(etree.XPathEvalError, xpath, tree)
def test_elementtree_getpath(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(a)
self.assertEqual('/a/c/d',
tree.getpath(d2)[:6])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_elementtree_getpath_partial(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c
|
keepkey/python-keepkey
|
tests/test_msg_verifymessage_segwit_native.py
|
Python
|
lgpl-3.0
| 4,324
| 0.002313
|
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import unittest
import common
from binascii import unhexlify
class TestMsgVerifymessageSegwitNative(common.KeepKeyTest):
def test_message_long(self):
self.setup_mnemonic_nopin_nopassphrase()
ret = self.client.verify_message(
'Bitcoin',
'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
unhexlify('285ff795c29aef7538f8b3bdb2e8add0d0722ad630a140b6aefd504a5a895cbd867cbb00981afc50edd0398211e8d7c304bb8efa461181bc0afa67ea4a720a89ed'),
"VeryLongMessage!" * 64
)
assert ret is True
def test_message_testnet(self):
self.setup_mnemonic_nopin_nopassphrase()
ret = self.client.verify_message(
'Testnet',
'tb1qyjjkmdpu7metqt5r36jf872a34syws336p3n3p',
unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
'This is an example of a signed message.'
)
assert ret is True
def test_message_verify(self):
self.setup_mnemonic_nopin_nopassphrase()
# trezor pubkey - OK
res = self.client.verify_message(
'Bitcoin',
'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
'This is an example of a signed message.'
)
assert res is True
# trezor pubkey - FAIL - wrong sig
res = self.client.verify_message(
'Bitcoin',
'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be00'),
'This is an example of a signed message.'
)
assert res is False
# trezor pubkey - FAIL - wrong msg
res = self.client.verify_message(
'Bitcoin',
'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
'This is an example of a signed message!'
)
assert res is False
def test_verify_utf(self):
self.setup_mnemonic_nopin_nopassphrase()
        words_nfkd = u'Pr\u030ci\u0301s\u030cerne\u030c z\u030clut\u030couc\u030cky\u0301 ku\u030an\u030c u\u0301pe\u030cl d\u030ca\u0301belske\u0301 o\u0301dy za\u0301ker\u030cny\u0301 uc\u030cen\u030c be\u030cz\u030ci\u0301 pode\u0301l zo\u0301ny u\u0301lu\u030a'
words_nfc = u'P\u0159\xed\u0161ern\u011b \u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy z\xe1ke\u0159n\xfd u\u010de\u0148 b\u011b\u017e\xed pod\xe9l z\xf3ny \xfal\u016f'
res_nfkd = self.client.verify_message(
'Bitcoin',
'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
unhexlify('28d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6'),
words_nfkd
)
res_nfc = self.client.verify_message(
'Bitcoin',
'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
unhexlify('28d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6'),
words_nfc
)
assert res_nfkd is True
assert res_nfc is True
if __name__ == '__main__':
unittest.main()
|
uc-cdis/cdis-python-utils
|
cdispyutils/auth/errors.py
|
Python
|
apache-2.0
| 101
| 0
|
class JWTValidationError(Exception):
    pass
class JWTAudienceError(JWTValidationError):
pass
|
Lukasa/cryptography
|
tests/utils.py
|
Python
|
apache-2.0
| 14,122
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import collections
from contextlib import contextmanager
import pytest
import six
from cryptography.exceptions import UnsupportedAlgorithm
import cryptography_vectors
HashVector = collections.namedtuple("HashVector", ["message", "digest"])
KeyedHashVector = collections.namedtuple(
"KeyedHashVector", ["message", "digest", "key"]
)
def select_backends(names, backend_list):
if names is None:
return backend_list
split_names = [x.strip() for x in names.split(',')]
# this must be duplicated and then removed to preserve the metadata
# pytest associates. Appending backends to a new list doesn't seem to work
selected_backends = []
for backend in backend_list:
if backend.name in split_names:
selected_backends.append(backend)
if len(selected_backends) > 0:
return selected_backends
else:
raise ValueError(
"No backend selected. Tried to select: {0}".format(split_names)
)
def check_for_iface(name, iface, item):
if name in item.keywords and "backend" in item.funcargs:
if not isinstance(item.funcargs["backend"], iface):
pytest.skip("{0} backend does not support {1}".format(
item.funcargs["backend"], name
))
def check_backend_support(item):
supported = item.keywords.get("supported")
if supported and "backend" in item.funcargs:
if not supported.kwargs["only_if"](item.funcargs["backend"]):
pytest.skip("{0} ({1})".format(
supported.kwargs["skip_message"], item.funcargs["backend"]
))
elif supported:
raise ValueError("This mark is only available on methods that take a "
"backend")
@contextmanager
def raises_unsupported_algorithm(reason):
with pytest.raises(UnsupportedAlgorithm) as exc_info:
yield exc_info
assert exc_info.value._reason is reason
def load_vectors_from_file(filename, loader):
with cryptography_vectors.open_vector_file(filename) as vector_file:
return loader(vector_file)
def load_nist_vectors(vector_data):
test_data = None
data = []
for line in vector_data:
line = line.strip()
# Blank lines, comments, and section headers are ignored
if not line or line.startswith("#") or (line.startswith("[")
and line.endswith("]")):
continue
if line.strip() == "FAIL":
test_data["fail"] = True
continue
# Build our data using a simple Key = Value format
name, value = [c.strip() for c in line.split("=")]
# Some tests (PBKDF2) contain \0, which should be interpreted as a
# null character rather than literal.
value = value.replace("\\0", "\0")
# COUNT is a special token that indicates a new block of data
if name.upper() == "COUNT":
test_data = {}
data.append(test_data)
continue
# For all other tokens we simply want the name, value stored in
# the dictionary
else:
test_data[name.lower()] = value.encode("ascii")
return data
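# Hedged illustration (not part of the original test utilities): a minimal,
# made-up vector block and what the "Key = Value" parser above turns it into.
def _load_nist_vectors_example():
    sample = [
        "# sample header comment",
        "[ENCRYPT]",
        "COUNT = 0",
        "KEY = 00112233445566778899aabbccddeeff",
        "PLAINTEXT = deadbeef",
    ]
    # -> [{'key': b'00112233445566778899aabbccddeeff', 'plaintext': b'deadbeef'}]
    return load_nist_vectors(sample)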
def load_cryptrec_vectors(vector_data):
cryptrec_list = []
for line in vector_data:
line = line.strip()
# Blank lines and comments are ignored
if not line or line.startswith("#"):
continue
if line.startswith("K"):
key = line.split(" : ")[1].replace(" ", "").encode("ascii")
elif line.startswith("P"):
pt = line.split(" : ")[1].replace(" ", "").encode("ascii")
elif line.startswith("C"):
ct = line.split(" : ")[1].replace(" ", "").encode("ascii")
# after a C is found the K+P+C tuple is complete
# there are many P+C pairs for each K
cryptrec_list.append({
"key": key,
"plaintext": pt,
"ciphertext": ct
})
else:
raise ValueError("Invalid line in file '{}'".format(line))
return cryptrec_list
def load_hash_vectors(vector_data):
vectors = []
key = None
msg = None
md = None
for line in vector_data:
line = line.strip()
if not line or line.startswith("#") or line.startswith("["):
continue
if line.startswith("Len"):
length = int(line.split(" = ")[1])
elif line.startswith("Key"):
# HMAC vectors contain a key attribute. Hash vectors do not.
key = line.split(" = ")[1].encode("ascii")
elif line.startswith("Msg"):
# In the NIST vectors they have chosen to represent an empty
# string as hex 00, which is of course not actually an empty
# string. So we parse the provided length and catch this edge case.
msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
elif line.startswith("MD"):
md = line.split(" = ")[1]
# after MD is found the Msg+MD (+ potential key) tuple is complete
if key is not None:
vectors.append(KeyedHashVector(msg, md, key))
key = None
msg = None
md = None
else:
vectors.append(HashVector(msg, md))
msg = None
md = None
else:
raise ValueError("Unknown line in hash vector")
return vectors
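# Hedged illustration (not part of the original utilities): the empty-message
# edge case called out above — "Len = 0" with "Msg = 00" parses to b"".
def _load_hash_vectors_example():
    sample = [
        "Len = 0",
        "Msg = 00",
        "MD = d41d8cd98f00b204e9800998ecf8427e",
    ]
    # -> [HashVector(message=b"", digest="d41d8cd98f00b204e9800998ecf8427e")]
    return load_hash_vectors(sample)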
def load_pkcs1_vectors(vector_data):
"""
Loads data out of RSA PKCS #1 vector files.
"""
private_key_vector = None
public_key_vector = None
attr = None
key = None
example_vector = None
examples = []
vectors = []
for line in vector_data:
if (
line.startswith("# PSS Example") or
line.startswith("# PKCS#1 v1.5 Signature")
):
if example_vector:
for key, value in six.iteritems(example_vector):
hex_str = "".join(value).replace(" ", "").encode("ascii")
example_vector[key] = hex_str
examples.append(example_vector)
attr = None
example_vector = collections.defaultdict(list)
if line.startswith("# Message to be signed"):
attr = "message"
continue
elif line.startswith("# Salt"):
attr = "salt"
continue
elif line.startswith("# Signature"):
attr = "signature"
continue
elif (
example_vector and
line.startswith("# =============================================")
):
for key, value in six.iteritems(example_vector):
hex_str = "".join(value).replace(" ", "").encode("ascii")
example_vector[key] = hex_str
examples.append(example_vector)
example_vector = None
attr = None
elif example_vector and line.startswith("#"):
continue
else:
if attr is not None and example_vector is not None:
example_vector[attr].append(line.strip())
continue
if (
line.startswith("# Example") or
line.startswith("# =============================================")
):
if key:
assert private_key_vector
assert public_key_vector
for key, value in six.iteritems(public_key_vector):
hex_str = "".join(v
|
svetlyak40wt/django-dzenlog
|
example/blog/models.py
|
Python
|
bsd-3-clause
| 621
| 0.004831
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_dzenlog.models import GeneralPost
class TextPost(GeneralPost):
body_detail_template = 'blog/text_post.html'
feed_description_template = 'blog/text_feed_detail.html'
body = models.TextField(_('Post\'s body'))
class LinkPost(GeneralPost):
body_detail_template = 'blog/link_post.html'
feed_description_template = 'blog/link_feed_detail.html'
url = models.URLField(_('URL'), default='http://example.com', verify_exists=False)
description = models.TextField(_('URL\'s description'), blank=True)
|
mkobos/tree_crawler
|
concurrent_tree_crawler/test/subtrees_comparer.py
|
Python
|
mit
| 650
| 0.026154
|
def subtrees_equal(expected_schema_node, actual_node):
if expected_schema_node[0] != actual_node.get_name():
return False
if expected_schema_node[1] != actual_node.get_state():
return False
expected_children = expected_schema_node[2]
actual_children = actual_node.get_children()
actual_children_names = [child.get_name() for child in actual_children]
actual_children_names.sort()
    if len(expected_children) != len(actual_children_names):
return False
for (expected_child, actual_child_name) in \
zip(expected_children, actual_children_names):
        if not subtrees_equal(
                expected_child, actual_node.get_child(actual_child_name)):
            return False
return True
|
amolkahat/pandas
|
pandas/tests/arrays/categorical/test_algos.py
|
Python
|
bsd-3-clause
| 5,452
| 0
|
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
['b', 'a', 'c'],
['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
cat = pd.Categorical(['b', 'b', 'a', 'c', None],
categories=categories,
ordered=ordered)
labels, uniques = pd.factorize(cat)
expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a', 'c'],
categories=categories,
ordered=ordered)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(['b', 'b', None, 'a'])
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(['a', 'b'])
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(['b', 'b', None, 'a'],
categories=['c', 'b', 'a'],
ordered=True)
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_isin_cats():
# GH2003
cat = pd.Categorical(["a", "b", np.nan])
result = cat.isin(["a", np.nan])
expected = np.array([True, False, True], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
result = cat.isin(["a", "c"])
expected = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_isin_empty(empty):
s = pd.Categorical(["a", "b"])
expected = np.array([False, False], dtype=bool)
result = s.isin(empty)
tm.assert_numpy_array_equal(expected, result)
class TestTake(object):
# https://github.com/pandas-dev/pandas/issues/20664
def test_take_warns(self):
cat = pd.Categorical(['a', 'b'])
with tm.assert_produces_warning(FutureWarning):
cat.take([0, -1])
def test_take_positive_no_warning(self):
cat = pd.Categorical(['a', 'b'])
with tm.assert_produces_warning(None):
cat.take([0, 0])
def test_take_bounds(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical(['a', 'b', 'a'])
with pytest.raises(IndexError):
cat.take([4, 5], allow_fill=allow_fill)
def test_take_empty(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical([], categories=['a', 'b'])
with pytest.raises(IndexError):
cat.take([0], allow_fill=allow_fill)
def test_positional_take(self, ordered):
cat = pd.Categorical(['a', 'a', 'b', 'b'], categories=['b', 'a'],
ordered=ordered)
result = cat.take([0, 1, 2], allow_fill=False)
expected = pd.Categorical(['a', 'a', 'b'], categories=cat.categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_positional_take_unobserved(self, ordered):
cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'],
ordered=ordered)
result = cat.take([1, 0], allow_fill=False)
expected = pd.Categorical(['b', 'a'], categories=cat.categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_take_allow_fill(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(['a', 'a', 'b'])
result = cat.take([0, -1, -1], allow_fill=True)
expected = pd.Categorical(['a', np.nan, np.nan],
categories=['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_take_fill_with_negative_one(self):
# -1 was a category
cat = pd.Categorical([-1, 0, 1])
        result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
expected = pd.Categorical([-1, -1, 0], categories=[-1, 0, 1])
tm.assert_categorical_equal(result, expected)
def test_take_fill_value(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(['a', 'b', 'c'])
result = cat.take([0, 1, -1], fill_value='a', allow_fill=True)
expected = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        tm.assert_categorical_equal(result, expected)
def test_take_fill_value_new_raises(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(['a', 'b', 'c'])
xpr = r"'fill_value' \('d'\) is not in this Categorical's categories."
with tm.assert_raises_regex(TypeError, xpr):
cat.take([0, 1, -1], fill_value='d', allow_fill=True)
|
nathanielvarona/airflow
|
tests/providers/apache/cassandra/hooks/test_cassandra.py
|
Python
|
apache-2.0
| 8,538
| 0.00164
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from cassandra.cluster import Cluster
from cassandra.policies import (
DCAwareRoundRobinPolicy,
RoundRobinPolicy,
TokenAwarePolicy,
WhiteListRoundRobinPolicy,
)
from airflow.models import Connection
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
from airflow.utils import db
@pytest.mark.integration("cassandra")
class TestCassandraHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='cassandra_test',
conn_type='cassandra',
                host='host-1,host-2',
port='9042',
schema='test_keyspace',
extra='{"load_balancing_policy":"TokenAwarePolicy","protocol_version":4}',
)
)
db.merge_conn(
Connection(
conn_id='cassandra_default_with_schema',
conn_type='cassandra',
host='cassandra',
port='9042',
schema='s',
)
)
hook = CassandraHook("cassandra_default")
session = hook.get_conn()
cqls = [
"DROP SCHEMA IF EXISTS s",
"""
CREATE SCHEMA s WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
""",
]
for cql in cqls:
session.execute(cql)
session.shutdown()
hook.shutdown_cluster()
def test_get_conn(self):
with mock.patch.object(Cluster, "__init__") as mock_cluster_ctor:
mock_cluster_ctor.return_value = None
CassandraHook(cassandra_conn_id='cassandra_test')
mock_cluster_ctor.assert_called_once_with(
contact_points=['host-1', 'host-2'],
port=9042,
protocol_version=4,
load_balancing_policy=mock.ANY,
)
assert isinstance(mock_cluster_ctor.call_args[1]['load_balancing_policy'], TokenAwarePolicy)
def test_get_lb_policy_with_no_args(self):
# test LB policies with no args
self._assert_get_lb_policy('RoundRobinPolicy', {}, RoundRobinPolicy)
self._assert_get_lb_policy('DCAwareRoundRobinPolicy', {}, DCAwareRoundRobinPolicy)
self._assert_get_lb_policy(
'TokenAwarePolicy', {}, TokenAwarePolicy, expected_child_policy_type=RoundRobinPolicy
)
def test_get_lb_policy_with_args(self):
# test DCAwareRoundRobinPolicy with args
self._assert_get_lb_policy(
'DCAwareRoundRobinPolicy',
{'local_dc': 'foo', 'used_hosts_per_remote_dc': '3'},
DCAwareRoundRobinPolicy,
)
# test WhiteListRoundRobinPolicy with args
fake_addr_info = [
['family', 'sockettype', 'proto', 'canonname', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0)]
]
with mock.patch('socket.getaddrinfo', return_value=fake_addr_info):
self._assert_get_lb_policy(
'WhiteListRoundRobinPolicy', {'hosts': ['host1', 'host2']}, WhiteListRoundRobinPolicy
)
# test TokenAwarePolicy with args
with mock.patch('socket.getaddrinfo', return_value=fake_addr_info):
self._assert_get_lb_policy(
'TokenAwarePolicy',
{
'child_load_balancing_policy': 'WhiteListRoundRobinPolicy',
'child_load_balancing_policy_args': {'hosts': ['host-1', 'host-2']},
},
TokenAwarePolicy,
expected_child_policy_type=WhiteListRoundRobinPolicy,
)
def test_get_lb_policy_invalid_policy(self):
# test invalid policy name should default to RoundRobinPolicy
self._assert_get_lb_policy('DoesNotExistPolicy', {}, RoundRobinPolicy)
# test invalid child policy name should default child policy to RoundRobinPolicy
self._assert_get_lb_policy(
'TokenAwarePolicy', {}, TokenAwarePolicy, expected_child_policy_type=RoundRobinPolicy
)
self._assert_get_lb_policy(
'TokenAwarePolicy',
{'child_load_balancing_policy': 'DoesNotExistPolicy'},
TokenAwarePolicy,
expected_child_policy_type=RoundRobinPolicy,
)
def test_get_lb_policy_no_host_for_allow_list(self):
# test host not specified for WhiteListRoundRobinPolicy should throw exception
self._assert_get_lb_policy(
'WhiteListRoundRobinPolicy', {}, WhiteListRoundRobinPolicy, should_throw=True
)
self._assert_get_lb_policy(
'TokenAwarePolicy',
{'child_load_balancing_policy': 'WhiteListRoundRobinPolicy'},
TokenAwarePolicy,
expected_child_policy_type=RoundRobinPolicy,
should_throw=True,
)
def _assert_get_lb_policy(
self,
policy_name,
policy_args,
expected_policy_type,
expected_child_policy_type=None,
should_throw=False,
):
thrown = False
try:
policy = CassandraHook.get_lb_policy(policy_name, policy_args)
assert isinstance(policy, expected_policy_type)
if expected_child_policy_type:
assert isinstance(policy._child_policy, expected_child_policy_type)
except Exception: # pylint: disable=broad-except
thrown = True
assert should_throw == thrown
def test_record_exists_with_keyspace_from_cql(self):
hook = CassandraHook("cassandra_default")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS s.t",
"CREATE TABLE s.t (pk1 text, pk2 text, c text, PRIMARY KEY (pk1, pk2))",
"INSERT INTO s.t (pk1, pk2, c) VALUES ('foo', 'bar', 'baz')",
]
for cql in cqls:
session.execute(cql)
assert hook.record_exists("s.t", {"pk1": "foo", "pk2": "bar"})
assert not hook.record_exists("s.t", {"pk1": "foo", "pk2": "baz"})
session.shutdown()
hook.shutdown_cluster()
def test_record_exists_with_keyspace_from_session(self):
hook = CassandraHook("cassandra_default_with_schema")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS t",
"CREATE TABLE t (pk1 text, pk2 text, c text, PRIMARY KEY (pk1, pk2))",
"INSERT INTO t (pk1, pk2, c) VALUES ('foo', 'bar', 'baz')",
]
for cql in cqls:
session.execute(cql)
assert hook.record_exists("t", {"pk1": "foo", "pk2": "bar"})
assert not hook.record_exists("t", {"pk1": "foo", "pk2": "baz"})
session.shutdown()
hook.shutdown_cluster()
def test_table_exists_with_keyspace_from_cql(self):
hook = CassandraHook("cassandra_default")
session = hook.get_conn()
cqls = [
"DROP TABLE IF EXISTS s.t",
"CREATE TABLE s.t (pk1 text PRIMARY KEY)",
]
for cql in cqls:
session.execute(cql)
assert hook.table_exists("s.t")
assert not hook.table_exists("s.u")
session.shutdown()
hook.shutdown_cluster()
def test_table_exists_with_keyspace_from_session(self):
hook = CassandraHook("cassandra_default_with_sc
|
hasadna/knesset-data-pipelines
|
people/plenum_session_voters.py
|
Python
|
mit
| 2,135
| 0.000468
|
from datapackage_pipelines.wrapper import ingest, spew
def get_votes(resource, data, stats):
data['session_voters'] = {}
stats['num_votes'] = 0
stats['num_vote_mks'] = 0
for vote in resource:
voters = data['session_voters'].setdefault(vote['session_id'], set())
for attr in ['mk_ids_pro', 'mk_ids_against', 'mk_ids_abstain']:
mk_ids = vote[attr]
if mk_ids:
for mk_id in mk_ids:
voters.add(mk_id)
stats['num_vote_mks'] += 1
        stats['num_votes'] += 1
def get_plenum(resource, data, stats):
stats.update(known_sessions=0, unknown_sessions=0)
for session in resource:
if session['PlenumSessionID'] in data['session_voters']:
stats['known_sessions'] += 1
session['voter_mk_ids'] = list(data['session_voters'][session['PlenumSessionID']])
else:
session['voter_mk_ids'] = None
stats['unknown_sessions'] += 1
if not session['voter_mk_ids']:
session['voter_mk_ids'] = None
yield session
def get_resources(resources, stats, data):
for i, resource in enumerate(resources):
if i == data['votes_index']:
get_votes(resource, data, stats)
elif i == data['plenum_index']:
yield get_plenum(resource, data, stats)
else:
yield resource
def get_datapackage(datapackage, data):
for i, descriptor in enumerate(datapackage['resources']):
if descriptor['name'] == 'view_vote_rslts_hdr_approved':
data['votes_index'] = i
elif descriptor['name'] == 'kns_plenumsession':
data['plenum_index'] = i
fields = [{'name': 'voter_mk_ids', 'type': 'array'}]
descriptor['schema']['fields'] += fields
del datapackage['resources'][data['votes_index']]
return datapackage
def main():
parameters, datapackage, resources, stats, data = ingest() + ({}, {})
spew(get_datapackage(datapackage, data),
get_resources(resources, stats, data),
stats)
if __name__ == '__main__':
main()
|
mostafa-mahmoud/sahwaka
|
lib/evaluator.py
|
Python
|
apache-2.0
| 13,538
| 0.002955
|
#!/usr/bin/env python
"""
A module that provides functionality for calculating error metrics
and evaluating the given recommender.
"""
import numpy
from util.top_recommendations import TopRecommendations
class Evaluator(object):
"""
A class for computing evaluation metrics and splitting the input data.
"""
def __init__(self, ratings, abstracts_preprocessor=None, random_seed=False,
verbose=False):
"""
Initialize an evaluator array with the initial actual ratings matrix.
:param int[][] ratings: A numpy array containing the initial ratings.
:param AbstractsPreprocessor abstracts_preprocessor: A list of the abstracts.
:param bool random_seed: if False, we will use a fixed seed.
:param bool verbose: A flag deciding to print progress
"""
self.ratings = ratings
self.n_users, self.n_items = ratings.shape
if abstracts_preprocessor:
self.abstracts_preprocessor = abstracts_preprocessor
self.random_seed = random_seed
self._verbose = verbose
self.k_folds = None
if self._verbose:
print('%d users and %d items' % (self.n_users, self.n_items))
# stores recommended indices for each user.
self.recommendation_indices = [[] for i in range(self.ratings.shape[0])]
# False if recommendations have not been loaded yet and vice versa.
self.recs_loaded = False
def get_abstracts_preprocessor(self):
"""
Getter for the Abstracts preprocessor.
:returns: abstracts preprocessor
:rtype: AbstractsPreprocessor
"""
return self.abstracts_preprocessor
def get_ratings(self):
"""
Getter for the ratings matrix.
:returns: Ratings matrix
:rtype: ndarray
"""
return self.ratings
def set_kfolds(self, kfolds):
"""
Set the k-folds
:param int kfolds: the number of the folds in K-fold
"""
self.k_folds = kfolds
self.test_percentage = 1.0 / self.k_folds
def naive_split(self, type='user'):
"""
Split the data into training and testing sets.
:returns: a tuple of train and test data.
:rtype: tuple
"""
if type == 'user':
return self.naive_split_users()
return self.naive_split_items()
def naive_split_users(self):
"""
        Split the ratings into test and train data for every user.
:returns: a tuple of train and test data.
:rtype: tuple
"""
        if self.random_seed is False:
numpy.random.seed(42)
test = numpy.zeros(self.ratings.shape)
train = self.ratings.copy()
for user in range(self.ratings.shape[0]):
non_zeros = self.ratings[user, :].nonzero()[0]
test_ratings = numpy.random.choice(non_zeros,
size=int(self.test_percentage * len(non_zeros)))
train[user, test_ratings] = 0.
test[user, test_ratings] = self.ratings[user, test_ratings]
assert(numpy.all((train * test) == 0))
self.test_indices = test
return train, test
def naive_split_items(self):
"""
Split the ratings on test and train data by removing random documents.
:returns: a tuple of train and test data.
:rtype: tuple
"""
if self.random_seed is False:
numpy.random.seed(42)
indices = list(range(self.n_items))
test_ratings = numpy.random.choice(indices, size=int(self.test_percentage * len(indices)))
train = self.ratings.copy()
test = numpy.zeros(self.ratings.shape)
for index in test_ratings:
train[:, index] = 0
test[:, index] = self.ratings[:, index]
assert(numpy.all((train * test) == 0))
return train, test
def get_fold(self, fold_num, fold_test_indices):
"""
Returns train and test data for a given fold number
:param int fold_num: the fold index to be returned
:param int[] fold_test_indices: A list of the indicies of the testing fold.
:returns: tuple of training and test data
:rtype: 2-tuple of 2d numpy arrays
"""
current_test_fold_indices = []
index = fold_num
for ctr in range(self.ratings.shape[0]):
current_test_fold_indices.append(fold_test_indices[index])
index += self.k_folds
return self.generate_kfold_matrix(current_test_fold_indices)
def get_kfold_indices(self):
"""
Returns the indices for rating matrix for each kfold split. Where each test set
contains ~1/k of the total items a user has in their digital library.
:returns: a list of all indices of the training set and test set.
:rtype: list of lists
"""
if self.random_seed is False:
numpy.random.seed(42)
test_indices = []
for user in range(self.ratings.shape[0]):
# Indices for all items in the rating matrix.
item_indices = numpy.arange(self.ratings.shape[1])
# Indices of all items in user's digital library.
rated_items_indices = self.ratings[user].nonzero()[0]
mask = numpy.ones(len(self.ratings[user]), dtype=bool)
mask[[rated_items_indices]] = False
# Indices of all items not in user's digital library.
non_rated_indices = item_indices[mask]
# Shuffle all rated items indices
numpy.random.shuffle(rated_items_indices)
# Size of 1/k of the total user's ratings
size_of_test = round((1.0 / self.k_folds) * len(rated_items_indices))
# 2d List that stores all the indices of each test set for each fold.
test_ratings = [[] for x in range(self.k_folds)]
counter = 0
numpy.random.shuffle(non_rated_indices)
# List that stores the number of indices to be added to each test set.
num_to_add = []
# create k different folds for each user.
for index in range(self.k_folds):
if index == self.k_folds - 1:
test_ratings[index] = numpy.array(rated_items_indices[counter:len(rated_items_indices)])
else:
test_ratings[index] = numpy.array(rated_items_indices[counter:counter + size_of_test])
counter += size_of_test
# adding unique zero ratings to each test set
num_to_add.append(int((self.ratings.shape[1] / self.k_folds) - len(test_ratings[index])))
if index > 0 and num_to_add[index] != num_to_add[index - 1]:
addition = non_rated_indices[index * (num_to_add[index - 1]):
(num_to_add[index - 1] * index) + num_to_add[index]]
else:
addition = non_rated_indices[index * (num_to_add[index]):num_to_add[index] * (index + 1)]
test_ratings[index] = numpy.append(test_ratings[index], addition)
test_indices.append(test_ratings[index])
self.test_indices = test_indices
return test_indices
def generate_kfold_matrix(self, test_indices):
"""
Returns a training set and a training set matrix for one fold.
This method is to be used in conjunction with get_kfold_indices()
:param int[] test_indices: array of test set indices.
:returns: Training set matrix and Test set matrix.
:rtype: 2-tuple of 2d numpy arrays
"""
train_matrix = numpy.zeros(self.ratings.shape)
test_matrix = numpy.zeros(self.ratings.shape)
for user in range(train_matrix.shape[0]):
train_indices = list(set(range(self.n_items)) - set(test_indices[user]))
test_matrix[user, test_indices[user]] = self.ratings[user, test_indices[user]]
train_matrix[user, train_indices] = self.ratings[user, train_indices]
        return train_matrix, test_matrix
|
MediaMath/t1-python
|
terminalone/models/concept.py
|
Python
|
apache-2.0
| 729
| 0
|
# -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
class Concept(Entity):
"""Concept entity."""
collection = 'concepts'
resource = 'concept'
_relations = {
'advertiser',
}
_pull = {
'advertiser_id': int,
'created_on': t1types.strpt,
'id': int,
'name': None,
        'status': t1types.int_to_bool,
'updated_on': t1types.strpt,
'version': int,
}
_push = _pull.copy()
_push.update({
'status': int,
})
def __init__(self, session, properties=None, **kwargs):
        super(Concept, self).__init__(session, properties, **kwargs)
|
bd4/monster-hunter-scripts
|
db/_pathfix.py
|
Python
|
mit
| 251
| 0
|
"""
Hack to get scripts to run from source checkout without having to set
PYTHONPATH.
"""
import sys
from os.path import dirname, join, abspath
db_path = dirname(__file__)
project_path = abspath(join(db_path, ".."))
sys.path.insert(0, project_path)
|
mfalaize/homelab
|
compta/management/commands/check_operations.py
|
Python
|
gpl-3.0
| 3,142
| 0.003196
|
import calendar
from datetime import date
from django.conf import settings
from django.core.mail import send_mail
from django.core.management import BaseCommand
from django.template.loader import get_template
from compta.bank import get_bank_class
from compta.models import Compte
def generate_mail():
    # On the last day of the month, send an email for each joint account with the amounts to deposit into it
if date.today().day == calendar.monthrange(date.today().year, date.today().month)[1]:
comptes = Compte.objects.all()
for compte in comptes:
if compte.utilisateurs.count() > 1:
compte.calculer_parts()
if compte.total_salaire > 0:
html_content = get_template('compta/details_calcule_a_verser.html').render(locals())
mails = []
for user in compte.utilisateurs_list:
if user.email is not None:
mails.append(user.email)
if len(mails) > 0:
send_mail(
'[Homelab] Sommes à verser sur {}'.format(str(compte)),
"",
settings.DEFAULT_FROM_EMAIL, mails, html_message=html_content)
def check_operations():
"""Récupère les dernières op
|
érations bancaires en ligne, inscrit les nouvelles en base et les envoie par mail"""
comptes = Compte.objects.all()
for compte in comptes:
operations = compte.operation_set.all()
bank_class = get_bank_class(compte.identifiant.banque)
has_changed = False
with bank_class(compte.identifiant.login, compte.identifiant.mot_de_passe, compte.numero_compte) as bank:
new_operations = bank.fetch_last_operations()
new_solde = bank.fetch_balance()
for new_operation in new_operations:
found = False
for operation in operations:
if operation.date_valeur == new_operation.date_valeur and operation.libelle == new_operation.libelle and float(operation.montant) == float(new_operation.montant):
found = True
break
if not found:
new_operation.compte = compte
new_operation.save()
has_changed = True
if compte.solde != new_solde:
compte.solde = new_solde
compte.save()
if has_changed:
mails = []
for user in compte.utilisateurs.all():
if user.email is not None:
mails.append(user.email)
if len(mails) > 0:
send_mail(
'[Homelab] De nouvelles opérations sont à catégoriser sur {}'.format(str(compte)),
"",
settings.DEFAULT_FROM_EMAIL, mails)
generate_mail()
class Command(BaseCommand):
help = "Déclenche le script qui vérifie les nouvelles opérations bancaires et qui envoie des mails lorsqu'il y en a des nouvelles"
def handle(self, *args, **options):
check_operations()
|
scikit-learn-contrib/categorical-encoding
|
category_encoders/leave_one_out.py
|
Python
|
bsd-3-clause
| 11,063
| 0.002983
|
"""Leave one out coding"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'hbghhy'
class LeaveOneOutEncoder(BaseEstimator, util.TransformerWithTargetMixin):
"""Leave one out coding for categorical features.
This is very similar to target encoding but excludes the current row's
target when calculating the mean target for a level to reduce the effect
of outliers.
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
handle_unknown: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
sigma: float
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing
data are untouched). Sigma gives the standard deviation (spread or "width") of the normal distribution.
The optimal value is commonly between 0.05 and 0.6. The default is to not add noise, but that leads
to significantly suboptimal results.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = LeaveOneOutEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] Strategies to encode categorical variables with many categories, from
https://www.kaggle.com/c/caterpillar-tube-pricing/discussion/15748#143154.
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
handle_unknown='value', handle_missing='value', random_state=None, sigma=None):
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X
self.cols = cols
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._mean = None
self.random_state = random_state
self.sigma = sigma
self.feature_names = None
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# unite the input into pandas types
X = util.convert_input(X)
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.use_default_cols:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
categories = self.fit_leave_one_out(
X, y,
cols=self.cols
)
self.mapping = categories
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target information (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# unite the input into pandas types
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
# if we are encoding the training data, we have to check the target
if y is not None:
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
if not list(self.cols):
return X
X = self.transform_leave_one_out(
X, y,
mapping=self.mapping
)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def fit_leave_one_out(self, X_in, y, cols=None):
X = X_in.copy(deep=True)
if cols is None:
            cols = X.columns.values
self._mean = y.mean()
return {col: self.fit_column_map(X[col], y) for col in cols}
def fit_column_map(self, series, y):
category = pd.Categorical(series)
categories = category.categories
codes = category.codes.copy()
codes[codes == -1] = len(categories)
categories = np.append(categories, np.nan)
        return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))
result = y.groupby(codes).agg(['sum', 'count'])
return result.rename(return_map)
    def transform_leave_one_out(self, X_in, y, mapping):
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.2/Lib/test/test_zlib.py
|
Python
|
mit
| 6,045
| 0.00397
|
import zlib
from test_support import TestFailed
import sys
import imp
try:
t = imp.find_module('test_zlib')
file = t[0]
except ImportError:
file = open(__file__)
buf = file.read() * 8
file.close()
# test the checksums (hex so the test doesn't break on 64-bit machines)
print hex(zlib.crc32('penguin')), hex(zlib.crc32('penguin', 1))
print hex(zlib.adler32('penguin')), hex(zlib.adler32('penguin', 1))
# make sure we generate some expected errors
try:
zlib.compress('ERROR', zlib.MAX_WBITS + 1)
except zlib.error, msg:
print "expecting", msg
try:
zlib.compressobj(1, 8, 0)
except ValueError, msg:
print "expecting", msg
try:
zlib.decompressobj(0)
except ValueError, msg:
print "expecting", msg
x = zlib.compress(buf)
y = zlib.decompress(x)
if buf != y:
print "normal compression/decompression failed"
else:
print "normal compression/decompression succeeded"
buf = buf * 16
co = zlib.compressobj(8, 8, -15)
x1 = co.compress(buf)
x2 = co.flush()
x = x1 + x2
dc = zlib.decompressobj(-15)
y1 = dc.decompress(x)
y2 = dc.flush()
y = y1 + y2
if buf != y:
print "compress/decompression obj failed"
else:
print "compress/decompression obj succeeded"
co = zlib.compressobj(2, 8, -12, 9, 1)
bufs = []
for i in range(0, len(buf), 256):
bufs.append(co.compress(buf[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
decomp1 = zlib.decompress(combuf, -12, -5)
if decomp1 != buf:
print "decompress with init options failed"
else:
print "decompress with init options succeeded"
deco = zlib.decompressobj(-12)
bufs = []
for i in range(0, len(combuf), 128):
bufs.append(deco.decompress(combuf[i:i+128]))
bufs.append(deco.flush())
decomp2 = ''.join(bufs)
if decomp2 != buf:
print "decompressobj with init options failed"
else:
print "decompressobj with init options succeeded"
print "should be '':", `deco.unconsumed_tail`
# Check a decompression object with max_length specified
deco = zlib.decompressobj(-12)
cb = combuf
bufs = []
while cb:
max_length = 1 + len(cb)/10
chunk = deco.decompress(cb, max_length)
if len(chunk) > max_length:
print 'chunk too big (%d>%d)' % (len(chunk),max_length)
    bufs.append(chunk)
cb = deco.unconsumed_tail
bufs.append(deco.flush())
decomp2 = ''.join(bufs)
if decomp2 != buf:
print "max_length decompressobj failed"
else:
print "max_length decompressobj succeeded"
# Misc tests of max_length
deco = zlib.decompressobj(-12)
try:
deco.decompress("", -1)
except ValueError:
pass
else:
print "failed to raise value error on bad max_length"
print "unconsumed_tail should be '':", `deco.unconsumed_tail`
# Test flush() with the various options, using all the different levels
# in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt if hasattr(zlib, opt)]
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
d = obj.compress( buf[:3000] )
d = d + obj.flush( sync )
d = d + obj.compress( buf[3000:] )
d = d + obj.flush()
if zlib.decompress(d) != buf:
print "Decompress failed: flush mode=%i, level=%i" % (sync,level)
del obj
# Test for the odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
random.seed(1)
print 'Testing on 17K of random data'
if hasattr(zlib, 'Z_SYNC_FLUSH'):
# Create compressor and decompressor objects
c=zlib.compressobj(9)
d=zlib.decompressobj()
# Try 17K of data
# generate random data stream
a=""
for i in range(17*1024):
a=a+chr(random.randint(0,255))
# compress, sync-flush, and decompress
t = d.decompress( c.compress(a)+c.flush(zlib.Z_SYNC_FLUSH) )
# if decompressed data is different from the input data, choke.
if len(t) != len(a):
print len(a),len(t),len(d.unused_data)
raise TestFailed, "output of 17K doesn't match"
def ignore():
"""An empty function with a big string.
Make the compression algorithm work a little harder.
"""
"""
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
|
Purg/SMQTK
|
bin/memex/hackathon_2016_07/cp1/ad_image_classification/cnn_finetuning/score_eval_data.py
|
Python
|
bsd-3-clause
| 5,443
| 0.003858
|
import csv
import collections
import json
import numpy
from smqtk.utils.bin_utils import logging, initialize_logging
from smqtk.representation.data_set.memory_set import DataMemorySet
from smqtk.algorithms.descriptor_generator.caffe_descriptor import CaffeDescriptorGenerator
from smqtk.algorithms.classifier.index_label import IndexLabelClassifier
from smqtk.representation import ClassificationElementFactory
from smqtk.representation.classification_element.memory import MemoryClassificationElement
from smqtk.representation.descriptor_index.memory import MemoryDescriptorIndex
# in-memory data-set file cache
EVAL_DATASET = "eval.dataset.pickle"
CAFFE_DEPLOY = "CHANGE_ME"
CAFFE_MODEL = "CHANGE_ME"
CAFFE_IMG_MEAN = "CHANGE_ME"
# new-line separated file of index labels.
# Line index should correspond to caffe train/test truth labels.
CAFFE_LABELS = "labels.txt"
# CSV file detailing [cluster_id, ad_id, image_sha1] relationships.
EVAL_CLUSTERS_ADS_IMAGES_CSV = "eval.CP1_clusters_ads_images.csv"
# json-lines file of clusters missing from the above file. Should be at least
# composed of: {"cluster_id": <str>, ... }
EVAL_MISSING_CLUSTERS = "eval.cluster_scores.missing_clusters.jl"
OUTPUT_DESCR_PROB_INDEX = "cp1_img_prob_descriptors.pickle"
OUTPUT_MAX_JL = "cp1_scores_max.jl"
OUTPUT_AVG_JL = "cp1_scores_avg.jl"
###############################################################################
# Compute classification scores
initialize_logging(logging.getLogger('smqtk'), logging.DEBUG)
eval_data_set = DataMemorySet(EVAL_DATASET)
img_prob_descr_index = MemoryDescriptorIndex(OUTPUT_DESCR_PROB_INDEX)
img_prob_gen = CaffeDescriptorGenerator(CAFFE_DEPLOY, CAFFE_MODEL, CAFFE_IMG_MEAN,
'prob', batch_size=1000, use_gpu=True,
load_truncated_images=True)
img_c_mem_factory = ClassificationElementFactory(
MemoryClassificationElement, {}
)
img_prob_classifier = IndexLabelClassifier(CAFFE_LABELS)
eval_data2descr = {}
d_to_proc = set()
for data in eval_data_set:
if not img_prob_descr_index.has_descriptor(data.uuid()):
d_to_proc.add(data)
else:
eval_data2descr[data] = img_prob_descr_index[data.uuid()]
if d_to_proc:
    eval_data2descr.update(
        img_prob_gen.compute_descriptor_async(d_to_proc)
    )
assert len(eval_data2descr) == eval_data_set.count()
index_additions = []
# Collect the freshly computed descriptors before clearing the set,
# otherwise nothing would ever be added to the prob index.
for data in d_to_proc:
    index_additions.append(eval_data2descr[data])
d_to_proc.clear()
print "Adding %d new descriptors to prob index" % len(index_additions)
img_prob_descr_index.add_many_descriptors(index_additions)
eval_descr2class = img_prob_classifier.classify_async(eval_data2descr.values(), img_c_mem_factory)
###############################################################################
# The shas that were actually computed
computed_shas = {e.uuid() for e in eval_data2descr}
len(computed_shas)
cluster2ads = collections.defaultdict(set)
cluster2shas = collections.defaultdict(set)
ad2shas = collections.defaultdict(set)
sha2ads = collections.defaultdict(set)
with open(EVAL_CLUSTERS_ADS_IMAGES_CSV) as f:
reader = csv.reader(f)
for i, r in enumerate(reader):
if i == 0:
# skip header line
continue
c, ad, sha = r
if sha in computed_shas:
cluster2ads[c].add(ad)
cluster2shas[c].add(sha)
ad2shas[ad].add(sha)
sha2ads[sha].add(ad)
assert len(sha2ads) == len(computed_shas)
###############################################################################
print "Collecting scores for SHA1s"
sha2score = {}
for c in eval_descr2class.values():
sha2score[c.uuid] = c['positive']
print "Collecting scores for ads (MAX and AVG)"
ad2score_max = {}
ad2score_avg = {}
for ad, child_shas in ad2shas.iteritems():
scores = [sha2score[sha] for sha in child_shas]
ad2score_max[ad] = numpy.max(scores)
ad2score_avg[ad] = numpy.average(scores)
# select cluster score from max and average of child ad scores
print "Collecting scores for ads (MAX and AVG)"
cluster2score_max = {}
cluster2score_avg = {}
for c, child_ads in cluster2ads.iteritems():
cluster2score_max[c] = numpy.max( [ad2score_max[ad] for ad in child_ads])
cluster2score_avg[c] = numpy.average([ad2score_avg[ad] for ad in child_ads])
len(cluster2score_max)
###############################################################################
missing_clusters = {json.loads(l)['cluster_id'] for l in open(EVAL_MISSING_CLUSTERS)}
cluster_id_order = sorted(set(cluster2score_avg) | missing_clusters)
with open(OUTPUT_MAX_JL, 'w') as f:
for c in cluster_id_order:
if c in cluster2score_max:
f.write( json.dumps({"cluster_id": c, "score": float(cluster2score_max[c])}) + '\n' )
else:
# Due to a cluster having no child ads with imagery
print "No childred with images for cluster '%s'" % c
f.write( json.dumps({"cluster_id": c, "score": 0.5}) + '\n' )
with open(OUTPUT_AVG_JL, 'w') as f:
for c in cluster_id_order:
if c in cluster2score_avg:
f.write( json.dumps({"cluster_id": c, "score
|
": float(cluster2score_avg[c])}) + '\n' )
else:
# Due to a cluster having no child ads with imagery
print "No childred with images for cluster '%s'" % c
f.write( json.dumps({"cluster_id": c, "score": 0.5}) + '\n' )
|
Elico-Corp/openerp-7.0
|
balance_sheet_extended/__init__.py
|
Python
|
agpl-3.0
| 170
| 0
|
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
import report
import wizard
|
googleapis/python-bigquery-storage
|
samples/snippets/__init__.py
|
Python
|
apache-2.0
| 601
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
acockburn/appdaemon
|
appdaemon/sequences.py
|
Python
|
mit
| 4,617
| 0.002816
|
import uuid
import asyncio
from appdaemon.appdaemon import AppDaemon
class Sequences:
def __init__(self, ad: AppDaemon):
self.AD = ad
self.logger = ad.logging.get_child("_sequences")
async def run_sequence_service(self, namespace, domain, service, kwargs):
if "entity_id" not in kwargs:
self.logger.warning("entity_id not given in service call, so will not be executing %s", service)
return
# await self.run_sequence("_services", namespace, kwargs["entity_id"])
self.AD.thread_async.call_async_no_wait(self.run_sequence, "_services", namespace, kwargs["entity_id"])
async def add_sequences(self, sequences):
for sequence in sequences:
entity = "sequence.{}".format(sequence)
attributes = {
"friendly_name": sequences[sequence].get("name", sequence),
"loop": sequences[sequence].get("loop", False),
"steps": sequences[sequence]["steps"],
}
if not await self.AD.state.entity_exists("rules", entity):
# it doesn't exist so add it
await self.AD.state.add_entity(
"rules", entity, "idle", attributes=attributes,
)
else:
await self.AD.state.set_state(
"_sequences", "rules", entity, state="idle", attributes=attributes, replace=True
)
async def remove_sequences(self, sequences):
if not isinstance(sequences, list):
sequences = [sequences]
for sequence in sequences:
await self.AD.state.remove_entity("rules", "sequence.{}".format(sequence))
    async def run_sequence(self, _name, namespace, sequence):
coro = self.prep_sequence(_name, namespace, sequence)
#
# OK, lets run it
#
future = asyncio.ensure_future(coro)
self.AD.futures.add_future(_name, future)
return future
async def prep_sequence(self, _name, namespace, sequence):
ephemeral_entity = False
loop = False
if isinstance(sequence, str):
entity_id = sequence
if await self.AD.state.entity_exists("rules", entity_id) is False:
self.logger.warning('Unknown sequence "%s" in run_sequence()', sequence)
return None
entity = await self.AD.state.get_state("_services", "rules", sequence, attribute="all")
seq = entity["attributes"]["steps"]
loop = entity["attributes"]["loop"]
else:
#
# Assume it's a list with the actual commands in it
#
entity_id = "sequence.{}".format(uuid.uuid4().hex)
# Create an ephemeral entity for it
ephemeral_entity = True
await self.AD.state.add_entity("rules", entity_id, "idle", attributes={"steps": sequence})
seq = sequence
coro = await self.do_steps(namespace, entity_id, seq, ephemeral_entity, loop)
return coro
@staticmethod
async def cancel_sequence(_name, future):
future.cancel()
async def do_steps(self, namespace, entity_id, seq, ephemeral_entity, loop):
await self.AD.state.set_state("_sequences", "rules", entity_id, state="active")
try:
while True:
for step in seq:
for command, parameters in step.items():
if command == "sleep":
await asyncio.sleep(float(parameters))
elif command == "sequence":
# Running a sub-sequence so just recurse
await self.prep_sequence("_sequence", namespace, parameters)
pass
else:
domain, service = str.split(command, "/")
if "namespace" in parameters:
ns = parameters["namespace"]
del parameters["namespace"]
else:
ns = namespace
parameters["__name"] = entity_id
await self.AD.services.call_service(ns, domain, service, parameters)
if loop is not True:
break
finally:
await self.AD.state.set_state("_sequences", "rules", entity_id, state="idle")
if ephemeral_entity is True:
await self.AD.state.remove_entity("rules", entity_id)
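# Hedged illustration (shape inferred from do_steps above, not taken from the
# AppDaemon docs): a sequence passed to run_sequence() is a list of
# single-key dicts, where each key is either "sleep", "sequence", or a
# "<domain>/<service>" call with its parameters.
EXAMPLE_STEPS = [
    {"light/turn_on": {"entity_id": "light.porch"}},
    {"sleep": 5},
    {"light/turn_off": {"entity_id": "light.porch"}},
]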
|
jonocodes/gedit-code-assistance
|
backends/xml/gcpbackendxml/backend.py
|
Python
|
gpl-3.0
| 1,683
| 0.001783
|
# gcp xml backend
# Copyright (C) 2012 Jesse van den Kieboom <jessevdk@gnome.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import GObject, Gcp
from document import Document
class Backend(GObject.Object, Gcp.Backend):
size = GObject.property(type=int, flags = GObject.PARAM_READABLE)
def __init__(self):
GObject.Object.__init__(self)
self.documents = []
def do_get_property(self, spec):
if spec.name == 'size':
return len(self.documents)
GObject.Object.do_get_property(self, spec)
def do_register_document(self, doc):
d = Document(document=doc)
self.documents.append(d)
d.connect('changed', self.on_document_changed)
return d
def do_unregister_document(self, doc):
doc.disconnect_by_func(self.on_document_changed)
self.documents.remove(doc)
def do_get(self, idx):
return self.documents[idx]
def on_document_changed(self, doc):
doc.update()
# ex:ts=4:et:
|
emoitzi/django-excel-viewer
|
users/__init__.py
|
Python
|
gpl-3.0
| 44
| 0.022727
|
default_app_config = 'users.apps.UserConfig'
|
ikvk/imap_tools
|
examples/fetch_by_pages.py
|
Python
|
apache-2.0
| 605
| 0.003306
|
from imap_tools import MailBox
with MailBox('imap.mail.com').login('test@mail.com', 'pwd', 'INBOX') as mailbox:
criteria = 'ALL'
found_nums = mailbox.numbers(criteria)
page_len = 3
pages = int(len(found_nums) // page_len) + 1 if len(found_nums) % page_len else int(len(found_nums) // page_len)
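    # Equivalent page-count sketch (same result as the line above):
    #   import math; pages = math.ceil(len(found_nums) / page_len)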
for page in range(pages):
print('page {}'.format(page))
page_limit = slice(page * page_len, page * page_len + page_len)
print(page_limit)
for msg in mailbox.fetch(criteria, bulk=True, limit=page_limit):
print(' ', msg.date, msg.uid, msg.subject)
|
konstantinKim/vd-backend
|
app/materials/models.py
|
Python
|
mit
| 2,427
| 0.021426
|
from marshmallow_jsonapi import Schema, fields
from marshmallow import validate
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import SQLAlchemyError
db = SQLAlchemy(session_options={"autoflush": False})
class CRUD():
def add(self, resource):
db.session.add(resource)
return db.session.commit()
def update(self):
return db.session.commit()
def delete(self, resource):
db.session.delete(resource)
return db.session.commit()
class Materials(db.Model, CRUD):
__tablename__ = 'materials'
MATERIAL_ID = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), unique=True, nullable=False)
cn_id = db.Column(db.Integer)
pt_id = db.Column(db.Integer)
class MaterialsSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
id = fields.Integer()
MATERIAL_ID = fields.Integer(primary_key=True)
name = fields.String(validate=not_blank)
#self links
def get_top_level_links(self, data, many):
self_link = ''
if many:
self_link = "/materials/"
else:
if 'attributes' in data:
self_link = "/materials/{}".forma
|
t(data['attributes']['MATERIAL_ID'])
return {'self': self_link}
class Meta:
type_ = 'materials'
class MaterialsSalvage(db.Model, CRUD):
__tablename__ = 'materials_salvage'
MATERIAL_SALVAGE_ID = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), unique=True, nullable=False)
class MaterialsSalvageSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
id = fields.Integer()
MATERIAL_SALVAGE_ID = fields.Integer(primary_key=True)
name = fields.String(validate=not_blank)
#self links
def get_top_level_links(self, data, many):
self_link = ''
if many:
self_link = "/materials/salvage/"
else:
if 'attributes' in data:
self_link = "/materials/salvage/{}".format(data['attributes']['MATERIAL_SALVAGE_ID'])
return {'self': self_link}
class Meta:
type_ = 'materials_salvage'
|
marwoodandrew/superdesk-aap
|
server/aap/data_consistency/compare_repositories.py
|
Python
|
agpl-3.0
| 9,289
| 0.002153
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
import requests
from superdesk.utc import utcnow
from eve.utils import ParsedRequest
import json
from superdesk.default_settings import ELASTICSEARCH_INDEX, ELASTICSEARCH_URL
class CompareRepositories(superdesk.Command):
default_page_size = 500
option_list = [
superdesk.Option('--resource', '-r', dest='resource_name', required=True),
superdesk.Option('--analysiscount', '-a', dest='analysis_count', required=True)
]
resource_name = ''
analysis_count = 100
    def get_mongo_items(self, consistency_record):
# get the records from mongo in chunks
projection = dict(superdesk.resources[self.resource_name].endpoint_schema['datasource']['projection'])
        superdesk.resources[self.resource_name].endpoint_schema['datasource']['projection'] = None
service = superdesk.get_resource_service(self.resource_name)
cursor = service.get_from_mongo(None, {})
count = cursor.count()
no_of_buckets = len(range(0, count, self.default_page_size))
mongo_items = []
updated_mongo_items = []
request = ParsedRequest()
request.projection = json.dumps({'_etag': 1, '_updated': 1})
for x in range(0, no_of_buckets):
skip = x * self.default_page_size
print('Page : {}, skip: {}'.format(x + 1, skip))
            # skip records created after the run started, so the mongo snapshot matches the elastic one
cursor = service.get_from_mongo(request, {'_created': {'$lte': consistency_record['started_at']}})
cursor.skip(skip)
cursor.limit(self.default_page_size)
cursor = list(cursor)
mongo_items.extend([(mongo_item['_id'], mongo_item['_etag']) for mongo_item in cursor])
updated_mongo_items.extend([mongo_item['_id'] for mongo_item in cursor
if mongo_item['_updated'] > consistency_record['started_at']])
superdesk.resources[self.resource_name].endpoint_schema['datasource']['projection'] = projection
return mongo_items, updated_mongo_items
def get_mongo_item(self, id):
service = superdesk.get_resource_service(self.resource_name)
return list(service.get_from_mongo(None, {'_id': id}))[0]
def get_elastic_item(self, id):
resource = superdesk.get_resource_service(self.resource_name)
query = {'query': {'filtered': {'filter': {'term': {'_id': id}}}}}
request = ParsedRequest()
request.args = {'source': json.dumps(query)}
items = resource.get(req=request, lookup=None)
return items[0]
def get_elastic_items(self, elasticsearch_index, elasticsearch_url):
# get the all hits from elastic
post_data = {'fields': ['_etag']}
response = requests.get('{}/{}/{}'.format(elasticsearch_url,
elasticsearch_index, '_search?size=5000&q=*:*'), params=post_data)
elastic_results = response.json()["hits"]['hits']
elastic_items = [(elastic_item['_id'], elastic_item["fields"]['_etag'][0])
for elastic_item in elastic_results]
return elastic_items
def process_results(self,
consistency_record,
elastic_items,
mongo_items,
updated_mongo_items,
analyse_differences=True):
# form the sets
mongo_item_ids = list(map(list, zip(*mongo_items)))[0]
mongo_item_ids_set = set(mongo_item_ids)
elastic_item_ids = list(map(list, zip(*elastic_items)))[0]
elastic_item_ids_set = set(elastic_item_ids)
mongo_items_set = set(mongo_items)
elastic_items_set = set(elastic_items)
updated_mongo_items_set = set(updated_mongo_items)
differences = []
# items that exist both in mongo and elastic with the same etags
shared_items = mongo_items_set & elastic_items_set
# items that exist only in mongo but not in elastic
mongo_only = mongo_item_ids_set - elastic_item_ids_set
# items that exist only in elastic but not in mongo
elastic_only = elastic_item_ids_set - mongo_item_ids_set
# items that exist both in mongo and elastic with different etags
# filter out the ones that has been updated since elastic is queried
different_items = (elastic_items_set ^ mongo_items_set) - updated_mongo_items_set
if len(different_items) > 0:
different_items = set(list(map(list, zip(*list(different_items))))[0]) \
- updated_mongo_items_set \
- mongo_only \
- elastic_only
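        # Hedged walk-through (ids invented): if mongo holds {("a", e1), ("b", e2)}
        # and elastic holds {("a", e1), ("b", e3)}, then shared_items is {("a", e1)},
        # mongo_only and elastic_only are empty, and "b" ends up in different_items
        # unless it was updated after the consistency run started.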
if analyse_differences:
differences = self.analyse_differences(different_items)
consistency_record['completed_at'] = utcnow()
consistency_record['mongo'] = len(mongo_items)
consistency_record['elastic'] = len(elastic_items)
consistency_record['identical'] = len(shared_items)
consistency_record['mongo_only'] = len(mongo_only)
consistency_record['mongo_only_ids'] = list(mongo_only)
consistency_record['elastic_only'] = len(elastic_only)
consistency_record['elastic_only_ids'] = list(elastic_only)
consistency_record['inconsistent'] = len(different_items)
consistency_record['inconsistent_ids'] = list(different_items)
consistency_record['differences'] = differences
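# Illustrative worked example (hypothetical values, not part of the original class):
# with mongo_items   = {('a', 'etag1'), ('b', 'etag2')}
# and  elastic_items = {('a', 'etag1'), ('b', 'etag9')}
# the symmetric difference is {('b', 'etag2'), ('b', 'etag9')}, so item 'b' is
# reported as inconsistent unless its id is in updated_mongo_items, i.e. the
# document changed after the consistency check started.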
def analyse_differences(self, different_items):
all_differences = []
counter = 1
for item in different_items:
differences = []
mongo_item = self.get_mongo_item(item)
elastic_item = self.get_elastic_item(item)
print('Analysing item# {}'.format(counter))
self.compare_dicts(mongo_item, elastic_item, differences)
all_differences.append({'_id': item, 'differences': differences})
counter += 1
if counter > self.analysis_count:
break
return all_differences
def are_lists_equal(self, list_1, list_2):
if len(list_1) > 0 and not isinstance(list_1[0], dict):
# the lists are equal when their symmetric difference is empty
return len(set(list_1) ^ set(list_2)) == 0
else:
# lists of dicts are not compared element by element; treat them as equal
return True
def compare_dicts(self, dict_1, dict_2, differences=None):
if differences is None:
differences = list()
diff_keys = list(set(dict_1.keys()) ^ set(dict_2.keys()))
if len(diff_keys) > 0:
# there are differences in keys so report them
differences.extend(diff_keys)
self.compare_dict_values(dict_1, dict_2, differences)
return list(set(differences))
def compare_dict_values(self, dict_1, dict_2, differences=None):
if differences is None:
differences = list()
for key in dict_1.keys():
if key in differences:
continue
if key not in dict_2:
differences.append(key)
continue
if isinstance(dict_1[key], list):
if not self.are_lists_equal(dict_1[key], dict_2[key]):
differences.append(key)
elif isinstance(dict_1[key], dict):
differences.extend(self.compare_dicts(dict_1[key], dict_2[key]))
else:
if not dict_1[key] == dict_2[key]:
differences.append(key)
def run(self, resource_name,
elasticsearch_url=ELASTICSEARCH_URL,
elasticsearch_index=ELASTICSEARCH_INDEX,
analysis_count=100):
"""
Compares the records in mongo and elastic for a given collection
Saves the results to "consistency" collection
:param resource_name: Name of the collection i.e. ingest, archive, published, t
|
Johnetordoff/osf.io
|
osf/migrations/0239_auto_20211110_1921.py
|
Python
|
apache-2.0
| 497
| 0.002012
|
# -*- coding: utf-8 -*-
|
# Generated by Django 1.11.28 on 2021-11-10 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0238_abstractprovider_allow_updates'),
]
operations = [
migrations.AddIndex(
model_name='schemaresponse',
index=models.Index(fields=['object_id', 'content_type'], nam
|
e='osf_schemar_object__8cc95e_idx'),
),
]
|
Karaage-Cluster/karaage-applications
|
kgapplications/south_migrations/0001_initial.py
|
Python
|
gpl-3.0
| 21,468
| 0.007965
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('applications', '0028_auto__chg_field_applicant_username'),
)
def forwards(self, orm):
if not db.dry_run:
orm['contenttypes.contenttype'].objects.filter(app_label='applications').update(app_label='kgapplications')
db.send_create_signal('kgapplications', ['Application'])
db.send_create_signal('kgapplications', ['ProjectApplication'])
db.send_create_signal('kgapplications', ['Applicant'])
return
# Adding model 'Application'
db.create_table('kgapplications_application', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('secret_token', self.gf('django.db.models.fields.CharField')(default='42c3a931174e42f6c76f41056ecad8d9c18aabd0', unique=True, max_length=64)),
('expires', self.gf('django.db.models.fields.DateTimeField')()),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['karaage.Person'], null=True, blank=True)),
('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('submitted_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('state', self.gf('django.db.models.fields.CharField')(max_length=5)),
('complete_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('header_message', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('_class', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('kgapplications', ['Application'])
# Adding model 'ProjectApplication'
db.create_table('kgapplications_projectapplication', (
('application_ptr
|
', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['kgapplications.Application'], unique=True, primary_key=True)),
('needs_account', self.gf('django.db.models.fields.BooleanField')(default=True)),
('make_leader', self.gf('django.db.models.fields.BooleanField')(default=False)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
|
('institute', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['karaage.Institute'], null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('additional_req', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('pid', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['karaage.Project'], null=True, blank=True)),
))
db.send_create_signal('kgapplications', ['ProjectApplication'])
# Adding M2M table for field machine_categories on 'ProjectApplication'
m2m_table_name = db.shorten_name('kgapplications_projectapplication_machine_categories')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('projectapplication', models.ForeignKey(orm['kgapplications.projectapplication'], null=False)),
('machinecategory', models.ForeignKey(orm['karaage.machinecategory'], null=False))
))
db.create_unique(m2m_table_name, ['projectapplication_id', 'machinecategory_id'])
# Adding model 'Applicant'
db.create_table('kgapplications_applicant', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)),
('email_verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
('username', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('short_name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('full_name', self.gf('django.db.models.fields.CharField')(max_length=60)),
('institute', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['karaage.Institute'], null=True, blank=True)),
('department', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('position', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('telephone', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('mobile', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('supervisor', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('address', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('postcode', self.gf('django.db.models.fields.CharField')(max_length=8, null=True, blank=True)),
('country', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
('fax', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('saml_id', self.gf('django.db.models.fields.CharField')(max_length=200, unique=True, null=True, blank=True)),
))
db.send_create_signal('kgapplications', ['Applicant'])
def backwards(self, orm):
if not db.dry_run:
orm['contenttypes.contenttype'].objects.filter(app_label='kgapplications').update(app_label='applications')
return
# Deleting model 'Application'
db.delete_table('kgapplications_application')
# Deleting model 'ProjectApplication'
db.delete_table('kgapplications_projectapplication')
# Removing M2M table for field machine_categories on 'ProjectApplication'
db.delete_table(db.shorten_name('kgapplications_projectapplication_machine_categories'))
# Deleting model 'Applicant'
db.delete_table('kgapplications_applicant')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'karaage.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group', 'db_table': "'people_group'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'foreign_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['karaage.Person']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'karaage.institu
|
r-kitaev/lucid-python-werkzeug
|
tests/contrib/test_wrappers.py
|
Python
|
bsd-3-clause
| 2,682
| 0
|
# -*- coding: utf-8 -*-
from werkzeug.contrib import wrappers
from werkzeug import routing
from werkzeug.wrappers import Request, Response
def test_reverse_slash_behavior():
"""Test ReverseSlashBehaviorRequestMixin"""
class MyRequest(wrappers.ReverseSlashBehaviorRequestMixin, Request):
pass
req = MyRequest.from_values('/foo/bar', 'http://example.com/test')
assert req.url == 'http://example.com/test/foo/bar'
assert req.path == 'foo/bar'
assert req.script_root == '/test/'
# make sure the routing system works with the slashes in
# reverse order as well.
map = routing.Map([routing.Rule('/foo/bar', endpoint='foo')])
adapter = map.bind_to_environ(req.environ)
assert adapter.match() == ('foo', {})
adapter = map.bind(req.host, req.script_root)
assert adapter.match(req.path) == ('foo', {})
def test_dynamic_charset_request_mixin():
"""Test DynamicCharsetRequestMixin"""
class MyRequest(wrappers.DynamicCharsetRequestMixin, Request):
pass
env = {'CONTENT_TYPE': 'text/html'}
req = MyRequest(env)
assert req.charset == 'latin1'
env = {'CONTENT_TYPE': 'text/html; charset=utf-8'}
req = MyRequest(env)
assert req.charset == 'utf-8'
env = {'CONTENT_TYPE': 'application/octet-stream'}
req = MyRequest(env)
assert req.charset == 'latin1'
assert req.url_charset == 'latin1'
MyRequest.url_charset = 'utf-8'
env = {'CONTENT_TYPE': 'application/octet-stream'}
req = MyRequest(env)
assert req.charset == 'latin1'
assert req.url_charset == 'utf-8'
def return_ascii(x):
return "ascii"
env = {'CONTENT_TYPE': 'text/plain; charset=x-weird-charset'}
req = MyRequest(env)
req.unknown_charset = return_ascii
assert req.charset == 'ascii'
assert req.url_charset == 'utf-8'
def test_dynamic_charset_response_mixin():
"""Test DynamicCharsetResponseMixin"""
class MyResponse(wrappers.DynamicCharsetResponseMixin, Response):
default_charset = 'utf-7'
resp = MyResponse(mimetype='text/
|
html')
assert resp.charset == 'utf-7'
resp.charset = 'utf-8'
assert resp.charset == 'utf-8'
assert resp.mimetype == 'text/html'
assert resp.mimetype_params == {'charset': 'utf-8'}
resp.mimetype_params['charset'] = 'iso-8859-15'
assert resp.charset == 'iso-8859-15'
resp.data = u'Hällo Wörld'
assert ''.join(resp.iter_encoded()) == \
u'Hällo Wörld'.encode('iso-8859-15')
del resp.heade
|
rs['content-type']
try:
resp.charset = 'utf-8'
except TypeError, e:
pass
else:
assert False, 'expected type error on charset setting without ct'
|
jcalbert/TextBlob
|
docs/conf.py
|
Python
|
mit
| 2,936
| 0.003406
|
# -*- coding: utf-8 -*-
import datetime as dt
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import textblob
sys.path.append(os.path.abspath("_themes"))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx_issues',
]
primary_domain = 'py'
default_role = 'py:obj'
issues_github_path = 'sloria/TextBlob'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TextBlob'
copyright = u'{0:%Y} <a href="http://stevenloria.com/">Steven Loria</a>'.format(
dt.datetime.utcnow()
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = textblob.__version__
exclude_patterns = ['_build']
pygments_style = 'flask_theme_support.FlaskyStyle'
html_theme = 'kr'
html_theme_path = ['_themes']
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['side-primary.html', 'searchbox.html'],
'**': ['side-secondary.html', 'localtoc.html',
'relations.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'textblobdoc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'TextBlob.tex', u'textblob Documentation',
u'Steven Loria', 'manual'),
]
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manua
|
l section).
man_pages = [
('index', 'textblob', u'textblob Documenta
|
tion',
[u'Steven Loria'], 1)
]
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'textblob', u'TextBlob Documentation',
u'Steven Loria', 'textblob', 'Simplified Python text-processing.',
'Natural Language Processing'),
]
|
nisavid/testbed
|
testbed/tests/redirect.py
|
Python
|
lgpl-3.0
| 4,763
| 0
|
#!/usr/bin/env python
""":mod:`Redirection <testbed.resources._redirect>` tests."""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import json as _json
import unittest as _unittest
from urllib import quote as _percent_encode
import napper as _napper
import spruce.logging as _logging
import testbed.testing as _testbedtest
class TestRedirections(_testbedtest.TestTestbed):
@property
def webservice_path(self):
return '/redirect'
@property
def webservice_probe_path(self):
return self._redirect_path
def _create_requests_session(self):
return _napper.WebRequestSession(follow_redirects=False)
class TestResponseRedirection(TestRedirections):
def test_get_response_redirect(self):
response = self.request('get', self._redirect_path)
self.assert_response_redirect_response(response,
loc=self._redirect_loc)
def test_get_response_redirect_as_html(self):
response = self.request('get',
self._redirect_path,
accept_mediaranges=('text/html',
'*/*; q=0.01'))
self.assert_response_redirect_response(response,
loc=self._redirect_loc,
contenttype='text/html')
def test_post_response_redirect(self):
response = self.request('post', self._redirect_path)
self.assert_response_redirect_response(response,
loc=self._redirect_loc)
def test_post_response_redirect_as_html(self):
response = self.request('post',
self._redirect_path,
accept_mediaranges=('text/html',
'*/*; q=0.01'))
self.assert_response_redirect_response(response,
loc=self._redirect_loc,
contenttype='text/html')
def test_postget_response_redirect(self):
response = self.request('postget', self._redirect_path)
self.assert_response_redirect_respo
|
nse(response,
loc=self._redirect_loc)
def test_postget_response_redirect_as_html(self):
response = self.request('postget',
self._redirect_path,
accept_mediaranges=('text/html',
'*/*; q=0.01'))
self.assert_response_redirect_response(response
|
,
loc=self._redirect_loc,
contenttype='text/html')
@property
def _redirect_loc(self):
return 'aoeu'
@property
def _redirect_path(self):
return 'response;loc={}'\
.format(_percent_encode(_json.dumps(self._redirect_loc),
safe=''))
class _TestRedirectionsCorsWithUntrustedOriginMixin(object):
def assert_response_redirect_response(self, response, **kwargs):
kwargs_ = {}
try:
kwargs_['contenttype'] = kwargs['contenttype']
except KeyError:
pass
self.assert_cors_rejected_response(response,
exc_name='CorsOriginForbidden',
**kwargs_)
class TestRedirectionsCorsActualWithTrustedOrigin\
(_testbedtest.TestCorsWithTrustedOrigin, _testbedtest.TestCorsActual,
TestRedirections):
pass
class TestRedirectionsCorsActualWithUntrustedOrigin\
(_TestRedirectionsCorsWithUntrustedOriginMixin,
_testbedtest.TestCorsWithUntrustedOrigin,
_testbedtest.TestCorsActual, TestRedirections):
pass
class TestRedirectionsCorsPreflightWithTrustedOrigin\
(_testbedtest.TestCorsWithTrustedOrigin,
_testbedtest.TestCorsPreflight, TestRedirections):
def assert_response_redirect_response(self, response, **kwargs):
kwargs_ = {}
try:
kwargs_['contenttype'] = kwargs['contenttype']
except KeyError:
pass
self.assert_cors_preflight_accepted_response(response, **kwargs_)
class TestRedirectionsCorsPreflightWithUntrustedOrigin\
(_TestRedirectionsCorsWithUntrustedOriginMixin,
_testbedtest.TestCorsWithUntrustedOrigin,
_testbedtest.TestCorsPreflight, TestRedirections):
pass
if __name__ == '__main__':
_logging.basicConfig()
_unittest.main()
|
DasIch/relief
|
tests/schema/test_mappings.py
|
Python
|
bsd-3-clause
| 12,224
| 0.000736
|
# coding: utf-8
"""
tests.schema.test_mappings
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2013 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import pytest
from relief import (
Dict, OrderedDict, Unicode, Integer, NotUnserializable, Form, Element,
_compat
)
from tests.conftest import python2_only
from tests.schema.conftest import ElementTest
class MappingTest(ElementTest):
def test_getitem(self, element_cls):
element = element_cls({u"foo": 1})
assert element[u"foo"].value == 1
def test_get(self, element_cls):
element = element_cls({u"foo": 1})
assert element.get(u"foo").value == 1
assert element.get(
|
u"bar").value is NotUnserializable
assert element.get(u"bar").raw_value is None
def test_iter(self, element_cls):
keys = list(element_cls({u"foo": 1}))
assert len(keys) == 1
assert keys[0].value =
|
= u"foo"
@python2_only
def test_iterkeys(self, element_cls):
keys = list(element_cls({u"foo": 1}).iterkeys())
assert len(keys) == 1
assert keys[0].value == u"foo"
@python2_only
def test_viewkeys(self, element_cls):
keys = list(element_cls({u"foo": 1}).viewkeys())
assert len(keys) == 1
assert keys[0].value == u"foo"
def test_keys(self, element_cls):
keys = list(element_cls({u"foo": 1}).keys())
assert len(keys) == 1
assert keys[0].value == u"foo"
@python2_only
def test_itervalues(self, element_cls):
values = list(element_cls({u"foo": 1}).itervalues())
assert len(values) == 1
assert values[0].value == 1
@python2_only
def test_viewvalues(self, element_cls):
values = list(element_cls({u"foo": 1}).viewvalues())
assert len(values) == 1
assert values[0].value == 1
def test_values(self, element_cls):
values = list(element_cls({u"foo": 1}).values())
assert len(values) == 1
assert values[0].value == 1
@python2_only
def test_iteritems(self, element_cls):
items = list(element_cls({u"foo": 1}).iteritems())
assert len(items) == 1
assert items[0][0].value == u"foo"
assert items[0][1].value == 1
@python2_only
def test_viewitems(self, element_cls):
items = list(element_cls({u"foo": 1}).viewitems())
assert len(items) == 1
assert items[0][0].value == u"foo"
assert items[0][1].value == 1
def test_items(self, element_cls):
items = list(element_cls({u"foo": 1}).items())
assert len(items) == 1
assert items[0][0].value == u"foo"
assert items[0][1].value == 1
def test_set_list_of_tuples(self, element_cls):
element = element_cls([(u"foo", 1)])
assert element.raw_value == [(u"foo", 1)]
assert element.value == {u"foo": 1}
def test_set_non_mapping(self, element_cls):
element = element_cls(1)
assert element.raw_value == 1
assert element.value is NotUnserializable
def test_validate_empty(self, element_cls):
element = element_cls()
assert element.is_valid is None
assert not element.validate()
assert not element.is_valid
def test_validate_is_recursive(self):
validators = []
def key_validator(element, state):
validators.append("key")
return True
def value_validator(element, state):
validators.append("value")
return True
element = Dict.of(
Integer.validated_by([key_validator]),
Integer.validated_by([value_validator])
)({1: 1})
assert element.validate()
assert element.is_valid
assert validators == ["key", "value"]
def test_validate_value_empty(self, element_cls):
element = element_cls({})
assert element.is_valid is None
assert element.validate()
assert element.is_valid
def test_validate_value(self, element_cls):
element = element_cls({"foo": "1"})
assert element.raw_value == {"foo": "1"}
assert element.value == {u"foo": 1}
assert element.is_valid is None
assert element.validate()
assert element.is_valid
def test_validate_invalid_value(self, element_cls):
element = element_cls({"foo": "foo"})
assert element.raw_value == {"foo": "foo"}
assert element.value is NotUnserializable
assert element.is_valid is None
assert not element.validate()
assert not element.is_valid
class MutableMappingTest(MappingTest):
def test_setitem(self, element_cls):
element = element_cls()
with pytest.raises(TypeError):
element[u"foo"] = 1
@pytest.mark.parametrize('method', [
'setdefault', 'popitem', 'pop', 'update', 'clear'
])
def test_mutating_method_missing(self, element_cls, method):
element = element_cls()
assert not hasattr(element, method)
with pytest.raises(AttributeError):
getattr(element, method)
class TestDict(MutableMappingTest):
@pytest.fixture
def element_cls(self):
return Dict.of(Unicode, Integer)
@pytest.fixture
def possible_value(self):
return {u"foo": 1}
@python2_only
def test_has_key(self, element_cls):
assert element_cls({u"foo": 1}).has_key(u"foo")
assert not element_cls({u"foo": 1}).has_key(u"bar")
def test_set_strict(self, element_cls):
element = element_cls.using(strict=True)({u"foo": 1})
assert element.raw_value == {u"foo": 1}
assert element.value == {u"foo": 1}
def test_set_strict_raw(self, element_cls):
element = element_cls.using(strict=True)([(u"foo", 1)])
assert element.raw_value == [(u"foo", 1)]
assert element.value is NotUnserializable
def test_retains_ordering(self, element_cls):
value = [
(u"foo", 1),
(u"bar", 2),
(u"baz", 3)
]
assert element_cls(value).value == _compat.OrderedDict(value)
class TestOrderedDict(MutableMappingTest):
@pytest.fixture
def element_cls(self):
return OrderedDict.of(Unicode, Integer)
@pytest.fixture
def possible_value(self):
return _compat.OrderedDict([(u"foo", 1)])
@python2_only
def test_has_key(self, element_cls):
assert element_cls({u"foo": 1}).has_key(u"foo")
assert not element_cls({u"foo": 1}).has_key(u"bar")
def test_set_strict(self, element_cls):
value = _compat.OrderedDict({u"foo": 1})
element = element_cls.using(strict=True)(value)
assert element.raw_value == value
assert element.value == value
def test_set_strict_raw(self, element_cls):
element = element_cls.using(strict=True)({u"foo": 1})
assert element.raw_value == {u"foo": 1}
assert element.value is NotUnserializable
class TestForm(object):
def test_member_schema_ordering(self):
class Foo(Form):
spam = Element
eggs = Element
assert list(Foo.member_schema.keys()) == ["spam", "eggs"]
def test_member_schema_inheritance(self):
class Foo(Form):
spam = Element
class Bar(Foo):
eggs = Element
assert list(Bar.member_schema.keys()) == ["spam", "eggs"]
def test_getitem(self):
class Foo(Form):
spam = Unicode
foo = Foo({"spam": u"one"})
assert foo["spam"].value == u"one"
def test_contains(self):
class Foo(Form):
spam = Unicode
foo = Foo()
assert "spam" in foo
assert "eggs" not in foo
def test_len(self):
class Foo(Form):
spam = Unicode
assert len(Foo()) == 1
def test_iter(self):
class Foo(Form):
spam = Unicode
eggs = Unicode
assert list(Foo()) == ["spam", "eggs"]
@python2_only
def test_iterkeys(self):
class Foo(Form):
spam = Unicode
eggs = Unicode
assert list(Foo().iterkeys()) == ["s
|
EmanueleCannizzaro/scons
|
test/CC/SHCC.py
|
Python
|
mit
| 2,436
| 0.002463
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute,
|
sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEME
|
NT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/CC/SHCC.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write("wrapper.py",
"""import os
import sys
open('%s', 'wb').write("wrapper.py\\n")
os.system(" ".join(sys.argv[1:]))
""" % test.workpath('wrapper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
foo = Environment()
shcc = foo.Dictionary('SHCC')
bar = Environment(SHCC = r'%(_python_)s wrapper.py ' + shcc)
foo.SharedObject(target = 'foo/foo', source = 'foo.c')
bar.SharedObject(target = 'bar/bar', source = 'bar.c')
""" % locals())
test.write('foo.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.write('bar.c', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("foo.c\n");
exit (0);
}
""")
test.run(arguments = 'foo')
test.fail_test(os.path.exists(test.workpath('wrapper.out')))
test.run(arguments = 'bar')
test.fail_test(test.read('wrapper.out') != "wrapper.py\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
kblin/supybot-gsoc
|
src/commands.py
|
Python
|
bsd-3-clause
| 30,909
| 0.0033
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import types
import geto
|
pt
import inspect
import threading
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
import supybot.ircdb as ircdb
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbac
|
ks
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error, e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.info('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10:
return int(float(s))
else:
raise
def getInt(irc, msg, args, state, type='integer', p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type='non-integer value'):
try:
i = _int(args[0])
state.errorInvalid(type, args[0])
except ValueError:
state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = long(state.args[-1])
def getFloat(irc, msg, args, state, type='floating point number'):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type='positive integer', *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type='non-negative integer', *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type='index')
if state.args[-1] > 0:
state.args[-1] -= 1
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception, e:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid('number of seconds', args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid('boolean', args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() ==
|
KaranKamath/SequiturG2P
|
symbols.py
|
Python
|
gpl-2.0
| 2,130
| 0.018779
|
__author__ = 'Maximilian Bisani'
__version__ = '$LastChangedRevision: 1691 $'
__date__ = '$LastChangedDate: 2011-08-03 15:38:08 +0200 (Wed, 03 Aug 2011) $'
__copyright__ = 'Copyright (c) 2004-2005 RWTH Aachen University'
__license__ = """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 2 (June
1991) as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without
|
even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, you will find it at
http://www.gnu.org/licenses/gpl.html, or write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
USA.
Should a provision of no. 9 and 10 of the GNU General Public License
be invalid
|
or become invalid, a valid provision is deemed to have been
agreed upon which comes closest to what the parties intended
commercially. In any case guarantee/warranty shall be limited to gross
negligent actions or intended actions or fraudulent concealment.
"""
class SymbolInventory:
"""
0 (zero) is __void__ which is used internally as a terminator to
indicate end of a multigram
1 (one) is __term__, the end-of-string symbol (similar to the
end-of-sentence word in language modeling).
"""
term = 1
def __init__(self):
self.list = ['__void__', '__term__']
self.dir = { '__term__' : self.term }
def size(self):
"The number of symbols, including __term__, but not counting __void__."
return len(self.list) - 1
def index(self, sym):
try:
return self.dir[sym]
except KeyError:
result = self.dir[sym] = len(self.list)
self.list.append(sym)
return result
def parse(self, seq):
return tuple(map(self.index, list(seq)))
def symbol(self, ind):
return self.list[ind]
def format(self, seq):
return tuple(map(self.symbol, seq))
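# A minimal usage sketch (not part of the original module): indices are handed
# out on first use, after the reserved __void__ and __term__ entries.
if __name__ == '__main__':
    inv = SymbolInventory()
    seq = inv.parse('abcab')    # (2, 3, 4, 2, 3)
    print(inv.format(seq))      # ('a', 'b', 'c', 'a', 'b')
    print(inv.size())           # 4: __term__ plus 'a', 'b' and 'c'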
|
nttcom/eclcli
|
eclcli/bare/bareclient/ecl/common/apiclient/utils.py
|
Python
|
apache-2.0
| 2,975
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from oslo_utils import encodeutils
except ImportError:
from oslo.utils import encodeutils
import six
from .._i18n import _
from . import exceptions
from .. import uuidutils
def find_resource(manager, name_or_id, **find_args):
"""Look for resource in a given manager.
Used as a helper for the _find_* methods.
Example:
.. code-block:: python
def _find_hypervisor(cs, hypervisor):
#Get a hypervisor by name or ID.
return cliutils.find_resource(cs.hypervisors, hypervisor)
"""
# first try to get entity as integer id
try:
return manager.get(int(name_or_id))
except (TypeError, ValueError, exceptions.NotFound):
pass
# now try to get entity as uuid
try:
if six.PY2:
tmp_id = encodeutils.safe_encode(name_or_id)
else:
tmp_id = encodeutils.safe_decode(name_or_id)
if uuidutils.is_uuid_like(tmp_id):
return manager.get(tmp_id)
except (TypeError, ValueError, exceptions.NotFound):
pass
# for str id which is not uuid
if getattr(manager, 'is_alphanum_id_allowed', False):
try:
return manager.get(name_or_id)
except exceptions.NotFound:
pass
try:
try:
return manager.find(human_id=name_or_id, **find_args)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
resource = getattr(manager, 'resource_class', None)
name_attr = resource.NAME_ATTR if resource else 'name'
kwargs = {name_attr: name_or_id}
kwargs.update(find_args)
return manager.find(**kwargs)
except exceptions.NotFound:
msg = _("No %(name)s with a name or "
"ID of '%(name_or_id)s' exists.") % \
{
|
"name": manager.resource_class.__name__.lower(),
"name_or_id": name_or_id
}
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = _("Multiple %(name)s matches found for "
"'%(name_or_id)s', use an ID to be more specific.") % \
{
|
"name": manager.resource_class.__name__.lower(),
"name_or_id": name_or_id
}
raise exceptions.CommandError(msg)
|
jpschnel/maze-vision
|
maze_vision.py
|
Python
|
apache-2.0
| 2,411
| 0.072999
|
# 0 = open space, 1 = boundary, 2 = the robot, 3 = finish
def maze_vision():
path= ''
maze=[]
maze.append(list('000000002000000'))
maze.append(list('000000003001100'))
maze.append(list('000000000000000'))
maze.append(list('000000000000000'))
maze.append(list('000000000000000'))
maze.append(list('000000000000000'))
#print(maze)
fx=0
fy=0
sx=0
sy=0
#print(maze[0][8])
#print(len(maze[0]))
for x in range(0,len(maze[0])-1):
for y in range(0,len(maze)-1):
if maze[y][x]=='2':
sx=x
sy=y
elif maze[y][x]=='3':
fx=x
fy=y
#print(fx)
#print(fy)
#print(sx)
#print(sy)
ans= distance(maze,sx,sy,fx,fy,path)
print ("the shortest path is "+str(ans)+ " spaces")
print(path)
def distance(maze, sx, sy, fx, fy,path):
up= int(sy-1)
down= int(sy+1)
left = int(sx-1)
right = int(sx+1)
print(str(sx)+','+str(sy))
updist=3333333
downdist=6666666
leftdist=5555555
right
|
dist=4444444
if maze[sy][sx]=='3': #reached finish
print('hit')
return 0 #return
#up
# if up >-1:
# if maze[sy][up]=='0': #if this direction is open
# maze[sy][up]='4' #mark it as traveled to
# path= path +'u'
|
#add that direction to final path
# updist= 1+ distance(maze,up,sy,fx,fy,path) #calculate shortest dist from there
#if it makes it past here, that was not the shortest distance
#path= path[:-1] #remove that direction from final path
#maze[sy][up]=0 #mark that direction as not traveled
#down
print(down)
if down < (len(maze)-1):
print('w')
print(maze[down][sx])
if maze[down][sx]=='0':
maze[sy][sx]='4'
#path path +'d'
downdist= 1 + distance(maze,down,sy,fx,fy,path)
#path= path[:-1]
#maze[sy][down]='0'
#else:
#downdist=999999
#left
# if left>-1:
# if maze[left][sx]=='0':
# maze[left][sx]='4'
# path= path +'l'
# leftdist= 1+distance(maze,sx,left,fx,fy,path)
# path= path[:-1]
# maze[left][sx]='0'
#right
# if right<(len(maze[0])-1):
# if maze[sx][right]=='0':
# maze[sx][right]='4'
# path=path+'r'
# rightdist= 1+distance(maze,sx,right,fx,fy,path)
# path=path[:-1]
# maze[right][sx]='0'
#print(str(sx)+','+str(sy))
return min(updist,downdist,rightdist,leftdist)
# sum2= min(rightdist,leftdist)
# return min(sum2,sum1)
maze_vision()
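# A hedged alternative sketch (not the author's code): the same shortest-path
# question on this grid encoding is usually answered with a breadth-first search,
# which avoids the deep recursion used above.
from collections import deque
def bfs_shortest(maze, sx, sy, fx, fy):
    seen = {(sx, sy)}
    queue = deque([(sx, sy, 0)])
    while queue:
        x, y, dist = queue.popleft()
        if (x, y) == (fx, fy):
            return dist
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if (0 <= ny < len(maze) and 0 <= nx < len(maze[0])
                    and maze[ny][nx] != '1' and (nx, ny) not in seen):
                seen.add((nx, ny))
                queue.append((nx, ny, dist + 1))
    return -1  # the finish square is unreachable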
|
eharney/nova
|
nova/db/sqlalchemy/migration.py
|
Python
|
apache-2.0
| 2,918
| 0
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from nova.db.sqlalchemy import api as db_session
from nova import exception
from nova.openstack.common.gettextutils import _
INIT_VERSION = 215
_REPOSITORY = None
get_engine = db_session.get_engine
def db_sync(version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.NovaException(_("version should be an integer"))
current_version = db_version()
repository = _find_migrate_repo()
if version is None or version > current_version:
|
return versioning_api.upgrade(get_engine(), repositor
|
y, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def db_version():
repository = _find_migrate_repo()
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(INIT_VERSION)
return versioning_api.db_version(get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.NovaException(
_("Upgrade DB using Essex release first."))
def db_initial_version():
return INIT_VERSION
def db_version_control(version=None):
repository = _find_migrate_repo()
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
global _REPOSITORY
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
|
dominikkowalski/django-powerdns-dnssec
|
powerdns/routers.py
|
Python
|
bsd-2-clause
| 959
| 0
|
class PowerDNSRouter(object):
"""Route all operations on powerdns models to the powerdns database."""
db_name = 'powerdns'
app_name = 'powerdns'
def db_for_read(self, model, **hints):
if model._meta.app_label == self.app_name:
return self.db_name
return None
def db_for_write(self, model, **hi
|
nts):
if model._meta.app_label == self.app_name:
return self.db_name
return None
def allow_relation(self, obj1, obj2, **hints):
if (obj1._meta.app_label == self.app_name and
obj2._meta.app_label == self.app_name):
return True
return None
d
|
ef allow_syncdb(self, db, model):
if model._meta.app_label == self.app_name:
return db == self.db_name
elif db == self.db_name:
# workaround for http://south.aeracode.org/ticket/370
return model._meta.app_label == 'south'
return None
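# A minimal usage sketch (assumed settings, not part of this module): Django
# consults the router once it is listed in DATABASE_ROUTERS, for example:
#
#   DATABASE_ROUTERS = ['powerdns.routers.PowerDNSRouter']
#   DATABASES = {
#       'default': {...},
#       'powerdns': {...},  # connection used by the powerdns app
#   }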
|
smitchell556/cuttle
|
tests/test_cuttle_class.py
|
Python
|
mit
| 6,111
| 0.000327
|
# -*- coding: utf-8
"""
Tests related to the Cuttle class.
"""
import os
import unittest
import warnings
import time
from cuttle.reef import Cuttle, Column
from cuttlepool import CuttlePool
from cuttlepool.cuttlepool import PoolConnection
DB = '_cuttle_test_db'
DB2 = '_cuttle_test_db2'
HOST = 'localhost'
class BaseDbTestCase(unittest.TestCase):
def setUp(self):
self.Pool = CuttlePool
self.Connection = PoolConnection
self.credentials = dict(host=HOST)
self.sql_type = os.environ['TEST_CUTTLE'].lower()
if self.sql_type == 'mysql':
import pymysql
from mysql_credentials import USER, PASSWD
self.Cursor = pymysql.cursors.Cursor
self.connect = pymysql.connect
self.credentials.update(dict(user=USER, passwd=PASSWD))
self.db = Cuttle(self.sql_type, db=DB, **self.credentials)
class Heros(self.db.Model):
columns = [
Column('hero_id', 'INT', auto_increment=True, primary_key=True),
Column('hero_name', 'VARCHAR', maximum=16)
]
self.testtable1 = Heros
self.create_heros_statement = (
'CREATE TABLE IF NOT EXISTS {} (\n'
'hero_id INT AUTO_INCREMENT PRIMARY KEY,\n'
'hero_name VARCHAR(16)\n'
')').format(self.testtable1().name)
self.heros_schema = (('hero_id', 'int(11)', 'NO', 'PRI', None, 'auto_increment'),
('hero_name', 'varchar(16)', 'YES', '', None, ''))
def tearDown(self):
warnings.filterwarnings('ignore')
self.db.drop_db()
def createPool(self, **kwargs):
warnings.filterwarnings('ignore')
return CuttlePool(self.connect, **kwargs)
class DbNestedModelTestCase(BaseDbTestCase):
def setUp(self):
super(DbNestedModelTestCase, self).setUp()
class UselessTable(self.db.Model):
pass
self.uselesstable = UselessTable
class Villains(UselessTable):
columns = [
Column('villain_id', 'INT'),
Column('villain_name', 'VARCHAR', maximum=16)
]
self.testtable2 = Villains
class TwoDbTestCase(BaseDbTestCase):
def setUp(self):
super(TwoDbTestCase, self).setUp()
self.db2 = Cuttle(self.sql_type, db=DB2, **self.credentials)
class ThrowAway(self.db2.Model):
columns = [
Column('throwaway', 'INT')
]
self.testtable2 = ThrowAway
def tearDown(self):
super(TwoDbTestCase, self).tearDown()
self.db2.drop_db()
class CuttleInstanceTestCase(unittest.TestCase):
def test_improper_sql_type(self):
with self.assertRaises(ValueError):
db = Cuttle('wrongsql', db='db')
def test_no_db(self):
with self.assertRaises(ValueError):
db = Cuttle('mysql')
def test_name_property(self):
db_name = 'get_schwifty'
db = Cuttle('mysql', db=db_name)
self.assertEqual(db.name, db_name)
class CuttleCreateDbTestCase(BaseDbTestCase):
def test_create_db(self):
self.db.create_db()
pool = self.createPool(db=DB, **self.credentials)
con = pool.get_connection()
cur = con.cursor()
# get databases
cur.execute('SHOW DATABASES')
dbs = cur.fetchall()
self.assertIn((DB,), dbs)
def test_table_schema(self):
self.db.create_db()
pool = self.createPool(db=DB, **self.credentials)
con = pool.get_connection()
cur = con.cursor()
# get tables
cur.execute('SHOW TABLES')
tbls = cur.fetchall()
self.assertEqual(((self.testtable1().name,),), tbls)
# get table schema
cur.execute('DESCRIBE {}'.format(self.testtable1().name))
tblschma = cur.fetchall()
self.assertEqual(self.heros_schema, tblschma)
class CuttleCreateMultiDbTestCase(TwoDbTestCase):
def test_create_two_dbs(self):
self.db.create_db()
self.db2.create_db()
pool1 = self.createPool(db=DB, **self.credentials)
pool2 = self.createPool(db=DB2, **self.credent
|
ials)
con1 = pool1.get_connection()
cur1 = con1.cursor()
con2 = pool2.get_connection()
cur2 = con2.cursor()
# get databases
cur1.execute('SHOW DATABASES')
dbs = cur1.fetchall()
self.assertIn((DB,), dbs)
self.
|
assertIn((DB2,), dbs)
# get tables
cur1.execute('SHOW TABLES')
tbls1 = cur1.fetchall()
cur2.execute('SHOW TABLES')
tbls2 = cur2.fetchall()
self.assertIn((self.testtable1().name,), tbls1)
self.assertNotIn((self.testtable2().name,), tbls1)
self.assertIn((self.testtable2().name,), tbls2)
self.assertNotIn((self.testtable1().name,), tbls2)
class CuttleCreateDbNestedModelsTestCase(DbNestedModelTestCase):
def test_correct_tables_made(self):
self.db.create_db()
pool = self.createPool(db=DB, **self.credentials)
con = pool.get_connection()
cur = con.cursor()
# get tables
cur.execute('SHOW TABLES')
tbls = cur.fetchall()
self.assertIn((self.testtable1().name,), tbls)
self.assertIn((self.testtable2().name,), tbls)
self.assertNotIn((self.uselesstable().name,), tbls)
class CuttleDropDbTestCase(BaseDbTestCase):
def setUp(self):
super(CuttleDropDbTestCase, self).setUp()
self.db.create_db()
def test_drop_db(self):
pool = self.createPool(**self.credentials)
con = pool.get_connection()
cur = con.cursor()
# get databases
cur.execute('SHOW DATABASES')
dbs = cur.fetchall()
# make sure database actually exists
self.assertIn((DB,), dbs)
# drop the database
self.db.drop_db()
# get databases
cur.execute('SHOW DATABASES')
dbs = cur.fetchall()
# make sure database no longer exists
self.assertNotIn((DB,), dbs)
|
pavelpat/yased
|
src/tests/__init__.py
|
Python
|
apache-2.0
| 1,664
| 0.000601
|
# coding: utf-8
from unittest import TestCase
from yased import EventsDispatcher, Event
class AnyEvent(Event):
|
"""Represents any event."""
class EventsDispatcherTestCase(TestCase):
def setUp(self):
self.ed = EventsDispatcher()
def te
|
st_send(self):
calls = []
def handler(*args, **kwargs):
calls.append(True)
self.ed.connect(handler, AnyEvent)
self.ed.send(AnyEvent())
self.assertEqual(len(calls), 1)
self.ed.disconnect(handler, AnyEvent)
self.ed.send(AnyEvent())
self.assertEqual(len(calls), 1)
def test_send_with_sender(self):
calls = []
def handler(*args, **kwargs):
calls.append(True)
self.ed.connect(handler, AnyEvent, self)
self.ed.send(AnyEvent(), sender=self)
self.assertEqual(len(calls), 1)
self.ed.send(AnyEvent())
self.assertEqual(len(calls), 1)
self.ed.disconnect(handler, AnyEvent)
self.ed.send(AnyEvent(), sender=self)
self.assertEqual(len(calls), 2)
self.ed.disconnect(handler, AnyEvent, sender=self)
self.ed.send(AnyEvent(), sender=self)
self.assertEqual(len(calls), 2)
def test_send_args_kwargs(self):
calls = []
event_args = (1, 2, 3)
event_kwargs = {'a': 4, 'b': 5, 'c': 6}
def handler(*args, **kwargs):
calls.append(True)
self.assertEqual(args, event_args)
self.assertEqual(kwargs, event_kwargs)
self.ed.connect(handler, AnyEvent)
self.ed.send(AnyEvent(*event_args, **event_kwargs))
self.assertEqual(len(calls), 1)
|
themotleyfool/django-offline-messages
|
tests/settings.py
|
Python
|
bsd-3-clause
| 361
| 0
|
# django-offline-messages Test Settings
|
DATABASES = {'default': {'ENGINE': 'django.
|
db.backends.sqlite3'}}
INSTALLED_APPS = (
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'offline_messages',
'tests'
)
ROOT_URLCONF = ''
COVERAGE_ADDITIONAL_MODULES = ('offline_messages',)
|
vitaly-krugl/nupic
|
examples/opf/experiments/multistep/hotgym_best_sp/description.py
|
Python
|
agpl-3.0
| 2,842
| 0.007389
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it wi
|
ll be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------
|
------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config = \
{ 'modelParams': { 'clParams': { 'verbosity': 0},
'inferenceType': 'NontemporalMultiStep',
'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'n': 28,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'timestamp_dayOfWeek': None,
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': ( 21,
8),
'type': 'DateEncoder'},
'timestamp_weekend': None},
'verbosity': 0},
'spParams': { 'spVerbosity': 0},
'tmParams': { 'activationThreshold': 14,
'minThreshold': 12,
'verbosity': 1}}}
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
|
lnls-sirius/dev-packages
|
siriuspy/siriuspy/clientarch/__init__.py
|
Python
|
gpl-3.0
| 306
| 0
|
"""Subpackage for the Archiver server."""
fr
|
om ..envars import SRVURL_ARCHIVER as SERVER_URL
from .client import ClientArchiver
from .pvarch import PVDetails, PVData, PVDataSet
from .devices import Orbit, Correctors, TrimQuads
from .time import Time
from . import exceptions
del client,
|
pvarch, devices
|
openqt/algorithms
|
leetcode/python/lc754-reach-a-number.py
|
Python
|
gpl-3.0
| 1,163
| 0.009458
|
# coding=utf-8
import unittest
"""754. Reach a Number
https://leetcode.com/problems/reach-a-number/description/
You are standing at position `0` on an infinite number line. There is a goal
at position `target`.
On each move, you can either go left or right. During the _n_ -th move
(starting from 1), you take _n_ steps.
Return the minimum number of steps required to reach the destination.
**Example 1:**
**Input:** target = 3
**Output:** 2
**Explanation:**
On the first move we step from 0 to 1.
On the second step we step from 1 to 3.
**Example 2:**
**Input:** target = 2
**Output:** 3
**Explanation:**
On the fi
|
rst move we step from 0 to 1.
On the second move we step from 1 to -1.
On the third move we step from -1 to 2.
**Note:**
* `target` will be a non-zero integer in the range `[-10^9, 10^9]`.
Similar Questions:
"""
class Solution(object):
def reachNumber(self, target):
"""
:t
|
ype target: int
:rtype: int
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
danstoner/python_experiments
|
playing_with_kivy/kivi-examples/widgets/lists/list_reset_data.py
|
Python
|
gpl-2.0
| 2,398
| 0.000834
|
# -*- coding: utf-8 -*-
from kivy.uix.listview import ListView
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock
from kivy.adapters.listadapter import ListAdapter
from kivy.adapters.models import SelectableDataItem
from kivy.uix.listview import ListItemButton
from random import choice
from string import ascii_uppercase, digits
import random
c
|
lass DataItem(SelectableDataItem):
def __init__(self, **kwargs):
super(DataItem, self).__init__(**kwargs)
self.name = ''.join(choice(ascii_uppercase + digits) for x in range(6))
class MainView(FloatLayout):
"""
Implementation of a ListView using the kv language.
"""
def __init__(self, **kwargs):
super(Main
|
View, self).__init__(**kwargs)
data_items = []
data_items.append(DataItem())
data_items.append(DataItem())
data_items.append(DataItem())
list_item_args_converter = lambda row_index, obj: {'text': obj.name,
'size_hint_y': None,
'height': 25}
self.list_adapter = \
ListAdapter(data=data_items,
args_converter=list_item_args_converter,
selection_mode='single',
propagate_selection_to_data=False,
allow_empty_selection=False,
cls=ListItemButton)
self.list_view = ListView(adapter=self.list_adapter)
self.add_widget(self.list_view)
self.toggle = 'adding'
Clock.schedule_interval(self.update_list_data, 1)
def update_list_data(self, dt):
items = self.list_adapter.data
if self.toggle == 'adding':
item = DataItem(name='New ' * random.randint(1, 2))
items.append(item)
self.toggle = 'changing'
print 'added ' + item.name
else:
random_index = random.randint(0, len(items) - 1)
item = items[random_index]
items[random_index] = DataItem()
self.toggle = 'adding'
print 'changed {0} to {1}'.format(item.name,
items[random_index].name)
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
|
hpk42/pluggy
|
testing/benchmark.py
|
Python
|
mit
| 2,435
| 0
|
"""
Benchmarking and performance tests.
"""
import pytest
from pluggy import HookspecMarker, HookimplMarker, PluginManager
from pluggy._hooks import HookImpl
from pluggy._callers import _multicall
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
@hookimpl
def hook(arg1, arg2, arg3):
return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def wrapper(arg1, arg2, arg3):
yield
@pytest.fixture(params=[10, 100], ids="hooks={}".format)
def hooks(request):
return [hook for i in range(request.param)]
@pytest.fixture(params=[10, 100], ids="wrappers={}".format)
def wrappers(request):
    return [wrapper for i in range(request.param)]
def test_hook_and_wrappers_speed(benchmark, hooks, wrappers):
def setup():
hook_name = "foo"
hook_impls = []
for method in hooks + wrappers:
f = HookImpl(None, "<temp>", method, method.example_impl)
hook_impls.append(f)
caller_kwargs = {"arg1": 1, "arg2": 2, "arg3": 3}
firstresult = False
return (hook_name, hook_impls, caller_kwargs, firstresult), {}
benchmark.pedantic(_multicall, setup=setup)
@pytest.mark.parametrize(
("plugins, wrappers, nesting"),
[
(1, 1, 0),
(1, 1, 1),
(1, 1, 5),
(1, 5, 1),
(1, 5, 5),
(5, 1, 1),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(20, 20, 0),
(100, 100, 0),
],
)
def test_call_hook(benchmark, plugins, wrappers, nesting):
pm = PluginManager("example")
class HookSpec:
@hookspec
def fun(self, hooks, nesting: int):
yield
class Plugin:
def __init__(self, num):
self.num = num
def __repr__(self):
return f"<Plugin {self.num}>"
@hookimpl
def fun(self, hooks, nesting: int):
if nesting:
hooks.fun(hooks=hooks, nesting=nesting - 1)
class PluginWrap:
def __init__(self, num):
self.num = num
def __repr__(self):
return f"<PluginWrap {self.num}>"
@hookimpl(hookwrapper=True)
def fun(self):
yield
pm.add_hookspecs(HookSpec)
for i in range(plugins):
pm.register(Plugin(i), name=f"plug_{i}")
for i in range(wrappers):
pm.register(PluginWrap(i), name=f"wrap_plug_{i}")
benchmark(pm.hook.fun, hooks=pm.hook, nesting=nesting)
|
DarthGeek01/PopulationSimulator
|
Genetics/Allele.py
|
Python
|
gpl-2.0
| 6,542
| 0.003363
|
__author__ = 'ariel'
"""
Python Population Simulator
Copyright (C) 2015 Ariel Young
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from Genetics import Expressions, Genotypes
from Crypto.Random import random
class TraitAlleles(object):
traitPhenotypes = {"furColor" : {"dominant" : "long", "recessive" : "short"},
"furLength" : {"dominant" : "black", "recessive" : "brown"},
"isTa" : {"dominant" : "tall", "recessive" : "short"}}
traitIsComplete = False
expression = None
trait = None
genotype = None
phenotype = None
letterOne = str
letterTwo = str
choices = []
def __init__(self, trait, alleles=False, letter_one = None, letter_two = None):
        self.trait = trait
if alleles:
if letter_one != None:
self.letterOne = letter_one
if letter_two != None:
self.letterTwo = letter_two
if self.letterOne and self.letterTwo:
if self.letterOne.isupper() and self.letterTwo.isupper():
                self.expression = Expressions.HOMOZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif not self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HOMOZYGOUS_RECESSIVE
self.genotype = Genotypes.RECESSIVE
elif not self.letterOne.isupper() and self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
self.__determinePhenotype()
if trait == "furColor":
choices = list('Ff')
elif trait == "furLength":
choices = list('Ll')
elif trait == "isTall":
choices = list("Hh")
def getGenotype(self):
return self.genotype
def getExpression(self):
return self.expression
def setLetterOne(self, letter):
self.letterOne = letter
if self.letterOne and self.letterTwo:
if self.letterOne.isupper() and self.letterTwo.isupper():
self.expression = Expressions.HOMOZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif not self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HOMOZYGOUS_RECESSIVE
self.genotype = Genotypes.RECESSIVE
elif not self.letterOne.isupper() and self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
self.__determinePhenotype()
def setLetterTwo(self, letter):
self.letterTwo = letter
if self.letterOne and self.letterTwo:
if self.letterOne.isupper() and self.letterTwo.isupper():
self.expression = Expressions.HOMOZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif not self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HOMOZYGOUS_RECESSIVE
self.genotype = Genotypes.RECESSIVE
elif not self.letterOne.isupper() and self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
self.__determinePhenotype()
def getRandomAllele(self):
rand = random.randint(0, 1)
if rand:
return self.letterOne
else:
return self.letterTwo
def __determinePhenotype(self):
if self.genotype == Genotypes.DOMINANT:
self.phenotype = self.traitPhenotypes[self.trait]["dominant"]
else:
            self.phenotype = self.traitPhenotypes[self.trait]["recessive"]
self.choices = [self.letterOne, self.letterTwo]
def populateWithRandom(self):
self.letterOne = random.choice(self.choices)
self.letterTwo = random.choice(self.choices)
if self.letterOne.isupper() and self.letterTwo.isupper():
self.expression = Expressions.HOMOZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
elif not self.letterOne.isupper() and not self.letterTwo.isupper():
self.expression = Expressions.HOMOZYGOUS_RECESSIVE
self.genotype = Genotypes.RECESSIVE
elif not self.letterOne.isupper() and self.letterTwo.isupper():
self.expression = Expressions.HETEROZYGOUS_DOMINANT
self.genotype = Genotypes.DOMINANT
self.__determinePhenotype()
def getAlleles(self):
if self.letterOne and self.letterTwo:
return (self.letterOne, self.letterTwo)
elif self.letterOne and not self.letterTwo:
return self.letterOne
elif self.letterTwo and not self.letterOne:
return self.letterTwo
def getAllelesAsList(self):
return [self.letterOne, self.letterTwo]
def getPhenotype(self):
return self.phenotype
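    def _updateExpression(self):
        # Illustrative consolidation sketch: nothing in the original class
        # calls this helper, but the four-way case analysis repeated in
        # __init__, setLetterOne, setLetterTwo and populateWithRandom could
        # be centralised here.
        if self.letterOne.isupper() and self.letterTwo.isupper():
            self.expression = Expressions.HOMOZYGOUS_DOMINANT
            self.genotype = Genotypes.DOMINANT
        elif self.letterOne.isupper() or self.letterTwo.isupper():
            self.expression = Expressions.HETEROZYGOUS_DOMINANT
            self.genotype = Genotypes.DOMINANT
        else:
            self.expression = Expressions.HOMOZYGOUS_RECESSIVE
            self.genotype = Genotypes.RECESSIVE
        self.__determinePhenotype()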
|
GaelVaroquaux/diffusion-segmentation
|
spectral_embedding.py
|
Python
|
bsd-3-clause
| 11,610
| 0.006202
|
import numpy as np
import scipy
from scipy import linalg
from scipy import sparse
from scipy import ndimage
import scipy.sparse.linalg.eigen.arpack
from scipy.sparse.linalg.eigen.arpack import eigen, eigen_symmetric
#from pyamg.graph import lloyd_cluster
import pyamg
from pyamg import smoothed_aggregation_solver
from scipy.sparse.linalg import lobpcg
from diffusions import _build_laplacian
def spectral_embedding(adjacency):
""" A diffusion reordering, but that works for negative values.
"""
# Normalize the graph: the sum of each set of edges must be one
abs_adjacency = np.abs(adjacency)
diag_weights = abs_adjacency.sum(axis=1)
diag_mask = (diag_weights == 0)
    diag_weights[diag_mask] = 1
    d = np.sign(diag_weights)/np.sqrt(np.abs(diag_weights))
lap = abs_adjacency*d[:, np.newaxis]*d[np.newaxis, :]
lambdas, diffusion_map = linalg.eigh(lap)
return lambdas, diffusion_map.T[-2::-1]*d
def spectral_embedding_sparse(adjacency, k_max=14, mode='amg', take_first=True):
""" A diffusion reordering, but that works for negative values.
"""
# Normalize the graph: the sum of each set of edges must be one
diag_weights = np.array(adjacency.sum(axis=1))
diag_mask = (diag_weights == 0)
diag_weights[diag_mask] = 1
dd = np.sign(diag_weights)/np.sqrt(np.abs(diag_weights))
if mode == 'bf':
lambdas, diffusion_map = eigen_symmetric(adjacency, k=k_max, which='LA')
print lambdas
if take_first:
res = diffusion_map.T[::-1]*dd.ravel()
else:
res = diffusion_map.T[-2::-1]*dd.ravel()
elif mode == 'amg':
print 'amg'
sh = adjacency.shape[0]
adjacency = adjacency.copy()
#diag = sparse.coo_matrix((diag_weights.ravel(), (range(sh), range(sh))))
diag = sparse.eye(sh, sh)
adjacency = - adjacency + diag
ml = smoothed_aggregation_solver(adjacency.tocsr())
X = scipy.rand(adjacency.shape[0], k_max)
#X[:, 0] = 1. / np.sqrt(adjacency.shape[0])
X[:, 0] = 1. / dd.ravel()
M = ml.aspreconditioner()
lambdas, diffusion_map = lobpcg(adjacency, X, M=M, tol=1.e-12, largest=False)
print lambdas
if take_first:
res = diffusion_map.T * dd.ravel()
else:
res = diffusion_map.T[1:] * dd.ravel()
print res.shape, dd.shape
return res
def modularity_embedding(adjacency, kmax=10):
""" Proceedings of the fifth SIAM international conference on data
mining, Smyth, A spectral clustering approach to finding
communities in graphs.
    Return the eigenvectors of the Q matrix
"""
#n = len(adjacency)
abs_adjacency = np.abs(adjacency)
#degrees = adjacency.copy()
#degrees.flat[::n+1] = 0
#degrees = degrees.sum(axis=0)
#weights = 1/degrees[:, np.newaxis] * abs_adjacency
#weights.flat[::n+1] = 1
weights = abs_adjacency/abs_adjacency.sum(axis=0)
lambdas, maps = linalg.eig(weights)
indices = np.argsort(lambdas)[::-1]
print lambdas[:10]
return maps.T[indices]
def modularity_embedding_sparse(adjacency, kmax=10):
""" Proceedings of the fifth SIAM international conference on data
mining, Smyth, A spectral clustering approach to finding
communities in graphs.
    Return the eigenvectors of the Q matrix
"""
if isinstance(adjacency, sparse.csc.csc_matrix):
adjacency = np.array(adjacency.todense())
abs_adjacency = np.abs(adjacency)
weights = abs_adjacency/abs_adjacency.sum(axis=0)
weights = sparse.csc_matrix(weights)
lambdas, maps = eigen(weights, \
k=kmax, which='LR')
print lambdas
return maps.T#[1:]
def newman_clustering(adjacency, eps=1e-8):
""" Newmann's spectral embedding algorithm to maximize modularity.
"""
n = len(adjacency)
abs_adjacency = np.abs(adjacency)
abs_adjacency.flat[::n+1] = 0
degrees = abs_adjacency.sum(axis=0)
weights = abs_adjacency - np.dot(degrees[:, np.newaxis],
degrees[np.newaxis, :])/degrees.sum()
weights.flat[::n+1] = 0
weights -= np.diag(weights.sum(axis=0))
lambdas, maps = linalg.eigh(weights)
if lambdas[-1] <= eps:
return np.ones(n, dtype=np.int)
cluster1 = maps.T[-1] >= 0
cluster2 = maps.T[-1] < 0
labels = np.zeros(n, dtype=np.int)
labels[cluster1] = 2*newman_clustering(adjacency[cluster1].T[cluster1])
labels[cluster2] = (1+
2*newman_clustering(adjacency[cluster2].T[cluster2])
)
return labels
def q_score(adjacency, labels):
""" Returns the Q score of a clustering.
"""
q = 0
"""
if isinstance(adjacency, sparse.csc.csc_matrix):
adjacency = np.array(adjacency.todense())
"""
weights = adjacency
total_weights = 0.5 * weights.sum()
for label in np.unique(labels):
inds = np.nonzero(labels == label)[0]
a = 0.5 * (weights[inds][:, inds]).sum()
b = weights[inds].sum() - a
q += a/total_weights
q -= 0.5*(b/total_weights)
#q += weights[label == labels].T[label == labels].sum()/total_weights
#q -= (weights[label == labels].sum()/total_weights)**2
return 2 * q
def n_cut(adjacency, labels):
""" Returns the Q score of a clustering.
"""
q = 0
"""
if isinstance(adjacency, sparse.csc.csc_matrix):
adjacency = np.array(adjacency.todense())
"""
weights = adjacency
total_weights = 0.5 * weights.sum()
for label in np.unique(labels):
inds = np.nonzero(labels == label)[0]
a = (weights[inds][:, inds]).sum()
b = weights[inds].sum()
q += (b - a)/b
return - q
def best_k_means(k, maps, adjacency, n_bst=10):
from nipy.neurospin.clustering.clustering import _kmeans
best_score = -np.inf
for _ in range(n_bst):
print "doing kmeans"
_, labels, _ = _kmeans(maps, nbclusters=k)
score2 = q_score(adjacency, labels)
score = n_cut(adjacency, labels)
if score > best_score:
best_score = score
best_score2 = score2
best_labels = labels
return best_labels, best_score2 #best_score
def communities_clustering(adjacency, k_best=None, n_bst=2):
adjacency = np.abs(adjacency)
n_features = adjacency.shape[0]
adjacency.flat[::n_features+1] = 0
maps = modularity_embedding(adjacency)
scores = dict()
if k_best is None:
#for k in range(2, .3*n_features):
for k in range(2, 6):
this_maps = maps[:k-1].T.copy()
labels, score = best_k_means(k, this_maps, adjacency, n_bst=n_bst)
scores[k] = score
print scores[k]
k_best = scores.keys()[np.argmax(scores.values())]
this_maps = maps[:k_best-1].T.copy()
labels, score = best_k_means(k_best, this_maps, adjacency,
n_bst=5*n_bst)
print 'Final : k=%i, score=%s' % (k_best, score)
return labels
def communities_clustering_sparse(adjacency, k_best=None, k_min=2, k_max=8, n_bst=4, mode='bf', take_first=False):
maps = spectral_embedding_sparse(adjacency, k_max=k_max+2, mode=mode, \
take_first=take_first)
scores = dict()
res = dict()
if k_best is None:
for k in range(k_min, k_max + 1):
this_maps = maps[:k - 1].T.copy()
labels, score = best_k_means(k, this_maps, adjacency, n_bst=n_bst)
scores[k] = score
print scores[k]
res[k] = labels
#k_best = scores.keys()[np.argmax(scores.values())]
else:
this_maps = maps[:k_best - 1].T.copy()
res, scores = best_k_means(k_best, this_maps, adjacency,
n_bst=4*n_bst)
print 'Final : k=%i, score=%s' % (k_best, scores)
return res, scores
def separate_in_regions(data, mask=None, k_best=None, k_min=2, k_max=8, \
center=None, only_connex=True, n_times=4,\
take_first=True, beta=10, mode='bf'):
"""
Separate an image i
|
googleapis/python-retail
|
google/cloud/retail_v2/services/search_service/__init__.py
|
Python
|
apache-2.0
| 765
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import SearchServiceClient
from .async_client import SearchServiceAsyncClient
__all__ = (
"SearchServiceClient",
"SearchServiceAsyncClient",
)
|
StackStorm/mistral
|
mistral/tests/unit/engine/test_action_defaults.py
|
Python
|
apache-2.0
| 6,915
| 0
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
import requests
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set the value; otherwise, in certain test
# cases the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
ENV = {
'__actions': {
'std.http': {
'auth': 'librarian:password123',
'timeout': 30,
}
}
}
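# The '__actions' section above supplies default kwargs for the std.http
# action: WORKFLOW1 below relies on these env defaults, while WORKFLOW2
# overrides 'timeout' inline, which is what the *_not_applied tests check.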
EXPECTED_ENV_AUTH = ('librarian', 'password123')
WORKFLOW1 = """
---
version: "2.0"
wf1:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books"
publish:
result: <% $ %>
"""
WORKFLOW2 = """
---
version: "2.0"
wf2:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books" timeout=60
publish:
result: <% $ %>
"""
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %>
publish:
result: <% $ %>
"""
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %> timeout=60
publish:
result: <% $ %>
"""
class ActionDefaultTest(base.EngineTestCase):
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1)
wf_ex = self.engine.start_workflow('wf1', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=ENV['__actions']['std.http']['timeout'])
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2)
wf_ex = self.engine.start_workflow('wf2', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=60
)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf1_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=ENV['__actions']['std.http']['timeout'])
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf2_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=60)
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
|
rodxavier/open-pse-initiative
|
django_project/companies/models.py
|
Python
|
mit
| 2,705
| 0.004806
|
from datetime import datetime, timedelta
from django.db import models
from django.db.models import Max, Min
from tinymce.models import HTMLField
class Company(models.Model):
name = models.CharField(max_length=75, blank=True, null=True)
symbol = models.CharField(max_length=10, blank=True, null=True)
description = HTMLField(blank=True, null=True, default='')
listing_date = models.DateField(blank=True, null=True)
renamed_to = models.ForeignKey('self', blank=True, null=True, default=None, related_name='renamed_from')
order = models.IntegerField(blank=True, default=0)
is_index = models.BooleanField(blank=True, default=False)
is_currently_listed = models.BooleanField(blank=True, default=True)
is_suspended = models.BooleanField(blank=True, default=False)
created_datetime = models.DateTimeField(auto_now_add=True)
updated_datetime = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('symbol',)
verbose_name = 'Company'
verbose_name_plural = 'Companies'
def __unicode__(self):
return self.symbol if self.symbol is not None else self.name
def __str__(self):
return self.symbol if self.symbol is not None else self.name
    @property
def readable_name(self):
if self.is_index:
return self.name[1:]
else:
return self.name
@property
def year_high(self):
today = datetime.now()
one_year = timedelta(days=52*7)
if today.isoweekday() == 6:
today = today - timedelta(days=1)
elif today.isoweekday() == 7:
            today = today - timedelta(days=2)
last_year = today - one_year
quotes = self.quote_set.filter(quote_date__gt=last_year)
if quotes.count() == 0:
return 0.0
year_high = quotes.aggregate(Max('price_high'))
return ('%f' % year_high['price_high__max']).rstrip('0').rstrip('.')
@property
def year_low(self):
today = datetime.now()
one_year = timedelta(days=52*7)
if today.isoweekday() == 6:
today = today - timedelta(days=1)
elif today.isoweekday() == 7:
today = today - timedelta(days=2)
last_year = today - one_year
quotes = self.quote_set.filter(quote_date__gt=last_year)
if quotes.count() == 0:
return 0.0
year_low = quotes.aggregate(Min('price_low'))
return ('%f' % year_low['price_low__min']).rstrip('0').rstrip('.')
@property
def last_thirty_quotes(self):
quotes = self.quote_set.order_by('-quote_date')[:30]
return quotes
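    def _year_quotes(self):
        # Illustrative sketch, not used by the properties above: year_high
        # and year_low build the same 52-week window queryset, which could
        # be shared through a helper like this one.
        today = datetime.now()
        if today.isoweekday() == 6:
            today = today - timedelta(days=1)
        elif today.isoweekday() == 7:
            today = today - timedelta(days=2)
        return self.quote_set.filter(quote_date__gt=today - timedelta(days=52 * 7))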
|
springload/draftjs_exporter
|
draftjs_exporter/defaults.py
|
Python
|
mit
| 1,811
| 0
|
from draftjs_exporter.constants import BLOCK_TYPES, INLINE_STYLES
from draftjs_exporter.dom import DOM
from draftjs_exporter.types import Element, Props
def render_children(props: Props) -> Element:
"""
    Renders the children of a component without any specific
markup for the component itself.
"""
return props["children"]
def code_block(props: Props) -> Element:
return DOM.create_element(
"pre", {}, DOM.create_element("code", {}, props["children"])
)
# Default block map to extend.
BLOCK_MAP = {
BLOCK_TYPES.UNSTYLED: "p",
BLOCK_TYPES.HEADER_ONE: "h1",
BLOCK_TYPES.HEADER_TWO: "h2",
BLOCK_TYPES.HEADER_THREE: "h3",
BLOCK_TYPES.HEADER_FOUR: "h4",
BLOCK_TYPES.HEADER_FIVE: "h5",
BLOCK_TYPES.HEADER_SIX: "h6",
BLOCK_TYPES.UNORDERED_LIST_ITEM: {"element": "li", "wrapper": "ul"},
BLOCK_TYPES.ORDERED_LIST_ITEM: {"element": "li", "wrapper": "ol"},
BLOCK_TYPES.BLOCKQUOTE: "blockquote",
BLOCK_TYPES.PRE: "pre",
BLOCK_TYPES.CODE: code_block,
BLOCK_TYPES.ATOMIC: render_children,
}
# Default style map to extend.
# Tags come from https://developer.mozilla.org/en-US/docs/Web/HTML/Element.
# and are loosely aligned with https://github.com/jpuri/draftjs-to-html.
# Only styles that map to HTML elements are allowed as defaults.
STYLE_MAP = {
INLINE_STYLES.BOLD: "strong",
INLINE_STYLES.CODE: "code",
INLINE_STYLES.ITALIC: "em",
INLINE_STYLES.UNDERLINE: "u",
INLINE_STYLES.STRIKETHROUGH: "s",
INLINE_STYLES.SUPERSCRIPT: "sup",
INLINE_STYLES.SUBSCRIPT: "sub",
INLINE_STYLES.MARK: "mark",
INLINE_STYLES.QUOTATION: "q",
INLINE_STYLES.SMALL: "small",
INLINE_STYLES.SAMPLE: "samp",
INLINE_STYLES.INSERT: "ins",
INLINE_STYLES.DELETE: "del",
INLINE_STYLES.KEYBOARD: "kbd",
}
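# Illustrative usage sketch (comments only, not part of this module): these
# maps are meant to be copied and extended when configuring the exporter,
# along the lines of
#
#     from draftjs_exporter.html import HTML
#     exporter = HTML({
#         "block_map": dict(BLOCK_MAP, **{BLOCK_TYPES.HEADER_TWO: "h2"}),
#         "style_map": dict(STYLE_MAP, **{INLINE_STYLES.BOLD: "b"}),
#     })
#
# The "block_map"/"style_map" config keys are assumed from the library's
# documented usage; check the HTML class for the authoritative names.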
|
rdhyee/waterbutler
|
tests/providers/s3/test_provider.py
|
Python
|
apache-2.0
| 33,061
| 0.001936
|
import os
import io
import time
import base64
import hashlib
import aiohttpretty
from http import client
from urllib import parse
from unittest import mock
import pytest
from waterbutler.core import (streams,
metadata,
exceptions)
from waterbutler.providers.s3 import S3Provider
from waterbutler.core.path import WaterButlerPath
from tests.utils import MockCoroutine
from tests.providers.s3.fixtures import (
auth,
credentials,
settings,
file_content,
folder_metadata,
folder_single_item_metadata,
folder_item_metadata,
version_metadata,
single_version_metadata,
folder_metadata,
folder_and_contents,
folder_empty_metadata,
file_header_metadata,
file_metadata_headers_object,
file_metadata_object,
folder_key_metadata_object,
revision_metadata_object
)
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock(return_value=1454684930.0)
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def provider(auth, credentials, settings):
provider = S3Provider(auth, credentials, settings)
provider._check_region = MockCoroutine()
return provider
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
def location_response(location):
return (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
'{}</LocationConstraint>'
).format(location)
def list_objects_response(keys, truncated=False):
response = '''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>'''
response += '<IsTruncated>' + str(truncated).lower() + '</IsTruncated>'
response += ''.join(map(
lambda x: '<Contents><Key>{}</Key></Contents>'.format(x),
keys
))
response += '</ListBucketResult>'
return response.encode('utf-8')
def bulk_delete_body(keys):
payload = '<?xml version="1.0" encoding="UTF-8"?>'
payload += '<Delete>'
payload += ''.join(map(
lambda x: '<Object><Key>{}</Key></Object>'.format(x),
keys
))
payload += '</Delete>'
payload = payload.encode('utf-8')
md5 = base64.b64encode(hashlib.md5(payload).digest())
headers = {
'Content-Length': str(len(payload)),
'Content-MD5': md5.decode('ascii'),
'Content-Type': 'text/xml',
}
return (payload, headers)
def build_folder_params(path):
return {'prefix': path.path, 'delimiter': '/'}
class TestRegionDetection:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
@pytest.mark.parametrize("region_name,host", [
('', 's3.amazonaws.com'),
('EU', 's3-eu-west-1.amazonaws.com'),
('us-east-2', 's3-us-east-2.amazonaws.com'),
('us-west-1', 's3-us-west-1.amazonaws.com'),
('us-west-2', 's3-us-west-2.amazonaws.com'),
('ca-central-1', 's3-ca-central-1.amazonaws.com'),
('eu-central-1', 's3-eu-central-1.amazonaws.com'),
('eu-west-2', 's3-eu-west-2.amazonaws.com'),
('ap-northeast-1', 's3-ap-northeast-1.amazonaws.com'),
('ap-northeast-2', 's3-ap-northeast-2.amazonaws.com'),
('ap-south-1', 's3-ap-south-1.amazonaws.com'),
('ap-southeast-1', 's3-ap-southeast-1.amazonaws.com'),
('ap-southeast-2', 's3-ap-southeast-2.amazonaws.com'),
('sa-east-1', 's3-sa-east-1.amazonaws.com'),
])
async def test_region_host(self, auth, credentials, settings, region_name, host, mock_time):
provider = S3Provider(auth, credentials, settings)
orig_host = provider.connection.host
region_url = provider.bucket.generate_url(
100,
'GET',
query_parameters={'location': ''},
)
aiohttpretty.register_uri('GET',
region_url,
status=200,
body=location_response(region_name))
await provider._check_region()
assert provider.connection.host == host
class TestValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file(self, provider, file_header_metadata, mock_time):
file_path = 'foobah'
params = {'prefix': '/' + file_path + '/', 'delimiter': '/'}
good_metadata_url = provider.bucket.new_key('/' + file_path).generate_url(100, 'HEAD')
bad_metadata_url = provider.bucket.generate_url(100)
aiohttpretty.register_uri('HEAD', good_metadata_url, headers=file_header_metadata)
aiohttpretty.register_uri('GET', bad_metadata_url, params=params, status=404)
assert WaterButlerPath('/') == await provider.validate_v1_path('/')
try:
wb_path_v1 = await provider.validate_v1_path('/' + file_path)
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + file_path + '/')
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + file_path)
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder(self, provider, folder_metadata, mock_time):
folder_path = 'Photos'
params = {'prefix': '/' + folder_path + '/', 'delimiter': '/'}
good_metadata_url = provider.bucket.generate_url(100)
bad_metadata_url = provider.bucket.new_key('/' + folder_path).generate_url(100, 'HEAD')
aiohttpretty.register_uri(
'GET', good_metadata_url, params=params,
body=folder_metadata, headers={'Content-Type': 'application/xml'}
)
aiohttpretty.register_uri('HEAD', bad_metadata_url, status=404)
try:
wb_path_v1 = await provider.validate_v1_path('/' + folder_path + '/')
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + folder_path)
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + folder_path + '/')
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
async def test_normal_name(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/path.txt')
assert path.name == 'path.txt'
assert path.parent.name == 'a'
assert path.is_file
assert not path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_folder(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_root(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download(self, provider, mock_time):
path = WaterButlerPath('/muhtriangle')
response_headers = {'response-content-disposition': 'attachment'}
url = provider.bucket.new_key(path.path).generate_url(100,
response_headers=response_headers)
aiohttpretty.register_uri('GET', url, body=b'delicious', auto_length=True)
result = await provider.download(path)
content = await result.read()
        assert content == b'delicious'
|
ychab/mymoney
|
mymoney/apps/banktransactions/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 2,559
| 0.004689
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bankaccounts', '0001_initial'),
('banktransactiontags', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BankTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=255, verbose_name='Label')),
('date', models.DateField(verbose_name='Date', default=datetime.date.today)),
('amount', models.DecimalField(max_digits=10, verbose_name='Amount', decimal_places=2)),
('currency', models.CharField(editable=False, max_length=3, verbose_name='Currency')),
('status', models.CharField(max_length=32, default='active', verbose_name='Status', help_text='Depending on its value, determine whether it could alter the bank account balance or being used by statistics.', choices=[('active', 'Active'), ('ignored', 'Ignored'), ('inactive', 'Inactive')])),
('reconciled', models.BooleanField(verbose_name='Reconciled', help_text='Whether the bank transaction has been applied on the real bank account.', default=False)),
('payment_method', models.CharField(max_length=32, default='credit_card', verbose_name='Payment method', choices=[('credit_card', 'Credit card'), ('cash', 'Cash'), ('transfer', 'Transfer'), ('transfer_internal', 'Transfer internal'), ('check', 'Check')])),
('memo', models.TextField(blank=True, verbose_name='Memo')),
('scheduled', models.BooleanField(editable=False, default=False)),
                ('bankaccount', models.ForeignKey(to='bankaccounts.BankAccount', related_name='banktransactions', on_delete=models.CASCADE)),
('tag', models.ForeignKey(related_name='banktransactions', on_delete=django.db.models.deletion.SET_NULL, verbose_name='Tag', to='banktransactiontags.BankTransactionTag', blank=True, null=True)),
],
options={
'get_latest_by': 'date',
'db_table': 'banktransactions',
},
),
migrations.AlterIndexTogether(
name='banktransaction',
index_together=set([('bankaccount', 'reconciled'), ('bankaccount', 'date'), ('bankaccount', 'amount')]),
),
]
|
intelxed/mbuild
|
mbuild/doxygen.py
|
Python
|
apache-2.0
| 12,057
| 0.015012
|
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
############################################################################
## START OF IMPORTS SETUP
############################################################################
import sys
import os
import re
import copy
import glob
import types
try:
from . import base
from . import dag
from . import util
from . import plan
except:
s = "\nXED ERROR: mfile.py could not find mbuild." + \
" Should be a sibling of the xed2 directory.\n\n"
sys.stderr.write(s)
sys.exit(1)
###########################################################################
## DOXYGEN SUPPORT
###########################################################################
def _doxygen_version_okay(s, want_major, want_minor, want_fix):
values = s.split('.')
maj =int(values[0])
minor = int(values[1])
fix = 0
if len(values) > 2:
# remove everything after the dash for things like: 'Doxygen
# 1.5.1-p1'
values[2] = re.sub(r'-.*$','',values[2])
try:
fix = int(values[2])
except ValueError as v:
pass
    if (maj > want_major) or \
(maj == want_major and minor > want_minor) or \
(maj == want_major and minor == want_minor and fix >= want_fix):
return True
return False
def _find_doxygen(env):
"""Find the right version of doxygen. Return a tuple of the
command name and a boolean indicating whether or not the version
checked out."""
if env['doxygen_cmd'] == '':
doxygen_cmd_intel = "/usr/intel/bin/doxygen"
doxygen_cmd_cygwin = "C:/cygwin/bin/doxygen"
doxygen_cmd_mac = \
"/Applications/Doxygen.app/Contents/Resources/doxygen"
doxygen_cmd = "doxygen"
if env['build_os'] == 'win':
if os.path.exists(doxygen_cmd_cygwin):
doxygen_cmd = doxygen_cmd_cygwin
else:
base.msgb('DOXYGEN',"Could not find cygwin's doxygen," +
"trying doxygen from PATH")
elif env['build_os'] == 'lin':
if base.verbose(2):
base.msgb("CHECKING FOR", doxygen_cmd_intel)
if os.path.exists(doxygen_cmd_intel):
doxygen_cmd = doxygen_cmd_intel
elif env['build_os'] == 'mac':
if base.verbose(2):
base.msgb("CHECKING FOR", doxygen_cmd_mac)
if os.path.exists(doxygen_cmd_mac):
doxygen_cmd = doxygen_cmd_mac
else:
doxygen_cmd = env['doxygen_cmd']
doxygen_cmd = env.escape_string(doxygen_cmd)
doxygen_okay = False
if base.verbose(2):
base.msgb('Checking doxygen version','...')
if base.check_python_version(2,4):
try:
(retval, output, error_output) = \
util.run_command(doxygen_cmd + " --version")
if retval==0:
if len(output) > 0:
first_line = output[0].strip()
if base.verbose(2):
base.msgb("Doxygen version", first_line)
                    doxygen_okay = _doxygen_version_okay(first_line, 1,4,6)
else:
for o in output:
base.msgb("Doxygen-version-check STDOUT", o)
if error_output:
for line in error_output:
base.msgb("STDERR ",line.rstrip())
except:
base.die("Doxygen required by the command line options " +
"but no doxygen found")
return (doxygen_cmd, doxygen_okay)
def _replace_match(istring, mtch, newstring, group_name):
"""This is a lame way of avoiding regular expression backslashing
issues"""
x1= mtch.start(group_name)
x2= mtch.end(group_name)
ostring = istring[0:x1] + newstring + istring[x2:]
return ostring
def _customize_doxygen_file(env, subs):
"""Change the $(*) strings to the proper value in the config file.
Returns True on success"""
# doxygen wants quotes around paths with spaces
for k,s in iter(subs.items()):
if re.search(' ',s):
if not re.search('^".*"$',s):
base.die("Doxygen requires quotes around strings with spaces: [%s]->[%s]" %
( k,s))
return False
# input and output files
try:
lines = open(env['doxygen_config']).readlines()
except:
base.msgb("Could not open input file: " + env['doxygen_config'])
return False
env['doxygen_config_customized'] = \
env.build_dir_join(os.path.basename(env['doxygen_config']) + '.customized')
try:
ofile = open(env['doxygen_config_customized'],'w')
except:
base.msgb("Could not open output file: " + env['doxygen_config_customized'])
return False
# compile the patterns
rsubs = {}
for k,v in iter(subs.items()):
rsubs[k]=re.compile(r'(?P<tag>[$][(]' + k + '[)])')
olines = []
for line in lines:
oline = line
for k,p in iter(rsubs.items()):
#print ('searching for', k, 'to replace it with', subs[k])
m = p.search(oline)
while m:
#print ('replacing', k, 'with', subs[k])
oline = _replace_match(oline, m, subs[k], 'tag')
m = p.search(oline)
olines.append(oline)
try:
for line in olines:
ofile.write(line)
except:
ofile.close()
base.msgb("Could not write output file: " + env['doxygen_config_customized'])
return False
ofile.close()
return True
def _build_doxygen_main(args, env):
"""Customize the doxygen input file. Run the doxygen command, copy
in any images, and put the output in the right place."""
if isinstance(args, list):
if len(args) < 2:
base.die("Need subs dictionary and dummy file arg for the doxygen command " +
"to indicate its processing")
else:
base.die("Need a list for _build_doxygen_main with the subs " +
"dictionary and the dummy file name")
(subs,dummy_file) = args
(doxygen_cmd, doxygen_okay) = _find_doxygen(env)
if not doxygen_okay:
msg = 'No good doxygen available on this system; ' + \
'Your command line arguments\n\trequire it to be present. ' + \
'Consider dropping the "doc" and "doc-build" options\n\t or ' + \
'specify a path to doxygen with the --doxygen knob.\n\n\n'
return (1, [msg]) # failure
else:
env['DOXYGEN'] = doxygen_cmd
try:
okay = _customize_doxygen_file(env, subs)
except:
base.die("CUSTOMIZE DOXYGEN INPUT FILE FAILED")
if not okay:
return (1, ['Doxygen customization failed'])
cmd = env['DOXYGEN'] + ' ' + \
env.escape_string(env['doxygen_config_customized'])
if base.verbose(2):
base.msgb("RUN DOXYGEN", cmd)
(retval, output, error_output) = util.run_command(cmd)
for line in output:
base.msgb("DOX",line.rstrip())
if error_output:
for line in error_output:
base.msgb("DOX-ERROR",line.rstrip())
if retval != 0:
base.msgb("DOXYGEN FAILED")
base.die("Doxygen run failed. Retval=", str(retval))
util.touch(dummy_file)
base.msgb("DOXYGEN","succeeded")
return (0, []) # success
###########################################################################
# Dox
|
Ebag333/Pyfa
|
eos/effects/systemoverloadhardening.py
|
Python
|
gpl-3.0
| 406
| 0.004926
|
# systemOverloadHardening
#
# Used by:
# Celestials named like: Red Giant Beacon Class (6 of 6)
runTime = "
|
early"
type = ("projected", "passive")
def handler(fit, module, context):
fit.modules.filteredItemMultiply(lambda mod: "overloadHardeningBonus" in mod.itemModifiedAttributes,
"overloadHardeningBonus", module.getModifiedItemAttr("o
|
verloadBonusMultiplier"))
|
frac/celery
|
examples/celery_http_gateway/settings.py
|
Python
|
bsd-3-clause
| 2,931
| 0.000341
|
# Django settings for celery_http_gateway project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
CARROT_BACKEND = "amqp"
CELERY_RESULT_BACKEND = "database"
BROKER_HOST = "localhost"
BROKER_VHOST = "/"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_ENGINE = 'sqlite3'
# path to database file if using sqlite3.
DATABASE_NAME = 'development.db'
# Not used with sqlite3.
DATABASE_USER = ''
# Not used with sqlite3.
DATABASE_PASSWORD = ''
# Set to empty string for localhost. Not used with sqlite3.
DATABASE_HOST = ''
# Set to empty string for default. Not used with sqlite3.
DATABASE_PORT = ''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#1i=edpk55k3781$z-p%b#dbn&n+-rtt83pgz2o9o)v8g7(owq'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'celery_http_gateway.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
)
|