text stringlengths 4 1.02M | meta dict |
|---|---|
'''
Helper class for parsing input data and generating train/test sets.
Author: Tushar Makkar <tusharmakkar08[at]gmail.com>
Date: 17.11.2014
'''
import csv, numpy, random
def randomize_inputs(X, y):
    '''Shuffle the input samples and their outputs in unison.

    Guards against raw data files that are neatly ordered (e.g. sorted
    by class), which would bias a sequential train/test split.

    Args:
        X: data samples.
        y: outputs for the data samples in X.

    Returns:
        A tuple (new_X, new_y) with the samples in a random order and
        the pairing between X[i] and y[i] preserved.
    '''
    # list() is required: Python 3's range() is immutable, so
    # random.shuffle() would raise TypeError on it (Python 2's range()
    # returned a list, which is why the original worked there).
    sequence = list(range(len(y)))
    random.shuffle(sequence)
    new_X = []
    new_y = []
    for i in sequence:
        new_X.append(X[i])
        new_y.append(y[i])
    return (new_X, new_y)
def parse_input_no_conversion(input_file, custom_delimiter,
                              input_columns, output_column, is_test):
    '''Parse the input data file, performing no conversion on values.

    Args:
        input_file: The file containing the input data.
        custom_delimiter: The delimiter used in the input file.
        input_columns: Which columns in the input data are inputs (X).
            If input_columns is empty, the data samples have variable
            length and every column of the row is used.
        output_column: Which column in the input data is the output
            value (y).
        is_test: Set to True if we are parsing input from a test set;
            a dummy y vector of zeros is generated in that case.

    Returns:
        A tuple (X, y) containing the (shuffled) input data. Values are
        kept as the raw strings read from the file.
    '''
    # Context manager guarantees the file handle is closed (it was
    # previously left open for the garbage collector to deal with).
    # A stray debug `print X` inside the row loop was also removed.
    with open(input_file, 'r') as f:
        data_reader = csv.reader(f, delimiter=custom_delimiter)
        if not is_test:
            X = []
            y = []
            for row in data_reader:
                line_x = [1]  # Add the bias feature X0=1
                # Drop empty fields produced by repeated delimiters
                while '' in row:
                    row.remove("")
                if input_columns != []:
                    for i in input_columns:
                        if i < len(row):
                            line_x.append(row[i])
                    X.append(line_x)
                    y.append(row[output_column])
                else:
                    # Variable-length samples: take every column.
                    # NOTE(review): y is never filled in this branch, so
                    # randomize_inputs() returns empty lists -- confirm
                    # this is intended for variable-length data.
                    for i in range(len(row)):
                        line_x.append(row[i])
                    X.append(line_x)
            (X, y) = randomize_inputs(X, y)
        else:
            X = []
            for row in data_reader:
                line_x = [1]
                for i in range(len(row)):
                    line_x.append(row[i])
                X.append(line_x)
            y = [0.0] * len(X)  # Dummy y
            (X, y) = randomize_inputs(X, y)
    return (X, y)
def parse_input(input_file, custom_delimiter, input_columns,
                output_column, is_test, input_literal_columns,
                input_label_mapping, output_literal,
                output_label_mapping):
    '''Parse the input data file, converting literal values to floats.

    Args:
        input_file: The file containing the input data.
        custom_delimiter: The delimiter used in the input file.
        input_columns: Which columns in the input data are inputs (X).
            If input_columns is empty, the data samples have variable
            length and every column of the row is used.
        output_column: Which column in the input data is the output
            value (y).
        is_test: Set to True if we are parsing input from a test set;
            a dummy y vector of zeros is generated in that case.
        input_literal_columns: Flags (1/0) marking which input columns
            hold literal descriptions that must be mapped to numbers.
        input_label_mapping: Mapping for input literal columns.
        output_literal: Boolean, True when the output column is literal
            rather than numeric.
        output_label_mapping: Mapping for the output literal column.

    Returns:
        A tuple (X, y) containing the (shuffled) input data as floats.
    '''
    # Context manager guarantees the file handle is closed (it was
    # previously left open). An unused `index` counter was removed.
    with open(input_file, 'r') as f:
        data_reader = csv.reader(f, delimiter=custom_delimiter)
        if not is_test:
            X = []
            y = []
            for row in data_reader:
                line_x = [1]  # Add the bias feature X0=1
                # Drop empty fields produced by repeated delimiters
                while '' in row:
                    row.remove("")
                if input_columns != []:
                    for i in input_columns:
                        if input_literal_columns[i] == 1:
                            line_x.append(float(
                                input_label_mapping[i][row[i]]))
                        else:
                            line_x.append(float(row[i]))
                    X.append(line_x)
                    if output_literal:
                        y.append(float(output_label_mapping[
                            row[output_column]]))
                    else:
                        y.append(float(row[output_column]))
                else:
                    # Variable-length samples: take every column.
                    # NOTE(review): y is never filled in this branch, so
                    # randomize_inputs() returns empty lists -- confirm
                    # this is intended for variable-length data.
                    for i in range(len(row)):
                        line_x.append(float(row[i]))
                    X.append(line_x)
            (X, y) = randomize_inputs(X, y)
        else:
            X = []
            for row in data_reader:
                line_x = [1]
                for i in range(len(row)):
                    line_x.append(float(row[i]))
                X.append(line_x)
            y = [0.0] * len(X)  # Dummy y
            (X, y) = randomize_inputs(X, y)
    return (X, y)
def readInputData(input_file, input_test_file,
        convert_literals, custom_delimiter, proportion_factor, split,
        input_columns, output_column, input_literal_columns,
        input_label_mapping, output_literal, output_label_mapping):
    '''Main method for parsing the input data.

    The input data is expected in CSV format, with a delimiter that can
    be specified as a parameter. The parsed data is randomly permuted
    (inside the parse functions) to be safe in case the original raw
    data is ordered.

    Args:
        input_file: The file containing the input data.
        input_test_file: The file containing the test data
            (used only when split is False).
        convert_literals: If True, the literals in the input files
            will be converted to numeric values as per the given
            mappings.
        custom_delimiter: The delimiter used in the input files.
        proportion_factor: If split is True, this fraction of the
            (shuffled) input_file samples is used as test data.
        split: If True, the test data will be taken from input_file.
            Otherwise, from input_test_file.
        input_columns: Which columns in the input data are inputs (X).
        output_column: Which column in the input data is output value
            (y).
        input_literal_columns: Which columns in the input data have a
            literal description and need to be mapped to custom
            numeric values.
        input_label_mapping: Mapping for input literal columns.
        output_literal: Boolean, shows whether output is literal or
            numeric.
        output_label_mapping: Mapping for output literal column.

    Returns:
        A tuple (train_X, train_y, test_X, test_y). When split is
        False, the test_y values are dummies (zeros); when split is
        True they are real labels from input_file. The arrays are
        numpy arrays when convert_literals is True, plain lists
        otherwise.
    '''
    if convert_literals:
        (X, y) = parse_input(input_file, custom_delimiter,
            input_columns, output_column, False, input_literal_columns,
            input_label_mapping, output_literal, output_label_mapping)
    else:
        (X, y) = parse_input_no_conversion(input_file, custom_delimiter,
            input_columns, output_column, False)
    if split:
        # The first proportion_factor fraction of the already-shuffled
        # samples becomes the test set; the remainder is training data.
        splice_index = int(len(y) * proportion_factor)
        train_X = X[splice_index:]
        train_y = y[splice_index:]
        test_X = X[:splice_index]
        test_y = y[:splice_index]
        if convert_literals:
            return (numpy.array(train_X), numpy.array(train_y),
                numpy.array(test_X), numpy.array(test_y))
        else:
            return (train_X, train_y, test_X, test_y)
    else:
        # Take test values from input_test_file -- we assume same
        # format as input_file!
        # NOTE(review): parse_input (the converting variant) is used
        # here even when convert_literals is False -- confirm intended.
        (test_X, test_y) = parse_input(input_test_file,
            custom_delimiter, input_columns, output_column, True,
            input_literal_columns, input_label_mapping, output_literal,
            output_label_mapping)
        if convert_literals:
            return (numpy.array(X), numpy.array(y), numpy.array(test_X),
                numpy.array(test_y))
        else:
            return (X, y, test_X, test_y)
| {
"content_hash": "30c408e171b970f171d8912255d7214e",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 72,
"avg_line_length": 36.52914798206278,
"alnum_prop": 0.5808986005401424,
"repo_name": "tusharmakkar08/Feature_Selection_Symbiotic",
"id": "42b3c05653a8441911935151caa28c41c0cdd9b9",
"size": "8146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datareader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23681"
}
],
"symlink_target": ""
} |
from admin_views.admin import AdminViews
from conferences.models.conference import Conference
from django.contrib import admin, messages
from django.shortcuts import redirect
from newsletters.exporter import Endpoint
from notifications.aws import send_endpoints_to_pinpoint
from pretix import get_all_order_positions, get_items
from users.models import User
from .exporter import convert_user_to_endpoint
from .models import Subscription
def get_positions_with_missing_user(users, conference):
    """Yield paid admission order positions whose attendee e-mail does
    not belong to any existing user account."""
    admission_items = get_items(conference, {"admission": True})
    position_filters = {
        "order__status": "p",
        "item__in": ",".join(list(admission_items.keys())),
    }

    # Index positions by attendee e-mail, skipping positions without one.
    # As in a plain dict build, a later position with the same e-mail
    # overwrites an earlier one.
    positions_by_email = {}
    for position in get_all_order_positions(conference, position_filters):
        if position["attendee_email"]:
            positions_by_email[position["attendee_email"]] = position

    known_emails = {user.email for user in users}
    for email in set(positions_by_email) - known_emails:
        yield positions_by_email[email]
def get_missing_users_endpoints(users):
    """Build newsletter Endpoints for ticket holders that have no user
    account, using order data from the latest conference."""
    conference = Conference.objects.last()
    endpoints = []
    for position in get_positions_with_missing_user(users, conference):
        attendee_email = position["attendee_email"]
        attendee_name = position["attendee_name"]
        endpoints.append(
            Endpoint(
                id=f"pretix_{attendee_email}",
                name=attendee_name,
                full_name=attendee_name,
                email=attendee_email,
                is_staff=False,
                has_sent_submission_to=[],
                has_item_in_schedule=[],
                has_cancelled_talks=[],
                has_ticket=[conference.code],
                talks_by_conference={},
            )
        )
    return endpoints
@admin.register(Subscription)
class SubscriptionAdmin(AdminViews):
    """Admin for newsletter subscriptions, with a custom admin view to
    export all users (plus ticket holders without accounts) to AWS
    Pinpoint."""

    list_display = ("email", "date_subscribed")
    # (label, view method name) pairs rendered by django-admin-views
    admin_views = (("Export all users to Pinpoint", "export_all_users_to_pinpoint"),)

    def export_all_users_to_pinpoint(self, request, **kwargs):
        """Convert every user (and every paid attendee missing a user
        account) to a Pinpoint endpoint, push them, then redirect back
        to the admin index with a success message."""
        users = User.objects.all()
        endpoints = [
            convert_user_to_endpoint(user) for user in users
        ] + get_missing_users_endpoints(users)
        send_endpoints_to_pinpoint(endpoints)
        self.message_user(
            request, "Exported all the users to Pinpoint", level=messages.SUCCESS
        )
        return redirect("admin:index")
| {
"content_hash": "939023ab33ce4b885be690d008fb56c6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 85,
"avg_line_length": 31.39240506329114,
"alnum_prop": 0.6540322580645161,
"repo_name": "patrick91/pycon",
"id": "b93046b5511356b9b2cb7d8d3da3bc59149ed4f7",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/newsletters/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1456"
},
{
"name": "Python",
"bytes": "13911"
}
],
"symlink_target": ""
} |
"""George Goerg's one-point reach curve model.
This class fits the model described in George Goerg's paper:
Goerg, Georg M. "Estimating reach curves from one data point." (2014).
Goerg assumes the underlying reach curve is determined by an exponential-
Poisson distribution with unknown mixing parameter, and shows how the
reach curve can be extrapolated from a single point on it.
"""
from wfa_planning_evaluation_framework.models.reach_point import ReachPoint
from wfa_planning_evaluation_framework.models.reach_curve import ReachCurve
class GoergModel(ReachCurve):
    """Goerg single-point reach curve model.

    Fits the exponential-Poisson reach curve of Goerg (2014) through a
    single (impressions, reach) observation; rho and beta have the
    closed-form estimates computed in :meth:`_fit`.
    """

    def __init__(self, data: "list[ReachPoint]"):
        """Constructs a Goerg single point reach model.

        Args:
            data: A list containing exactly one ReachPoint to which the
                model is to be fit.

        Raises:
            ValueError: If data does not contain exactly one point, or
                the point has (near) zero impressions, or impressions do
                not exceed reach.
        """
        super().__init__(data)
        if len(data) != 1:
            raise ValueError("Exactly one ReachPoint must be specified")
        if data[0].impressions[0] < 0.001:
            raise ValueError("Attempt to create model with 0 impressions")
        self._impressions = data[0].impressions[0]
        self._reach = data[0].reach(1)
        self._fit()
        # Under this model the fitted rho is also the asymptotic
        # maximum reach.
        self._max_reach = self._rho
        if data[0].spends:
            # Cost per impression, used for spend <-> impressions
            # conversions; None when no spend data is available.
            self._cpi = data[0].spends[0] / data[0].impressions[0]
        else:
            self._cpi = None

    def _fit(self) -> None:
        """Fits a model to the data that was provided in the constructor."""
        if self._impressions <= self._reach:
            raise ValueError("Cannot fit Goerg model when impressions <= reach.")
        else:
            # Closed-form estimates: rho = I*R / (I-R), and beta = rho.
            self._rho = (self._impressions * self._reach) / (
                self._impressions - self._reach
            )
            self._beta = self._rho

    def by_impressions(self, impressions: "list[int]", max_frequency: int = 1) -> ReachPoint:
        """Returns the estimated reach as a function of impressions.

        Args:
            impressions: list of ints of length 1, specifying the hypothetical number
                of impressions that are shown.
            max_frequency: int, specifies the number of frequencies for which reach
                will be reported.

        Returns:
            A ReachPoint specifying the estimated reach for this number of
            impressions (with spend attached when CPI is known).
        """
        if len(impressions) != 1:
            raise ValueError("Impressions vector must have a length of 1.")
        kplus_reach_list = []
        for k in range(1, max_frequency + 1):
            # k+ reach under the model: rho * (I / (I + beta)) ** k
            kplus_reach = (
                self._rho * (impressions[0] / (impressions[0] + self._beta)) ** k
            )
            kplus_reach_list.append(kplus_reach)
        if self._cpi:
            spend = impressions[0] * self._cpi
            return ReachPoint(impressions, kplus_reach_list, [spend])
        else:
            return ReachPoint(impressions, kplus_reach_list)

    def by_spend(self, spends: "list[float]", max_frequency: int = 1) -> ReachPoint:
        """Returns the estimated reach as a function of spend assuming constant CPM.

        Args:
            spends: list of floats of length 1, specifying the hypothetical spend.
            max_frequency: int, specifies the number of frequencies for which reach
                will be reported.

        Returns:
            A ReachPoint specifying the estimated reach for this spend.

        Raises:
            ValueError: If CPI is unknown or the spend vector is not of
                length 1.
        """
        if not self._cpi:
            raise ValueError("Impression cost is not known for this ReachPoint.")
        if len(spends) != 1:
            raise ValueError("Spend vector must have a length of 1.")
        return self.by_impressions([spends[0] / self._cpi], max_frequency)

    def impressions_for_spend(self, spend: float) -> float:
        # Convert a spend amount to impressions via the constant CPI.
        # (Annotation corrected: the division below yields a float.)
        if not self._cpi:
            raise ValueError("Impression cost is not known for this ReachPoint.")
        return spend / self._cpi
| {
"content_hash": "519b7639d427be48a28da145a9f749b1",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 87,
"avg_line_length": 41.4,
"alnum_prop": 0.6211543351131452,
"repo_name": "world-federation-of-advertisers/planning-evaluation-framework",
"id": "719afee04c65036aacb02d90b8c00c6c8168ebab",
"size": "4550",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/models/goerg_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1319945"
},
{
"name": "Python",
"bytes": "614182"
}
],
"symlink_target": ""
} |
'''
This module contains utility functions for producing the figures and tables,
primarily compiling the data for them to use.
'''
from os.path import join as _join
def crop_png(filename):
    '''Autocrop excess white margins from PNG file

    Args
    ----
    filename: str
        Path and filename with extension to .png file to crop
    '''
    from PIL import Image

    image = Image.open(filename)
    # getbbox() gives the bounding box of the non-zero region; cropping
    # to it and saving back trims the surrounding margins in place.
    bounding_box = image.getbbox()
    image.crop(bounding_box).save(filename)
    return None
def filt_paths(path_project, cfg_ann):
    '''Collect glide-data directories matching the ANN data parameters.

    Returns the matching data paths and their experiment names as
    numpy arrays, both sorted by data path.
    '''
    import numpy
    import os

    from ..config import paths, fnames
    from .. import utils

    # Get list of paths filtered by parameters in `cfg_ann['data']`
    path_tag = _join(path_project, paths['tag'])
    path_glide = _join(path_project, paths['glide'])

    data_paths = list()
    exp_names = list()
    for entry in os.listdir(path_tag):
        if not os.path.isdir(_join(path_tag, entry)):
            continue
        # Concatenate data path
        exp_glide_dir = _join(path_project, path_glide, entry)
        subdir = utils.get_subdir(exp_glide_dir, cfg_ann['data'])
        data_paths.append(_join(exp_glide_dir, subdir))
        exp_names.append(entry)

    order = numpy.argsort(data_paths)
    data_paths = numpy.array(data_paths)[order]
    exp_names = numpy.array(exp_names)[order]

    return data_paths, exp_names
def compile_exp_data(path_project, field, cfg_ann):
    '''Walk root tag directory and compile derived values to dataframe

    Args
    ----
    path_project: str
        Root directory of the smartmove project
    field: pandas.DataFrame
        Field experiments with added rho_mod
    cfg_ann: OrderedDict
        Configuration dictionary for ANN analysis; used to locate the
        matching subglide data subdirectory under data_accelerometer/

    Returns
    -------
    field_all: pandas.DataFrame
        One row per field experiment with derived summary values
        (duration, dive/subglide counts, densities, etc.)
    '''
    from collections import OrderedDict
    import numpy
    import os
    import pandas
    from pyotelem.plots import plotutils

    from ..ann import pre
    from ..config import paths, fnames

    def get_sgl_path(path_project, path_glide, path_exp, cfg):
        '''Concatenate path to subglide output directory'''
        from .. import utils
        glide_data_path = _join(path_project, path_glide, path_exp)
        subdir_path = utils.get_subdir(glide_data_path, cfg)
        glide_data_path = _join(glide_data_path, subdir_path)
        return glide_data_path

    cols = [ 'id', 'date', 'animal', 'mod_str', 'duration', 'n_dives',
            'n_sgls_asc', 'n_sgls_des', 'sgls_asc_str', 'sgls_des_str',
            'perc_des', 'perc_asc', 'tritium_id', 'density_kgm3', 'rho_mod']

    # Pre-allocate one object column per output field
    data = OrderedDict()
    exp_ids = field['exp_id'].values
    for c in cols:
        data[c] = numpy.array([None,]*len(exp_ids))

    # NOTE(review): pandas `.ix` (used throughout below) was removed in
    # pandas 1.0; this code requires an older pandas version.
    for i in range(len(exp_ids)):
        path = _join(path_project, paths['tag'], exp_ids[i])
        if os.path.isdir(path):
            # Experiment id encodes the date (YYYYMMDD prefix) and the
            # animal name (4th underscore-separated token)
            exp = field.ix[i, 'exp_id']
            data['date'][i] = '{}-{}-{}'.format(exp[:4], exp[4:6], exp[6:8])
            data['animal'][i] = exp.split('_')[3]

            file_tag = _join(path, 'pydata_{}.p'.format(exp))
            tag = pandas.read_pickle(file_tag)
            file_mask = _join(path, 'mask_tag.p')
            masks = pandas.read_pickle(file_mask)

            # Experiment duration from the first/last timestamps within
            # the experiment mask, formatted as a LaTeX h/min/sec string
            start = tag['datetimes'][masks['exp']].iloc[0]
            stop = tag['datetimes'][masks['exp']].iloc[-1]
            n_seconds = (stop - start).total_seconds()
            h, m, s = plotutils.hourminsec(n_seconds)
            data['duration'][i] = r"{:1.0f}h {:02.0f}$'$ {:02.0f}$''$".format(h, m, s)

            # Create string for mod type
            block_type = field.ix[i, 'block_type']
            n_blocks = int(field.ix[i, 'n_blocks'])
            if n_blocks > 0:
                data['mod_str'][i] = '{:1d} {:>7}'.format(n_blocks, block_type)
            else:
                data['mod_str'][i] = '{:>9}'.format(block_type)

            # Get number of subglides during descent & ascent
            cfg = cfg_ann['data']
            glide_data_path = get_sgl_path(path_project, paths['glide'], exp, cfg)
            sgls = pandas.read_pickle(_join(glide_data_path,
                                            fnames['glide']['sgls']))
            sgls = sgls[~numpy.isnan(sgls['dive_id'].astype(float))]
            # NOTE(review): `n_sgls_des` counts phase == 'ascent' and
            # `n_sgls_asc` counts 'descent' -- looks swapped; confirm.
            n_sgls_des = len(sgls[sgls['dive_phase'] == 'ascent'])
            n_sgls_asc = len(sgls[sgls['dive_phase'] == 'descent'])

            # Save number of SGLs with phase
            data['n_sgls_des'][i] = float(n_sgls_des)
            data['n_sgls_asc'][i] = float(n_sgls_asc)

            # Calculate percent of SGLs with phase per total N
            perc_des = n_sgls_des / (n_sgls_des+n_sgls_asc)*100
            perc_asc = n_sgls_asc / (n_sgls_des+n_sgls_asc)*100

            # Create string with number of SGLs with phase and (% of total N)
            fmt = '{:>4} ({:2.0f})'
            data['sgls_des_str'][i] = fmt.format(n_sgls_des, perc_des)
            data['sgls_asc_str'][i] = fmt.format(n_sgls_asc, perc_asc)

            # Save numeric percent of SGLs with phase per total N
            data['perc_des'][i] = perc_des
            data['perc_asc'][i] = perc_asc

            # Number of dives
            n_dives = len(numpy.unique(sgls['dive_id']))
            data['n_dives'][i] = n_dives

            # Isotop analysis ID
            data['tritium_id'][i] = field.ix[i, 'tritium_id']

            # Total original and modified body density
            data['density_kgm3'][i] = field.ix[i, 'density_kgm3']
            data['rho_mod'][i] = field.ix[i, 'rho_mod']

    # 1-based sequential experiment ids
    data['id'] = numpy.array(list(range(len(data['date'])))) + 1
    field_all = pandas.DataFrame(data)

    # Digitize animals: replace animal names with integer codes,
    # assigned in reverse-sorted name order
    animals = sorted(numpy.unique(field_all['animal']), reverse=True)
    i = 1
    for a in animals:
        ind = numpy.where(field_all['animal'] == a)
        field_all.loc[field_all.index[ind], 'animal'] = i
        i += 1

    # Convert columns to numeric dtypes where possible; non-numeric
    # columns are deliberately left as-is
    for key in field_all.columns:
        try:
            field_all.ix[:,key] = pandas.to_numeric(field_all[key])
        except:
            pass

    return field_all
def filter_dataframe(df, ignore):
    '''Return a copy of `df` restricted to columns not listed in `ignore`.'''
    keep = [col for col in df.columns if col not in ignore]
    return df[keep][:]
def parse_col_txt(cols):
    '''Split column names like "mass_kg" into a display name and a LaTeX unit.

    Args
    ----
    cols: list of str
        Column names, optionally suffixed with an underscore-separated
        unit code (one of: kg, cm, l, perc, kgm3).

    Returns
    -------
    (names, units): tuple of lists of str
        names[i] is the part before the first underscore with its first
        letter capitalized; units[i] is the LaTeX unit string, or '' if
        the column name has no unit suffix.
    '''
    # Unit string conversion.
    # Fix: the 'kgm3' entry was missing its closing '$', which left the
    # LaTeX math mode unbalanced (every other entry is $...$-wrapped).
    unit_dict = {'kg': r'$(kg)$', 'cm': r'$(cm)$', 'l': r'$(L)$',
                 'perc': r'$(\%)$',
                 'kgm3': r'$(kg \cdot m\textsuperscript{-3})$'}

    names = [''] * len(cols)
    units = [''] * len(cols)
    for i, col in enumerate(cols):
        parts = col.split('_')
        # Capitalize only the first character, preserving the rest
        names[i] = parts[0][0].upper() + parts[0][1:]
        if len(parts) > 1:
            units[i] = unit_dict[parts[1]]

    return names, units
def target_value_descr(post):
    '''Tabulate the density and lipid-percent range of each target bin.

    Builds "lo - hi" strings from consecutive bin edges stored under
    post['ann']['bins'] and returns them as a DataFrame with one row
    per (unique) density bin, numbered from 1.
    '''
    import numpy
    import pandas

    rho_edges = post['ann']['bins']['values']
    lip_edges = post['ann']['bins']['lipid_perc']

    str_rho = ['{} - {}'.format(lo, hi)
               for lo, hi in zip(rho_edges[:-1], rho_edges[1:])]
    str_lip = ['{:.2f} - {:.2f}'.format(lo, hi)
               for lo, hi in zip(lip_edges[:-1], lip_edges[1:])]

    ubins = numpy.unique(str_rho)

    dfout = pandas.DataFrame(index=range(len(ubins)),
                             columns=['bin', 'range_rho', 'range_lipid'])
    for i in range(len(ubins)):
        dfout.loc[i, 'bin'] = i + 1
        dfout.loc[i, 'range_rho'] = str_rho[i]
        dfout.loc[i, 'range_lipid'] = str_lip[i]

    return dfout
def target_value_stats(train, valid, test):
    '''Summarize target-bin counts across the train/valid/test splits.

    Each argument is an (X, y) pair; only the y element (bin labels) is
    used. Returns a DataFrame with one row per unique bin holding the
    1-based bin label, its sample count `n`, and its percentage `perc`
    of all samples.
    '''
    import numpy
    import pandas

    bins_all = numpy.hstack([train[1], valid[1], test[1]])
    ubins = numpy.unique(bins_all)

    dfout = pandas.DataFrame(index=range(len(ubins)),
                             columns=['bin', 'n', 'perc'])
    for i, b in enumerate(ubins):
        count = len(numpy.where(bins_all == b)[0])
        dfout.loc[i, 'bin'] = b + 1
        dfout.loc[i, 'n'] = count
        dfout.loc[i, 'perc'] = count / len(bins_all) * 100

    for key in ['n', 'perc']:
        dfout[key] = pandas.to_numeric(dfout[key])

    return dfout
def input_feature_stats(df, feature_cols):
    '''Produce stats for paper tables

    Args
    ----
    df: pandas.dataframe
        Final `sgls_all.p` dataframe filtered and saved to model output dir
    feature_cols: ndarray
        List of string names of feature columns; must align one-to-one
        with the hard-coded LaTeX feature labels below

    Returns
    -------
    df_out: pandas_dataframe
        Dataframe containing statistics for input features used in ANN
    '''
    import pandas

    # Human-readable LaTeX labels, in the same order as `feature_cols`
    features = [r'Absolute depth change ($m$)',
                r'Dive phase',
                r'Mean acceleration ($g$)',
                r'Mean depth ($m$)',
                r'Mean pitch ($\degree$)',
                r'Mean speed ($m \cdot s\textsuperscript{-1}$)',
                r'Mean seawater density ($kg \cdot m\textsuperscript{-3}$)',
                r'Total depth change ($m$)',
                r'Total speed change ($m \cdot s\textsuperscript{-1}$)']

    dfout = pandas.DataFrame(index=range(len(feature_cols)),
                             columns=['feature', 'min', 'max', 'mean', 'std'])
    dfout['feature'] = features

    for i, col in enumerate(feature_cols):
        series = df[col]
        dfout.loc[i, 'min'] = series.min()
        dfout.loc[i, 'max'] = series.max()
        dfout.loc[i, 'mean'] = series.mean()
        dfout.loc[i, 'std'] = series.std()

    for key in ['min', 'max', 'mean', 'std']:
        dfout[key] = pandas.to_numeric(dfout[key])

    return dfout
def last_monitors(results_dataset):
    '''Extract the final value of every monitor in `results_dataset`.

    Args
    ----
    results_dataset: OrderedDict
        Contains results from each trained ANN with varying fraction of
        training data; `monitors` holds one dict per network training.

    Returns
    -------
    m: dict
        `n_train` plus `train` and `valid` dicts, each mapping `err`,
        `loss` and `acc` to a float array with one entry (the last
        monitored value) per trained network.
    '''
    import numpy

    set_keys = ['train', 'valid']
    monitor_keys = ['err', 'loss', 'acc']

    summary = dict()
    summary['n_train'] = results_dataset['n_train']
    n_networks = len(results_dataset['monitors'])

    for skey in set_keys:
        # One float array per monitor, indexed by network training
        summary[skey] = {
            mkey: numpy.zeros(n_networks, dtype=float)
            for mkey in monitor_keys
        }
        for i, training in enumerate(results_dataset['monitors']):
            monitors = training[skey]
            # Keep only the last recorded value of each monitor
            for mkey in monitor_keys:
                summary[skey][mkey][i] = monitors[mkey][-1]

    return summary
#def perc_with_time(field_all):
# ''''Dive and sub-glide plot with increasing durations'''
# import datetime
# import numpy
# import pandas
# import seaborn
# import matplotlib.pyplot as plt
#
# dates = numpy.zeros(len(field_all), dtype=object)
# for i in range(len(field_all)):
# dates[i] = datetime.datetime.strptime(field_all['duration'][i],
# '%Hhr %Mmin')
# deltas = dates - datetime.datetime(1900,1,1)
# deltas = numpy.array(list(map(lambda x: x.seconds, deltas)))
# sort_ind = deltas.argsort()
#
# colors = seaborn.color_palette()
#
# cols = {'n_dives':'Dives', 'n_sgls_des':'subglides (descent)',
# 'n_sgls_asc':'subglides (ascent)'}
#
# fig, (ax0, ax1) = plt.subplots(1, 2)
#
# i = 0
# pos = 0
# width = 0.25
# for key in ['n_dives', 'n_sgls_des', 'n_sgls_asc']:
# if i == 0:
# ax = ax0
# offset = width
# else:
# ax = ax1
# pos += 1
# offset = (pos*width) + width
# labels = field_all.ix[sort_ind, 'label']
# xs = numpy.arange((len(labels))) + offset
# ys = field_all.ix[sort_ind, key]
# ax.bar(xs, ys, width, color=colors[i], label=cols[key])
# if i == 0:
# for l, x, y in zip(durs[sort_ind], xs, ys):
# text = ax.annotate(l, xy=(x,y),
# horizontalalignment='left',
# verticalalignment='bottom')
# text.set_rotation(45)
# ax.set_xticks(xs)
# ax.set_xticklabels(field_all.ix[sort_ind, 'id'])
# ax.set_ylabel('No. {}'.format(cols[key].split()[0]))
# plt.xlabel('Exp. Duration (minutes)')
# ax.legend(loc='upper left')
# i += 1
#
# plt.legend()
# plt.show()
#
# return None
| {
"content_hash": "8e2675250ef1f495ff14fe758a0a90de",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 86,
"avg_line_length": 31.539240506329115,
"alnum_prop": 0.5537807031626264,
"repo_name": "ryanjdillon/smartmove",
"id": "37d731a13b287121d3983ab0d8047ed167408abd",
"size": "12458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartmove/visuals/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188926"
}
],
"symlink_target": ""
} |
import os
import yaml
import popper.scm as scm
from hashlib import shake_256
from popper.cli import log as log
from box import Box
class ConfigLoader(object):
    """Builds the immutable Popper configuration (a frozen Box)."""

    @staticmethod
    def load(
        engine_name=None,
        resman_name=None,
        config_file=None,
        workspace_dir=os.getcwd(),
        reuse=False,
        dry_run=False,
        quiet=False,
        skip_pull=False,
        skip_clone=False,
        pty=False,
        allow_undefined_secrets_in_ci=False,
    ):
        """Loads and creates a configuration, represented by a frozen Box.

        Engine/resource-manager names follow the precedence: explicit
        argument, then config file, then defaults ('docker' / 'host').

        Returns:
            box.Box: Read-only (frozen) configuration object.
        """
        # NOTE(review): workspace_dir's default is evaluated once at
        # import time (CWD of the importing process); callers normally
        # pass an explicit value.
        workspace_dir = os.path.realpath(workspace_dir)
        repo = scm.new_repo(workspace_dir)

        # path to cache: POPPER_CACHE_DIR overrides everything, else
        # follow the XDG convention (XDG_CACHE_HOME or ~/.cache)
        if os.environ.get("POPPER_CACHE_DIR", None):
            cache_dir = os.environ["POPPER_CACHE_DIR"]
        else:
            cache_dir_default = os.path.join(os.environ["HOME"], ".cache")
            cache_dir = os.environ.get("XDG_CACHE_HOME", cache_dir_default)
            cache_dir = os.path.join(cache_dir, "popper")

        from_file = ConfigLoader.__load_config_from_file(
            config_file, engine_name, resman_name
        )

        pp_config = {
            "workspace_dir": workspace_dir,
            "reuse": reuse,
            "dry_run": dry_run,
            "quiet": quiet,
            "skip_pull": skip_pull,
            "skip_clone": skip_clone,
            "pty": pty,
            "allow_undefined_secrets_in_ci": allow_undefined_secrets_in_ci,
            # if no git repository exists in workspace_dir or its parents, the repo
            # variable is None and all git_* variables are assigned to 'na'
            "repo": repo,
            "git_commit": scm.get_sha(repo),
            "git_sha_short": scm.get_sha(repo, short=7),
            "git_branch": scm.get_branch(repo),
            "git_tag": scm.get_tag(repo),
            "git_remote_origin_url": scm.get_remote_url(repo),
            # wid is used to associate a unique id to this workspace. This is then
            # used by runners to name resources in a way that there is no name
            # clash between concurrent workflows being executed
            "wid": shake_256(workspace_dir.encode("utf-8")).hexdigest(4),
            "cache_dir": cache_dir,
            "engine_name": from_file["engine_name"],
            "resman_name": from_file["resman_name"],
            "engine_opts": from_file["engine_opts"],
            "resman_opts": from_file["resman_opts"],
        }

        return Box(pp_config, default_box=True, frozen_box=True)

    @staticmethod
    def __load_config_from_file(config_file, engine_name, resman_name):
        """Merge CLI-provided names with the parsed config file.

        Args:
            config_file: Path to (or dict form of) the config file.
            engine_name(str): Engine name from the CLI, if given.
            resman_name(str): Resource manager name from the CLI.

        Returns:
            dict: engine/resman names plus their option dicts.
        """
        from_file = ConfigLoader.__load_config_file(config_file)
        loaded_conf = {}

        # a config file that defines an 'engine' section must name it
        eng_section = from_file.get("engine", None)
        eng_from_file = from_file.get("engine", {}).get("name")
        if from_file and eng_section and not eng_from_file:
            log.fail("No engine name given.")

        # same rule for the 'resource_manager' section
        resman_section = from_file.get("resource_manager", None)
        resman_from_file = from_file.get("resource_manager", {}).get("name")
        if from_file and resman_section and not resman_from_file:
            log.fail("No resource manager name given.")

        # set name in precedence order (or assign default values)
        if engine_name:
            loaded_conf["engine_name"] = engine_name
        elif eng_from_file:
            loaded_conf["engine_name"] = eng_from_file
        else:
            loaded_conf["engine_name"] = "docker"

        if resman_name:
            loaded_conf["resman_name"] = resman_name
        elif resman_from_file:
            loaded_conf["resman_name"] = resman_from_file
        else:
            loaded_conf["resman_name"] = "host"

        engine_opts = from_file.get("engine", {}).get("options", {})
        resman_opts = from_file.get("resource_manager", {}).get("options", {})
        loaded_conf["engine_opts"] = engine_opts
        loaded_conf["resman_opts"] = resman_opts

        return loaded_conf

    @staticmethod
    def __load_config_file(config_file):
        """Validate and parse the engine configuration file.

        Args:
            config_file(str): Path to the file to be parsed. A dict is
                returned unchanged; a falsy value yields an empty dict.

        Returns:
            dict: Engine configuration.
        """
        if isinstance(config_file, dict):
            return config_file

        if not config_file:
            return dict()

        if not os.path.exists(config_file):
            log.fail(f"File {config_file} was not found.")

        if not config_file.endswith(".yml"):
            log.fail("Configuration file must be a YAML file.")

        with open(config_file, "r") as cf:
            data = yaml.load(cf, Loader=yaml.Loader)

        if not data:
            log.fail("Configuration file is empty.")

        return data
| {
"content_hash": "4370c88770797d3cc187fa38f9ebc06b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 83,
"avg_line_length": 34.62589928057554,
"alnum_prop": 0.5732391439850405,
"repo_name": "systemslab/popper",
"id": "a5138b1a6f01923567cb03da951a088962768dbb",
"size": "4813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/popper/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "1473"
},
{
"name": "Python",
"bytes": "105732"
},
{
"name": "Shell",
"bytes": "42622"
}
],
"symlink_target": ""
} |
import json
import click
from tabulate import tabulate
@click.command('keys', short_help='List API keys')
@click.pass_obj
def cli(obj):
    """List API keys.

    Emits raw JSON when the configured output format is 'json';
    otherwise renders a table via tabulate using the configured
    format (e.g. 'simple', 'grid') and timezone.
    """
    client = obj['client']
    if obj['output'] == 'json':
        r = client.http.get('/keys')
        click.echo(json.dumps(r['keys'], sort_keys=True, indent=4, ensure_ascii=False))
    else:
        timezone = obj['timezone']
        # Maps key attribute names to the column headers shown by tabulate
        headers = {
            'id': 'ID', 'key': 'API KEY', 'user': 'USER', 'scopes': 'SCOPES', 'text': 'TEXT',
            'expireTime': 'EXPIRES', 'count': 'COUNT', 'lastUsedTime': 'LAST USED', 'customer': 'CUSTOMER'
        }
        click.echo(tabulate([k.tabular(timezone) for k in client.get_keys()], headers=headers, tablefmt=obj['output']))
| {
"content_hash": "96d5af216ec42c7f07aebec2712ed26a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 119,
"avg_line_length": 33.86363636363637,
"alnum_prop": 0.5879194630872483,
"repo_name": "alerta/python-alerta",
"id": "d740eb181310424e45c37f803199432700ee061f",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alertaclient/commands/cmd_keys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106941"
}
],
"symlink_target": ""
} |
from display.handlers.base import BaseHandler
class UiButtonsHandler(BaseHandler):
    """Handler for the UI buttons demo page."""

    def get(self):
        """Handle GET: render the buttons template with a page title."""
        title = 'UiButtonsHandler'
        self.render('ui/buttons.html', title = title, **self.render_dict)
"content_hash": "2b700e634022d39dd3109775cd3dca34",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.7109004739336493,
"repo_name": "owlsn/h_crawl",
"id": "1aea0afa1fe4aa4f453e07ed67d5fe3b3890f445",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "display/display/handlers/ui/buttons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "3257"
},
{
"name": "Python",
"bytes": "6605"
}
],
"symlink_target": ""
} |
import handson.myyaml
# import logging
import unittest
from handson.test_setup import SetUp
from handson.vpc import VPC
class TestVPC(SetUp, unittest.TestCase):
    """Tests for the VPC wrapper's YAML-backed caching."""

    def test_vpc_cache(self):
        """Exercise both code paths of VPC.vpc_obj(): the initial YAML
        load and the subsequent cached lookup."""
        self.reset_yaml()
        handson.myyaml.yaml_file_name('./aws.yaml')
        handson.myyaml.load()
        v = VPC({})
        v.vpc_obj()  # loads from yaml
        v.vpc_obj()  # loads from cache
| {
"content_hash": "1862240dc1b8e965a5a637517c04197b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 51,
"avg_line_length": 23.647058823529413,
"alnum_prop": 0.6442786069651741,
"repo_name": "smithfarm/ceph-auto-aws",
"id": "e342991901a5eb0e4a7ccf45d1e5a604d6b222c4",
"size": "1926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_vpc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "425"
},
{
"name": "Makefile",
"bytes": "655"
},
{
"name": "Python",
"bytes": "156699"
},
{
"name": "SaltStack",
"bytes": "4167"
},
{
"name": "Shell",
"bytes": "23573"
}
],
"symlink_target": ""
} |
import os
from common import config
# TODO check for path existence in all filepath functions
# not temporary
def getimagepath(imagename, *subdirs):
    '''Return the full path for an image file under the project images dir.

    Args:
        imagename: file name of the image.
        *subdirs: optional subdirectory components under "images".

    Returns:
        Path to the image file; missing subdirectories are created.
    '''
    imagedir = os.path.join(config.PROJECT_ROOT, "images")
    if len(subdirs):
        dirlist = [imagedir] + list(subdirs)
        imagedir = os.path.join(*dirlist)
        if not os.path.exists(imagedir):
            # makedirs creates missing intermediate levels too; os.mkdir
            # raised OSError when more than one subdir level was absent.
            os.makedirs(imagedir)
    return os.path.join(imagedir, imagename)
def getdatadir(*subdirs):
    """Return the data directory, optionally extended by *subdirs*."""
    # os.path.join with no extra components returns DATA_DIR unchanged,
    # so both the empty and non-empty cases collapse to one call
    return os.path.join(config.DATA_DIR, *subdirs)
def getdatapath(*args):
    """Return a file path inside the data dir; args = (filename, subdir, ...)."""
    filename = args[0]
    return os.path.join(getdatadir(*args[1:]), filename)
def getcachedir(*subdirs):
    """Return (creating it if needed) a cache dir under DATA_CACHE_DIR.

    Uses os.makedirs (rather than os.mkdir) so that a chain of nested
    subdirectories is created in one call -- os.mkdir raises OSError when
    the parent of the leaf directory does not exist yet.
    """
    directory = os.path.join(config.DATA_CACHE_DIR, *subdirs)
    if not os.path.exists(directory):
        os.makedirs(directory)
    return directory
def getcache(*args):
    """Return a file path inside the cache dir; args = (filename, subdir, ...)."""
    filename = args[0]
    return os.path.join(getcachedir(*args[1:]), filename)
def getcachecontents(*subdirs):
    """List the entries of the given cache (sub)directory."""
    return os.listdir(getcachedir(*subdirs))
| {
"content_hash": "519a5f22dd6c072c50299cd4bf03a539",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 58,
"avg_line_length": 26.901960784313726,
"alnum_prop": 0.6552478134110787,
"repo_name": "sonya/eea",
"id": "332caa98309e62368a9249d48db5833dccebffea",
"size": "1952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/common/fileutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "441743"
},
{
"name": "Shell",
"bytes": "31869"
}
],
"symlink_target": ""
} |
"""Logging interface
This exposes a simplified form of logging and helps consolidate config. The
intended default logging setup is to log locally to a file and also to
AWS DynamoDB so that logs can be aggregated an exposed easily.
"""
import logging
log = None
def set_config(log_dir, filename='log.txt'):
    """Invoked once during config initialization.

    Configures the root logger to write INFO-level records to
    ``log_dir + filename`` and installs it as the module-level ``log`` so
    that error()/info() start emitting.  Returns the configured logger.
    """
    # bug fix: without ``global`` the assignment below created a local,
    # leaving module-level ``log`` as None forever (error/info were no-ops)
    global log
    # NOTE(review): plain concatenation -- callers must pass log_dir with a
    # trailing separator; consider os.path.join
    log_file = log_dir + filename
    logging.basicConfig(
        filename=log_file,
        format='%(levelname)s:%(message)s',
        # bug fix: basicConfig's keyword is ``level``, not ``loglevel``;
        # the unknown kwarg meant the level was never applied
        level=logging.INFO)
    log = logging.getLogger()
    log.setLevel(logging.INFO)
    info('Using log file: %s' % log_file)
    return log
def error(*args, **kwargs):
    """Forward to the configured logger's error(); silent if unconfigured."""
    if not log:
        return
    log.error(*args, **kwargs)
def info(*args, **kwargs):
    """Forward to the configured logger's info(); silent if unconfigured."""
    if not log:
        return
    log.info(*args, **kwargs)
| {
"content_hash": "f1d871d640a432171e21483faba09702",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 24.78125,
"alnum_prop": 0.6645649432534678,
"repo_name": "jfalkner/acumen",
"id": "c577a83dc3d63db8d05b0aa38a66c8699649ad8b",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acumen/utils/log/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1626"
},
{
"name": "HTML",
"bytes": "8386"
},
{
"name": "JavaScript",
"bytes": "68725"
},
{
"name": "Python",
"bytes": "62524"
}
],
"symlink_target": ""
} |
import networkx as nx
import random as rnd
import numpy as np
from time import clock
from collections import defaultdict
import matplotlib.pyplot as plt
import zipfile
## local modules ##
import percolations as perc
import pretty_print as pp
### data structures ###
# d_node_age[nodenumber] = ageclass
d_node_age = {}
### simulation parameters ###
# Python 2 script (print statements, xrange): percolation-style SIR
# simulations over an age-structured contact network, sweeping child
# susceptibility values and archiving per-sim infection/recovery times.
numsims = 800 # number of simulations
size_epi = 515 # threshold value that designates an epidemic in the network (5% of network)
# gamma = probability of recovery at each time step
# on avg, assume 5 days till recovery
gamma = 1/float(5) # 5 days recovery here
T = 0.0643 # total epidemic size (naive, no age-dep params) = 20%
# T = 0.075 # total epidemic size (naive, no age-dep params) = 30%
# T = 0.0620 # T_avg = 0.0643 @ sigma_c = 1.15
# T = beta / (beta + gamma)
# when T = 0.0643 and gamma = 1/5, b = 0.0137
# when T = 0.075 and gamma = 1/5, b = 0.0162
# solve T = b / (b + gamma) for the per-timestep transmission rate b
b = (-T * gamma)/(T - 1)
# define different adult susceptibilities
# Cauchemez 2004 cites child susceptibility to be 1.15 times greater than that of adults
# 3/22/14: During severe seasons, the epi curves for children tend to be the same magnitude but those of adults tend to be larger than normal. Perhaps the transmissibility and susceptibility of adults is higher in severe seasons, so that is what is being checked by changing T to refer to adults.
s1, s2 = 1, 1.5
susc_list = np.linspace(s1, s2, num=6, endpoint=True)
### import data ###
# NOTE(review): hard-coded absolute paths; file handles are never close()d
graph = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Data/urban_edges_Sarah.csv') # Vancouver network
graph_ages = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Data/urban_ages_Sarah.csv') # node number and age class
### construct age-structured network ###
G=nx.Graph()
for edge in graph:
    G.add_edge(*edge.strip().split(','))
for line in graph_ages:
    new_line = line.split()
    # NOTE(review): the inner loop reuses the name ``line``, shadowing the
    # outer loop variable; works, but confusing to read
    for line in new_line:
        node, age = line.split(',')
        d_node_age[node] = age # node-ageclass dictionary
N = float(G.order())
print "network size:", N
# number of nodes of each age class
c_size, a_size = perc.child_adult_size(d_node_age)
### ziparchive to write results ###
zipname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Results/childsuscep_time_%ssims_beta%.3f_suscep%.1f-%.1f_vax0.zip' %(numsims, b, s1, s2)
###############################################
### susceptibility simulations ###
totaltime = clock()
for s in susc_list:
    print "child susceptibility for current sims:", s
    # create dict for susceptibilities
    # children are the third age class in d_node_age, adults are the fourth
    age_susc_list = [1, 1, s, 1, 1, 1]
    # d_age_susc[str(age class code)] = susceptibility value
    # NOTE(review): zip() truncates to the 6 values in age_susc_list, so age
    # class '7' gets no entry here -- confirm this is intended
    d_age_susc = dict(zip('1,2,3,4,5,6,7'.split(','), age_susc_list))
    print d_age_susc.items()
    ## save infection and recovery tsteps for each sim
    # d_save_I_tstep[simnumber] (or d_save_R_tstep) = [time step of infection/recovery where index = node number - 1 else 0]
    d_save_I_tstep = defaultdict(list)
    d_save_R_tstep = defaultdict(list)
    # timer for all sims of one adult susceptibility
    start_all = clock()
    for num in xrange(numsims):
        start = clock()
        total_rec, I_tstep_list, R_tstep_list = perc.episim_age_time_susc(G, d_node_age, b, gamma, d_age_susc)
        d_save_I_tstep[num] = I_tstep_list
        d_save_R_tstep[num] = R_tstep_list
        print "simtime, simnum, episize:", clock() - start, "\t", num, "\t", total_rec
    print "simtime for %s sims for child suscep %1.1f" %(numsims, s), clock() - start_all
    # print tsteps of infection and recovery to recreate sim
    # sort order of sims so that the rows in d_save_I_tstep and d_save_R_tstep will match each other
    filename = 'Results/Itstep_childsusc_time_%ssims_beta%.3f_susc%.1f_vax0.txt' %(numsims, b, s)
    pp.print_sorteddlist_to_file(d_save_I_tstep, filename, numsims)
    pp.compress_to_ziparchive(zipname, filename)
    filename = 'Results/Rtstep_childsusc_time_%ssims_beta%.3f_susc%.1f_vax0.txt' %(numsims, b, s)
    pp.print_sorteddlist_to_file(d_save_R_tstep, filename, numsims)
    pp.compress_to_ziparchive(zipname, filename)
print "total time for sims:", clock() - totaltime
# reference table: probability of infection before adjusting for susceptibility
for inf in range(52):
    print inf, 1- np.exp(-b * inf)
# NOTE(review): the 1.15 multiplier is applied to the a_size term here while
# the comments above describe a child multiplier -- verify which group this
# weighted average is meant to up-weight
T_avg = (T * 1.15 * a_size + T * (N - a_size))/N
print "T_avg:", T_avg
| {
"content_hash": "a06ee9fd8f7a398bf06e839e122aea88",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 296,
"avg_line_length": 40.407407407407405,
"alnum_prop": 0.6968377635197067,
"repo_name": "eclee25/flu-SDI-simulations-age",
"id": "256689cd9c7722c5344e4d182794de61fea1a67d",
"size": "5257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "age_time_childsuscep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "435037"
}
],
"symlink_target": ""
} |
import os
# Default configuration values for zoro.
log_dir = '/data/logs/zoro'
log_level = 'DEBUG'
log_console = True
plugins_path = './plugins/'
# bug fix: os.getenv("HOME") returns None when HOME is unset (e.g. in a
# stripped service environment), which made os.path.join below raise a
# TypeError at import time.  expanduser("~") yields the same value when
# HOME is set and falls back gracefully otherwise.
user_home_path = os.path.expanduser("~")
user_plugins_path = os.path.join(user_home_path, '.zoro/plugins/')
user_config_path = os.path.join(user_home_path, '.zoro/config.json')
| {
"content_hash": "71f65b8a0372109dfa13759bdf846a78",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 68,
"avg_line_length": 25.363636363636363,
"alnum_prop": 0.6917562724014337,
"repo_name": "onlytiancai/zoro",
"id": "7e7194030c332666c68946ad40c59655ae524be2",
"size": "326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zoro/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27810"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = 'Create an admin user (if not exists) with password from env'

    def add_arguments(self, parser):
        parser.add_argument('--email', type=str, default='', dest='email')
        parser.add_argument('--password', type=str, default='', dest='pass')

    def handle(self, *args, **kwargs):
        """Create the admin user and attach it to the standard groups.

        Falls back to settings.SETUP_ADMIN_EMAIL / SETUP_ADMIN_PASSWORD
        when the command-line options are empty.  Both steps are
        best-effort: an existing user or missing groups are tolerated.
        """
        admin_email = kwargs['email'] if kwargs['email'] else settings.SETUP_ADMIN_EMAIL
        admin_password = kwargs['pass'] if kwargs['pass'] else settings.SETUP_ADMIN_PASSWORD
        try:
            admin_user = User.objects.create_user('admin', admin_email, admin_password)
        except Exception:
            # bug fix: the original bare ``except:`` left ``admin_user``
            # unbound when creation failed (user already exists), causing a
            # NameError in the group step below; fetch the existing user.
            admin_user = User.objects.filter(username='admin').first()
        if admin_user is None:
            return
        try:
            admin_group = Group.objects.get(name='admin')
            staff_group = Group.objects.get(name='staff')
            user_group = Group.objects.get(name='user')
            admin_user.groups.add(admin_group, staff_group, user_group)
        except Exception:
            # groups may not have been created yet; best-effort, as before
            pass
| {
"content_hash": "3be9f9c89ce6b750bb7f93fa9e47547f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 92,
"avg_line_length": 37.172413793103445,
"alnum_prop": 0.6363636363636364,
"repo_name": "GETLIMS/LIMS-Backend",
"id": "a550fece56fca82e912bf98f65e52491a069dc8a",
"size": "1078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lims/shared/management/commands/create_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "474"
},
{
"name": "Python",
"bytes": "231759"
}
],
"symlink_target": ""
} |
"""
Functions for reading and writing NMRPipe files and table (.tab) files
"""
from __future__ import print_function, division
__developer_info__ = """
NMRPipe file structure is described in the NMRPipe man pages and fdatap.h
"""
import struct
import datetime
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from warnings import warn
import numpy as np
from . import fileiobase
from .table import pipe2glue, glue2pipe, guess_pformat
#########################
# table reading/writing #
#########################
def read_table(filename):
    """
    Read a NMRPipe database table (.tab) file.

    Parameters
    ----------
    filename : str
        Filename of NMRPipe table file to read.

    Returns
    -------
    pcomments : list
        List of NMRPipe comment lines
    pformat : list
        List of NMRPipe table column format strings.
    rec : recarray
        Records array with named fields.

    See Also
    --------
    write_table : Write a NMRPipe table file.
    """
    # divide up into comment lines and data lines
    specials = ["VARS", "FORMAT", "NULLSTRING", "NULLVALUE", "REMARK", "DATA"]
    # NOTE(review): the file is opened in binary mode but its lines are
    # compared against str prefixes below; on Python 3 a bytes-vs-str
    # comparison is always False, so every line would land in ``dl``.
    # This looks written for Python 2 semantics -- confirm before use on 3.
    f = open(filename, 'rb')
    cl = []
    dl = []
    for line in f:
        for k in specials:
            if line[:len(k)] == k:
                cl.append(line)
                break
        else:
            # no special prefix matched: this is a data line
            dl.append(line)
    f.close()
    # pull out and parse the VARS line (column names)
    vl = [i for i, l in enumerate(cl) if l[:4] == "VARS"]
    if len(vl) != 1:
        raise IOError("%s has no/more than one VARS line" % (filename))
    dtd = {'names': cl.pop(vl[0]).split()[1:]}
    # pull out and parse the FORMAT line (per-column printf-style formats)
    fl = [i for i, l in enumerate(cl) if l[:6] == "FORMAT"]
    if len(fl) != 1:
        raise IOError("%s has no/more than one FORMAT line" % (filename))
    pformat = cl.pop(fl[0]).split()[1:]
    # map the format's final conversion character to a numpy dtype string
    p2f = {'d': 'i4', 'f': 'f8', 'e': 'f8', 's': 'S256'}  # pipe -> format
    dtd['formats'] = [p2f[i[-1]] for i in pformat]
    # DEBUG
    # print(dtd['names'],dtd['formats'])
    s = StringIO("".join(dl))
    # the comments= sentinel is a string that never occurs, disabling
    # comment stripping inside recfromtxt
    rec = np.recfromtxt(s, dtype=dtd, comments='XXXXXXXXXXX')
    return cl, pformat, np.atleast_1d(rec)
def write_table(filename, pcomments, pformats, rec, overwrite=False):
    """
    Write a NMRPipe database table (.tab) file.

    Parameters
    ----------
    filename : str
        Filename of file to write to.
    pcomments : list
        List of NMRPipe comment lines.
    pformats :
        List of NMRPipe table column formats strings.
    rec : recarray
        Records array of table.
    overwrite : bool, optional
        True to overwrite file if it exists, False will raise a Warning if
        the file exists.

    See Also
    --------
    read_table : Read a NMRPipe table file.
    """
    if len(rec[0]) != len(pformats):
        msg = "number of rec columns %i and pformat elements %i do not match"
        raise ValueError(msg % (len(rec[0]), len(pformats)))
    fh = fileiobase.open_towrite(filename, overwrite)
    # header: VARS line, FORMAT line, then any comment lines
    fh.write("VARS " + " ".join(rec.dtype.names) + "\n")
    fh.write("FORMAT " + " ".join(pformats) + "\n")
    for comment in pcomments:
        fh.write(comment)
    # one formatted line per record
    row_fmt = " ".join(pformats) + "\n"
    for row in rec:
        fh.write(row_fmt % tuple(row))
    fh.close()
    return
###################
# unit conversion #
###################
def make_uc(dic, data, dim=-1):
    """
    Create a unit conversion object.

    Parameters
    ----------
    dic : dict
        Dictionary of NMRPipe parameters.
    data : ndarray
        Array of NMR data.
    dim : int, optional
        Dimension number to create unit conversion object for. Default is
        for last (direct) dimension.

    Returns
    -------
    uc : unit conversion object
        Unit conversion object for given dimension.
    """
    if dim == -1:
        dim = data.ndim - 1  # last dimension
    # NMRPipe axis name (FDF1/FDF2/...) for the requested dimension
    fn = "FDF" + str(int(dic["FDDIMORDER"][data.ndim - 1 - dim]))
    size = float(data.shape[dim])
    # check for quadrature in indirect dimensions: complex points are
    # stored interleaved, so the true point count is half the stored size
    if (dic[fn + "QUADFLAG"] != 1) and (dim != data.ndim - 1):
        size = size / 2.
        cplx = True
    else:
        cplx = False
    # guard against unset (zero) spectral width / observe frequency
    sw = dic[fn + "SW"]
    if sw == 0.0:
        sw = 1.0
    obs = dic[fn + "OBS"]
    if obs == 0.0:
        obs = 1.0
    # calculate the carrier from the origin, the left most point which has a
    # frequency of CAR*OBS - SW * (N/2 - 1) / 2,
    # see Fig 3.1 on p.36 of Hoch and Stern
    # The carrier should have units of MHz so solve the above for CAR*OBS
    orig = dic[fn + "ORIG"]
    car = orig + sw / 2. - sw / size
    return fileiobase.unit_conversion(size, cplx, sw, obs, car)
############################
# dictionary/data creation #
############################
# map from indirect-dimension encoding name -> NMRPipe FD2DPHASE code
fd2dphase_dic = {"magnitude": 0, "tppi": 1, "states": 2, "image": 3}
def create_data(data):
    """
    Create a NMRPipe data array (recast into float32 or complex64).
    """
    # NMRPipe stores single-precision values; pick the matching dtype
    target = "complex64" if np.iscomplexobj(data) else "float32"
    return np.array(data, dtype=target)
########################
# universal dictionary #
########################
def guess_udic(dic, data):
    """
    Guess parameters of universal dictionary from dic, data pair.

    Parameters
    ----------
    dic : dict
        Dictionary of NMRPipe parameters.
    data : ndarray
        Array of NMR data.

    Returns
    -------
    udic : dict
        Universal dictionary of spectral parameters.
    """
    # start from an empty universal dictionary
    udic = fileiobase.create_blank_udic(data.ndim)
    # FD2DPHASE code -> universal-dictionary encoding name
    encodings = {0: "magnitude", 1: "tppi", 2: "states", 3: "image",
                 4: "array"}
    for axis in range(data.ndim):
        udic[axis]["size"] = data.shape[axis]  # size from data shape
        # determine the NMRPipe axis name (FDF1, FDF2, ...)
        fn = "FDF" + str(int(dic["FDDIMORDER"][data.ndim - 1 - axis]))
        # directly corresponding parameters
        udic[axis]["sw"] = dic[fn + "SW"]
        udic[axis]["obs"] = dic[fn + "OBS"]
        udic[axis]["car"] = dic[fn + "CAR"] * dic[fn + "OBS"]  # ppm->hz
        udic[axis]["label"] = dic[fn + "LABEL"]
        # QUADFLAG == 1 marks real data
        udic[axis]["complex"] = dic[fn + "QUADFLAG"] != 1
        # FTFLAG == 0 marks the time domain
        is_time = dic[fn + "FTFLAG"] == 0
        udic[axis]["time"] = is_time
        udic[axis]["freq"] = not is_time
        if axis != 0:
            udic[axis]["encoding"] = encodings.get(dic["FD2DPHASE"],
                                                   "unknown")
    return udic
def create_dic(udic, datetimeobj=None):
    """
    Create a NMRPipe parameter dictionary from universal dictionary.

    This function does not update the dictionary keys that are unknown such
    as MIN/MAX, apodization and processing parameters, and sizes in
    none-current domain. Also rounding of parameter is different than
    NMRPipe.

    Parameters
    ----------
    udic : dict
        Universal dictionary of spectral parameters.
    datetimeobj : datetime object, optional
        Datetime to record in NMRPipe dictionary.  Defaults to the current
        time at call.

    Returns
    -------
    dic : dict
        Dictionary NMRPipe parameters.
    """
    # bug fix: the original default was ``datetime.datetime.now()`` in the
    # signature, which is evaluated once at import time -- every call
    # without an explicit datetimeobj recorded the module-import timestamp
    # instead of the current time.
    if datetimeobj is None:
        datetimeobj = datetime.datetime.now()
    dic = create_empty_dic()  # create the empty dictionary
    dic = datetime2dic(datetimeobj, dic)  # add the datetime to the dictionary
    # fill global dictionary parameters
    dic["FDDIMCOUNT"] = float(udic["ndim"])
    # FD2DPHASE from the direct dimension's encoding
    if udic[0]["encoding"] == "tppi":
        dic["FD2DPHASE"] = 1.0
    elif (udic[0]["encoding"] == "complex" or
          udic[0]["encoding"] == "states" or
          udic[0]["encoding"] == "states-tppi"):
        dic["FD2DPHASE"] = 2.0
    else:
        dic["FD2DPHASE"] = 0.0
    # fill in parameters for each dimension
    for i, adic in enumerate([udic[k] for k in range(udic["ndim"])]):
        n = int((dic["FDDIMCOUNT"] - 1) - i)
        dic = add_axis_to_dic(dic, adic, n)
    if dic["FDDIMCOUNT"] >= 3:  # at least 3D
        dic["FDFILECOUNT"] = dic["FDF3SIZE"] * dic["FDF4SIZE"]
    # all-real data sets get the global quadrature flag
    if ((dic["FDF1QUADFLAG"] == dic["FDF2QUADFLAG"] == dic["FDF3QUADFLAG"]) and
            (dic["FDF1QUADFLAG"] == dic["FDF4QUADFLAG"] == 1)):
        dic["FDQUADFLAG"] = 1.0
    return dic
def add_axis_to_dic(dic, adic, n):
    """
    Add an axis dictionary (adic) to a NMRPipe dictionary (dic) as axis n.

    Parameters
    ----------
    dic : dict
        NMRPipe parameter dictionary to update (modified in place).
    adic : dict
        Universal-dictionary axis entry (keys: sw, obs, car, label,
        complex, time, size, encoding).
    n : int
        Axis number: 0 is the direct dimension, 1-3 are indirect.

    Returns
    -------
    dic : dict
        The updated NMRPipe dictionary.
    """
    # determine F1, F2, F3, ... prefix for this axis
    fn = ["FDF2", "FDF1", "FDF3", "FDF4"][n]
    # parameters stored directly in the dictionary
    dic[fn + "SW"] = float(adic["sw"])
    dic[fn + "OBS"] = float(adic["obs"])
    dic[fn + "CAR"] = float(adic["car"] / adic["obs"])
    dic[fn + "LABEL"] = adic["label"]
    if adic["complex"]:
        dic[fn + "QUADFLAG"] = 0.0
    else:
        dic[fn + "QUADFLAG"] = 1.0
    # determine R|I size (indirect complex axes store R and I interleaved)
    if adic["complex"] and n != 0:
        psize = adic["size"] / 2.
    else:
        psize = adic["size"] / 1.
    # origin calculation size
    osize = psize
    # set FT/TD SIZE and FTFLAG depending on domain
    if adic["time"]:
        dic[fn + "TDSIZE"] = psize
        dic[fn + "FTFLAG"] = 0.0
    else:
        dic[fn + "FTSIZE"] = psize
        dic[fn + "FTFLAG"] = 1.0
    # apodization and center
    dic[fn + "APOD"] = dic[fn + "TDSIZE"]
    if n == 0 or dic["FD2DPHASE"] != 1:
        dic[fn + "CENTER"] = int(psize / 2.) + 1.
    else:   # TPPI requires division by 4
        dic[fn + "CENTER"] = int(psize / 4.) + 1
        osize = psize / 2.
    # origin (last point) is CAR*OBS-SW*(N/2-1)/N
    # see Fig 3.1 on p.36 of Hoch and Stern
    dic[fn + "ORIG"] = (dic[fn + "CAR"] * dic[fn + "OBS"] - dic[fn + "SW"] *
                        (osize - dic[fn + "CENTER"]) / osize)
    if n == 0:          # direct dim
        dic["FDSIZE"] = psize
        dic["FDREALSIZE"] = psize
    if n == 1:          # first indirect
        dic["FDSPECNUM"] = float(adic["size"])  # R+I
        if adic["encoding"] == 'complex':
            dic["FDF1AQSIGN"] = 0
        if adic["encoding"] == 'states':
            dic["FDF1AQSIGN"] = 0   # should this be 2?
        elif adic["encoding"] == 'states-tppi':
            dic["FDF1AQSIGN"] = 16
    if n == 2:          # second indirect
        if adic["complex"]:
            dic["FDF3SIZE"] = psize * 2
        else:
            dic["FDF3SIZE"] = psize
    if n == 3:          # third indirect
        if adic["complex"]:
            dic["FDF4SIZE"] = psize * 2
        else:
            # bug fix: this branch previously wrote FDF3SIZE, clobbering
            # the second indirect dimension's size for a real 4th dimension
            dic["FDF4SIZE"] = psize
    return dic
def create_empty_dic():
    """
    Creates a NMRPipe dictionary with default values.
    """
    # start from an all-zero 512-word header
    dic = fdata2dic(np.zeros((512), dtype="float32"))
    # parameters which default to 1
    for key in ("FDF1CENTER", "FDF2CENTER", "FDF3CENTER", "FDF4CENTER",
                "FDF3SIZE", "FDF4SIZE",
                "FDF1QUADFLAG", "FDF2QUADFLAG", "FDF3QUADFLAG",
                "FDF4QUADFLAG",
                "FDSPECNUM", "FDFILECOUNT", "FD2DVIRGIN"):
        dic[key] = 1.
    # dimension ordering
    dic["FDDIMORDER1"] = 2.0
    dic["FDDIMORDER2"] = 1.0
    dic["FDDIMORDER3"] = 3.0
    dic["FDDIMORDER4"] = 4.0
    dic["FDDIMORDER"] = [2.0, 1.0, 3.0, 4.0]
    # axis labels
    dic["FDF1LABEL"] = "Y"
    dic["FDF2LABEL"] = "X"
    dic["FDF3LABEL"] = "Z"
    dic["FDF4LABEL"] = "A"
    # misc values
    dic["FDFLTFORMAT"] = struct.unpack('f', b'\xef\xeenO')[0]
    dic["FDFLTORDER"] = float(2.3450000286102295)
    return dic
def datetime2dic(dt, dic):
    """
    Add a datetime object to a NMRPipe dictionary.
    """
    # NMRPipe stores every header value as a float
    for key, value in (("FDYEAR", dt.year), ("FDMONTH", dt.month),
                       ("FDDAY", dt.day), ("FDHOURS", dt.hour),
                       ("FDMINS", dt.minute), ("FDSECS", dt.second)):
        dic[key] = float(value)
    return dic
def dic2datetime(dic):
    """
    Create a datetime object from a NMRPipe dictionary.
    """
    # header values are floats; truncate to ints for the constructor
    fields = [int(dic[key]) for key in
              ("FDYEAR", "FDMONTH", "FDDAY", "FDHOURS", "FDMINS", "FDSECS")]
    return datetime.datetime(*fields)
################
# file reading #
################
def read(filename):
    """
    Read a NMRPipe file.

    For standard multi-file 3D/4D NMRPipe data sets, filename should be a
    filemask (for example "/ft/test%03d.ft3") with a "%" formatter. If only
    one file of a 3D/4D data set is provided only that 2D slice of the data
    is read (for example "/ft/test001.ft3" results in a 2D data set being
    read).

    NMRPipe data streams stored as files (one file 3D/4D data sets made
    using xyz2pipe) can be read by providing the file name of the stream.
    The entire data set is read into memory.

    Parameters
    ----------
    filename : str
        Filename or filemask of NMRPipe file(s) to read.

    Returns
    --------
    dic : dict
        Dictionary of NMRPipe parameters.
    data : ndarray
        Array of NMR data.

    See Also
    --------
    read_lowmem : NMRPipe file reading with minimal memory usage.
    write : Write a NMRPipe data to file(s).
    """
    # a "%" formatter marks a multi-file mask; resolve the first file name
    filemask = None
    n_fmt = filename.count("%")
    if n_fmt == 1:
        filemask, filename = filename, filename % 1
    elif n_fmt == 2:
        filemask, filename = filename, filename % (1, 1)
    dic = fdata2dic(get_fdata(filename))
    order = dic["FDDIMCOUNT"]
    if order == 1:
        return read_1D(filename)
    if order == 2:
        return read_2D(filename)
    if dic["FDPIPEFLAG"] != 0:  # open streams
        return read_stream(filename)
    if filemask is None:        # if no filemask open as 2D
        return read_2D(filename)
    if order == 3:
        return read_3D(filemask)
    if order == 4:
        return read_4D(filemask)
    raise ValueError('unknown dimensionality: %s' % order)
def read_lowmem(filename):
    """
    Read a NMRPipe file with minimal memory usage.

    See :py:func:`read` for Parameters and information.

    Returns
    -------
    dic : dict
        Dictionary of NMRPipe parameters.
    data : array_like
        Low memory object which can access NMR data on demand.

    See Also
    --------
    read : Read NMRPipe files.
    write_lowmem : Write NMRPipe files using minimal amounts of memory.
    """
    # a "%" formatter marks a multi-file mask; resolve the first file name
    filemask = None
    n_fmt = filename.count("%")
    if n_fmt == 1:
        filemask, filename = filename, filename % 1
    elif n_fmt == 2:
        filemask, filename = filename, filename % (1, 1)
    dic = fdata2dic(get_fdata(filename))
    order = dic["FDDIMCOUNT"]
    if order == 1:
        return read_1D(filename)    # there is no 1D low memory option
    if order == 2:
        return read_lowmem_2D(filename)
    if dic["FDPIPEFLAG"] != 0:  # open streams
        return read_lowmem_stream(filename)
    if filemask is None:        # if no filemask open as 2D
        return read_lowmem_2D(filename)
    if order == 3:
        return read_lowmem_3D(filemask)
    if order == 4:
        return read_lowmem_4D(filemask)
    raise ValueError('unknown dimentionality: %s' % order)
# dimension specific reading
def read_1D(filename):
    """
    Read a 1D NMRPipe file.

    See :py:func:`read` for documentation.
    """
    fdata, raw = get_fdata_data(filename)   # header block and raw values
    dic = fdata2dic(fdata)                  # header -> python dictionary
    shaped = reshape_data(raw, find_shape(dic))
    # unappend imaginary data if needed
    if dic["FDF2QUADFLAG"] != 1:
        shaped = unappend_data(shaped)
    return (dic, shaped)
def read_2D(filename):
    """
    Read a 2D NMRPipe file or NMRPipe data stream.

    See :py:func:`read` for documentation.
    """
    fdata, raw = get_fdata_data(filename)   # header block and raw values
    dic = fdata2dic(fdata)                  # header -> python dictionary
    shaped = reshape_data(raw, find_shape(dic))
    # unappend imaginary data when the current last axis is complex; the
    # relevant quadrature flag depends on whether the data is transposed
    if dic["FDTRANSPOSED"] == 1 and dic["FDF1QUADFLAG"] != 1:
        shaped = unappend_data(shaped)
    elif dic["FDTRANSPOSED"] == 0 and dic["FDF2QUADFLAG"] != 1:
        shaped = unappend_data(shaped)
    return (dic, shaped)
def read_lowmem_2D(filename):
    """
    Read a 2D NMRPipe file or NMRPipe data stream using minimal memory.

    See :py:func:`read_lowmem` for documentation.
    """
    dic = fdata2dic(get_fdata(filename))
    order = dic["FDDIMCOUNT"]
    if order == 2:
        data = pipe_2d(filename)
    elif order == 3:
        data = pipestream_3d(filename)
    elif order == 4:
        data = pipestream_4d(filename)
    else:
        # bug fix: an unrecognised order previously fell through and raised
        # an unhelpful NameError on ``data``; fail with a clear message,
        # matching read()/read_lowmem()
        raise ValueError('unknown dimensionality: %s' % order)
    return dic, data
def read_stream(filename):
    """
    Read a NMRPipe data stream (one file 3D or 4D files).

    See :py:func:`read` for documentation.
    """
    # a data stream is laid out like a 2D file on disk, so delegate
    return read_2D(filename)
def read_lowmem_stream(filename):
    """
    Read a NMRPipe data stream using minimal memory.

    See :py:func:`read_lowmem` for documentation.
    """
    # a data stream is laid out like a 2D file on disk, so delegate
    return read_lowmem_2D(filename)
def read_3D(filemask):
    """
    Read a 3D NMRPipe file.

    See :py:func:`read` for documentation.
    """
    dic, lowmem_data = read_lowmem_3D(filemask)
    # slicing the low-memory object pulls the full array into memory
    return dic, lowmem_data[:, :, :]
def read_lowmem_3D(filemask):
    """
    Read a 3D NMRPipe file using minimal memory.

    See :py:func:`read_lowmem` for documentation.
    """
    # data streams (no "%" in the name) should be read as streams
    if '%' not in filemask:
        return read_lowmem_stream(filemask)
    data = pipe_3d(filemask)    # lazy 3D data object
    # header comes from the first plane file
    dic = fdata2dic(get_fdata(filemask % (1)))
    return dic, data
def read_4D(filemask):
    """
    Read a 4D NMRPipe file.

    See :py:func:`read` for documentation.

    Notes
    -----
    This function should not be used to read NMRPipe data streams stored in
    a single file (one file 3D/4D data sets made using xyz2pipe),
    :py:func:`read_2D` should be used.
    """
    dic, lowmem_data = read_lowmem_4D(filemask)
    # slicing the low-memory object pulls the full array into memory
    return dic, lowmem_data[:, :, :, :]
def read_lowmem_4D(filemask):
    """
    Read a 4D NMRPipe file using minimal memory.

    See :py:func:`read_lowmem` for documentation.

    Notes
    -----
    This function should not be used to read NMRPipe data streams stored in
    a single file (one file 3D/4D data sets made using xyz2pipe),
    :py:func:`read_lowmem_2D` should be used.
    """
    # data streams (no "%" in the name) should be read as streams
    if '%' not in filemask:
        return read_lowmem_stream(filemask)
    data = pipe_4d(filemask)    # lazy 4D data object
    # header comes from the first plane file; the mask may carry one or
    # two indices
    if data.singleindex:
        dic = fdata2dic(get_fdata(filemask % (1)))
    else:
        dic = fdata2dic(get_fdata(filemask % (1, 1)))
    return (dic, data)
#####################
# writing functions #
#####################
def write(filename, dic, data, overwrite=False):
    """
    Write a NMRPipe file to disk.

    Parameters
    ----------
    filename : str
        Filename of NMRPipe to write to. See Notes.
    dic : dict
        Dictionary of NMRPipe parameters.
    data : array_like
        Array of NMR data.
    overwrite : bool, optional.
        Set True to overwrite files, False will raise a Warning if file
        exists.

    Notes
    -----
    For 3D data if filename has no '%' formatter then the data is written
    as a 3D NMRPipe data stream. When the '%' formatter is provided the
    data is written out as a standard NMRPipe 3D multi-file 3D.

    For 4D data, filename can have one, two or no '%' formatters resulting
    in a single index file (test%03d.ft), two index file(test%02d%03d.ft),
    or one file data stream (test.ft4).

    dic["FDPIPEFLAG"] is not changed or checked when writing, please check
    that this value is 0.0 for standard non-data stream files, and 1.0 for
    data stream files or an file may be written with an incorrect header.

    Set overwrite to True to overwrite files that exist.

    See Also
    --------
    write_lowmem : Write NMRPipe files using minimal amounts of memory.
    read : Read NMRPipe files.
    """
    # pull the full data set into memory when given a lazy/array-like object
    if not isinstance(data, np.ndarray):
        data = data[:]
    if filename.count("%") == 0:
        return write_single(filename, dic, data, overwrite)
    if data.ndim == 3:
        return write_3D(filename, dic, data, overwrite)
    if data.ndim == 4:
        return write_4D(filename, dic, data, overwrite)
    raise ValueError('unknown filename/dimension')
def write_single(filename, dic, data, overwrite=False):
    """
    Write data to a single NMRPipe file from memory.

    Write 1D and 2D files completely as well as NMRPipe data streams.
    2D planes of 3D and 4D files should be written with this function.

    See :py:func:`write` for documentation.
    """
    # interleave the imaginary channel for complex data, then flatten
    if data.dtype == "complex64":
        data = append_data(data)
    flat = unshape_data(data)
    # build the 512-word header and write everything out
    fdata = dic2fdata(dic)
    put_data(filename, fdata, flat, overwrite)
    return
def write_3D(filemask, dic, data, overwrite=False):
    """
    Write a standard multi-file 3D NMRPipe file.

    See :py:func:`write` for documentation.
    """
    # one file per 2D plane; file indices are 1-based
    lenZ = data.shape[0]
    for zi in range(lenZ):
        write_single(filemask % (zi + 1), dic, data[zi], overwrite)
    return
def write_4D(filemask, dic, data, overwrite=False):
    """
    Write a one or two index 4D NMRPipe file.

    See :py:func:`write` for documentation.
    """
    lenA, lenZ = data.shape[0], data.shape[1]
    two_index = filemask.count("%") == 2
    for ai in range(lenA):
        for zi in range(lenZ):
            # one file per 2D plane, named by one or two 1-based indices
            if two_index:
                fn = filemask % (ai + 1, zi + 1)
            else:
                fn = filemask % (ai * lenZ + zi + 1)
            plane = data[ai, zi]
            # refresh per-plane MIN/MAX headers when scaling is flagged
            if dic["FDSCALEFLAG"] == 1:
                dic["FDMAX"] = plane.max()
                dic["FDDISPMAX"] = dic["FDMAX"]
                dic["FDMIN"] = plane.min()
                dic["FDDISPMIN"] = dic["FDMIN"]
            write_single(fn, dic, plane, overwrite)
    return
def write_lowmem(filename, dic, data, overwrite=False):
    """
    Write a NMRPipe file to disk using minimal memory (trace by trace).

    Parameters
    ----------
    filename : str
        Filename of NMRPipe to write to. See :py:func:`write` for details.
    dic : dict
        Dictionary of NMRPipe parameters.
    data : array_like
        Array of NMR data.
    overwrite : bool, optional.
        Set True to overwrite files, False will raise a Warning if file
        exists.

    See Also
    --------
    write : Write a NMRPipe file to disk.
    read_lowmem : Read a NMRPipe file using minimal memory.
    """
    ndim = data.ndim
    if ndim == 1:
        return write_single(filename, dic, data, overwrite)
    if ndim == 2:
        return write_lowmem_2D(filename, dic, data, overwrite)
    # for 3D/4D a "%" in the name selects multi-file output, otherwise a
    # single-file data stream is written
    multi_file = "%" in filename
    if ndim == 3:
        writer = write_lowmem_3D if multi_file else write_lowmem_3Ds
        return writer(filename, dic, data, overwrite)
    if ndim == 4:
        writer = write_lowmem_4D if multi_file else write_lowmem_4Ds
        return writer(filename, dic, data, overwrite)
    raise ValueError('unknown dimensionality: %s' % data.ndim)
def write_lowmem_2D(filename, dic, data, overwrite=False):
    """
    Write a 2D NMRPipe file using minimal memory (trace by trace).

    See :py:func:`write_lowmem` for documentation.
    """
    fh = fileiobase.open_towrite(filename, overwrite=overwrite)
    # header first, then one trace per row
    put_fdata(fh, dic2fdata(dic))
    n_rows = data.shape[0]
    for y in range(n_rows):
        put_trace(fh, data[y])
    fh.close()
    return
def write_lowmem_3D(filename, dic, data, overwrite=False):
    """
    Write a standard multi-file 3D NMRPipe file using minimal memory.

    See :py:func:`write_lowmem` for documentation.

    Notes
    -----
    MIN/MAX parameters are not updated in the NMRPipe headers.
    """
    fdata = dic2fdata(dic)  # header is identical for every plane file
    lenZ, lenY = data.shape[0], data.shape[1]
    for z in range(lenZ):
        # one file per 2D plane, indices are 1-based
        fh = fileiobase.open_towrite(filename % (z + 1), overwrite=overwrite)
        put_fdata(fh, fdata)
        for y in range(lenY):
            put_trace(fh, data[z, y])
        fh.close()
    return
def write_lowmem_3Ds(filename, dic, data, overwrite=False):
    """
    Write 3D NMRPipe data stream file using minimal memory (trace by trace).

    See :py:func:`write_lowmem` for documentation.
    """
    fh = fileiobase.open_towrite(filename, overwrite=overwrite)
    # single stream: header once, then every trace in order
    put_fdata(fh, dic2fdata(dic))
    lenZ, lenY = data.shape[0], data.shape[1]
    for z in range(lenZ):
        for y in range(lenY):
            put_trace(fh, data[z, y])
    fh.close()
    return
def write_lowmem_4D(filename, dic, data, overwrite=False):
    """
    Write a multi-file (single or double index) 4D NMRPipe file using
    minimal memory.

    See :py:func:`write_lowmem` for documentation.

    Notes
    -----
    MIN/MAX parameters are not updated in the NMRPipe headers.
    """
    # every 2D plane file gets an identical header
    header = dic2fdata(dic)
    single_index = filename.count("%") == 1
    n_a, n_z, n_y, _ = data.shape  # unpacking also asserts the data is 4D
    for a in range(n_a):
        for z in range(n_z):
            # single-index masks number planes consecutively,
            # double-index masks use (a+1, z+1)
            if single_index:
                plane_name = filename % (a * n_z + z + 1)
            else:
                plane_name = filename % (a + 1, z + 1)
            out = fileiobase.open_towrite(plane_name, overwrite=overwrite)
            put_fdata(out, header)
            for y in range(n_y):
                put_trace(out, data[a, z, y])
            out.close()
    return
def write_lowmem_4Ds(filename, dic, data, overwrite=False):
    """
    Write 4D NMRPipe data stream file using minimal memory (trace by trace)

    See :py:func:`write_lowmem` for documentation.
    """
    out = fileiobase.open_towrite(filename, overwrite=overwrite)
    # single header up front, then all traces in (a, z, y) order
    put_fdata(out, dic2fdata(dic))
    n_a, n_z, n_y, _ = data.shape  # unpacking also asserts the data is 4D
    for a in range(n_a):
        for z in range(n_z):
            for y in range(n_y):
                put_trace(out, data[a, z, y])
    out.close()
    return
###############
# put to disk #
###############
def put_fdata(fh, fdata):
    """
    Put NMR data, fdata, to a NMRPipe file described by file object fh.

    Parameters
    ----------
    fh : file object
        File object opened for binary writing.
    fdata : ndarray
        float32 NMRPipe header array.

    Raises
    ------
    TypeError
        If fdata is not a float32 array.
    """
    if fdata.dtype != 'float32':
        raise TypeError('fdata.dtype is not float32')
    # tobytes() replaces the tostring() alias removed in NumPy 2.0
    fh.write(fdata.tobytes())
    return
def put_trace(fh, trace):
    """
    Put a trace (real or complex) to NMRPipe file described by file object fh.

    Complex traces are stored on disk as the real vector followed by the
    imaginary vector, so complex64 input is converted with append_data.

    Raises
    ------
    TypeError
        If the (possibly converted) trace is not a float32 array.
    """
    if trace.dtype == 'complex64':
        trace = append_data(trace)
    if trace.dtype != 'float32':
        raise TypeError('trace.dtype is not float32')
    # tobytes() replaces the tostring() alias removed in NumPy 2.0
    fh.write(trace.tobytes())
    return
def put_data(filename, fdata, data, overwrite=False):
    """
    Put fdata and data to 2D NMRPipe.

    Parameters
    ----------
    filename : str
        Filename of file to write to.
    fdata : ndarray
        float32 NMRPipe header array.
    data : ndarray
        float32 array of NMR data.
    overwrite : bool, optional
        True to overwrite an existing file.

    Raises
    ------
    TypeError
        If data or fdata is not a float32 array.
    """
    if data.dtype != 'float32':
        raise TypeError('data.dtype is not float32')
    if fdata.dtype != 'float32':
        raise TypeError('fdata.dtype is not float32')
    # write the file: header followed by the data
    # tobytes() replaces the tostring() alias removed in NumPy 2.0
    f = fileiobase.open_towrite(filename, overwrite=overwrite)
    f.write(fdata.tobytes())
    f.write(data.tobytes())
    f.close()
    return
def write_slice_3D(filemask, dic, data, shape, slices):
    """
    Write a slice of a 3D data array to file.

    Opens (or if necessary creates) 2D NMRPipe file(s) to write
    data, where the total 3D file size is given by shape.

    Parameters
    ----------
    filemask : str
        String of NMRPipe file with single formatting operator (%).
    dic : dict
        Dictionary of NMRPipe parameters.
    data : ndarray
        3D array of NMR data.
    shape : tuple
        Tuple of 3 integers indicating the overall matrix shape.
    slices : tuple
        Tuple of 3 slice objects (sz, sy, sx) which specify the location
        of the to be written data.

    Notes
    -----
    This function memmaps 2D NMRPipe files for speed. It only writes
    dictionaries to file when created, leaving them unmodified if the file
    exists. Only error checking is that data is 3D.

    See Also
    --------
    iter3D : Users should use this object, not this function.
    """
    sz, sy, sx = slices
    if data.ndim != 3:
        raise ValueError("passed array must be 3D")
    # unpack the shape
    dz, dy, dx = shape
    # create list of file names, one 2D file per Z plane, numbered from 1
    fnames = [filemask % i for i in range(1, dz + 1)]
    # loop over the requested z-slice
    for i, f in enumerate(fnames[sz]):
        if os.path.isfile(f) is False:
            # file doesn't exist, create an empty one
            # (write the zero-filled ndata plane, NOT the passed data)
            ndata = np.zeros((dy, dx), dtype=data.dtype)
            write_single(f, dic, ndata, False)
            del ndata
        # mmap the [new] file, skipping the 512-word (2048 byte) header
        mdata = np.memmap(f, dtype='float32', offset=512 * 4, mode='r+')
        # reshape
        mdata = mdata.reshape((dy, dx))
        # unpack into rdata,[idata] depending on quadrature
        if data.dtype == 'complex64':
            # integer half-width; a float here is an invalid slice index
            h = mdata.shape[-1] // 2
            rdata = mdata[..., :h]
            idata = mdata[..., h:]
        else:
            rdata = mdata
        # write the data out, flush and close
        rdata[sy, sx] = data.real[i]
        rdata.flush()
        if data.dtype == 'complex64':
            idata[sy, sx] = data.imag[i]
            idata.flush()
            del idata
        # clean up
        del rdata
        del mdata
# iter3D tools (xyz2pipe and pipe2xyz replacements)
# Notes for iter3D implementation
#
# 'x'/'y' in_lead
# ==============
# Reading
# -------
# - When 'x' is passed, must transpose axes (1,2) if dic["FDTRANSPOSED"] == 1
#   (might need to call pipe_proc.tp)
# - if 'y' passed then call pipe_proc.tp unless dic["FDTRANSPOSED"] == 1
# - save 'good' dictionary and return each loop
#
# Looping
# -------
# - will loop until data.shape[0] reached
# - returns dic, XY or YX plane
#
# Writing
# -------
# - if 'y' out then need final pipe_proc.tp of data, if 'x' do nothing
# - reshape data to 1,plane.shape[0],plane.shape[1]
# - size becomes data.shape[0],plane.shape[0],plane.shape[1]
# - sz = slice(i,i+1,1) sy=sx=slice(None)
#
# 'z' in_lead
# ===========
# Reading
# -------
# - Untranspose if dic["FDTRANSPOSED"] == 1 (call pipe_proc.tp)
# - transpose (1,2,0)
# - ORDER 1,2,3 = 3,1,2 and array
# - update "FDSLICECOUNT" and "FDSIZE" taking into account complex packing
# - also update "FDSPECNUM"
# - call write_slice3D
# - store shape as self.max_iter
#
# Looping
# -------
# - grab the slice and pack_complex if needed
# - returns dic,ZX-plane
#
# Writing
# -------
# - if out_lead = 'x' needs final pipe_proc.tp of data, if 'z' do nothing
# - reshape data to 1,plane.shape[0],plane.shape[1]
# - transpose data to (2,0,1) (or combine with the above step)
# - update "FDSIZE" and "FDSPECNUM"
# - remove min/max
# - update FDDIMORDER and ORDER1,2,3
# - size plane.shape[0],self.max_iter,plane.shape[2]
# - sz = slice(None)=sx
# - sy = slice(i,i+1,1)
def pack_complex(data):
    """
    Pack an interleaved real/imaginary array into a complex64 array.
    """
    # even indices hold real points, odd indices the matching imaginaries
    reals = data[..., 0::2]
    imags = data[..., 1::2]
    return np.array(reals + imags * 1.j, dtype="complex64")
def transpose_3D(dic, data, axes=(2, 1, 0)):
    """
    Transpose a 3D NMRPipe data array and update its dictionary to match.

    Parameters
    ----------
    dic : dict
        Dictionary of NMRPipe parameters (not modified).
    data : ndarray
        3D array of NMR data.
    axes : tuple, optional
        Axis permutation applied to data.

    Returns
    -------
    (rdic, data) : updated dictionary copy and transposed array.
    """
    new_dic = dict(dic)  # work on a copy, leave the caller's dict untouched
    data = data.transpose(tuple(axes))
    # FDDIMORDERn counts from the *last* data axis (n = 3 - axis index);
    # axis k of the transposed data was axis axes[k] of the input.
    src_keys = ["FDDIMORDER" + str(int(3 - ax)) for ax in axes]
    new_dic["FDDIMORDER1"] = dic[src_keys[2]]  # new last axis
    new_dic["FDDIMORDER2"] = dic[src_keys[1]]  # new middle axis
    new_dic["FDDIMORDER3"] = dic[src_keys[0]]  # new first axis
    new_dic['FDDIMORDER'] = [new_dic["FDDIMORDER1"], new_dic["FDDIMORDER2"],
                             new_dic["FDDIMORDER3"], new_dic["FDDIMORDER4"]]
    # set the shape dictionary parameters; complex last axis stores
    # R|I pairs so FDSIZE is half the point count
    fn = "FDF" + str(int(new_dic["FDDIMORDER1"]))
    if new_dic[fn + "QUADFLAG"] != 1.0:  # last axis is complex
        new_dic["FDSIZE"] = data.shape[2] / 2.
    else:  # last axis is singular
        new_dic["FDSIZE"] = data.shape[2]
    new_dic["FDSLICECOUNT"] = data.shape[1]
    new_dic["FDSPECNUM"] = new_dic["FDSLICECOUNT"]
    return new_dic, data
class iter3D(object):
    """
    Object which allows for graceful iteration over 3D NMRPipe files.

    iter3D.iter() returns a (dic, plane) tuple which can be written using
    the x.writeplane function.

    When processing 3D files with iter3D object(s) the following dictionary
    parameters may not have the same values as NMRPipe processing scripts
    return:

    * FDSLICECOUNT
    * FDMAX, FDDISMAX, FDMIN, FDDISPMIN when FDSCALEFLAG == 0

    Example::

        #3D data processing
        xiter = iter3D("data/test%03d.fid","x","x")
        for dic,YXplane in xiter:
            # process X and Y axis
            xiter.write("ft/test%03d.ft2",YXplane,dic)
        ziter = iter3D("ft/test%03d.ft2","z","z")
        for dic,XZplane in ziter:
            # process Z axis
            ziter.write("ft/test%03d.ft3",XZplane,dic)
    """
    def __init__(self, filemask, in_lead="x", out_lead="DEFAULT"):
        """
        Create a iter3D object

        Parameters
        ----------
        filemask : str
            String file with single formatter (%) which indicates which
            NMRPipe file(s) to read.
        in_lead : ('x', 'y', 'z'), optional
            Axis name of last (1st) axis in outputted 2D
        out_lead : ('x', 'y', 'z', 'DEFAULT'), optional
            Axis name of axis to be written, typically this is the same as
            in_lead, which is used if 'DEFAULT' is given.

        Notes
        -----
        ======= ===============
        In-lead Iterated Planes
        ======= ===============
        "x"     ('y','x')
        "y"     ('x','y')
        "z"     ('x','z')
        ======= ===============
        """
        # check for invalid in_lead, out_lead
        if in_lead not in ["x", "y", "z"]:
            raise ValueError("in_lead must be 'x','y' or 'z'")
        if out_lead not in ["x", "y", "z", "DEFAULT"]:
            raise ValueError("out_lead must be 'x','y','z' or 'DEFAULT'")
        if out_lead == "DEFAULT":
            out_lead = in_lead
        # only (x|y -> x|y) and (z -> x|z) combinations are supported
        if in_lead in ["x", "y"] and out_lead not in ["x", "y"]:
            raise ValueError("Invalid in_lead, out_lead pair")
        if in_lead == "z" and out_lead not in ["x", "z"]:
            raise ValueError("Invalid in_lead, out_lead pair")
        self.in_lead = in_lead
        self.out_lead = out_lead
        self.dic, self.pipe_3d = read_3D(filemask)
        # uptranspose data if needed
        if self.dic["FDTRANSPOSED"] == 1.0:
            # need to switch X and Y (0,2,1)
            self.dic, self.pipe_3d = transpose_3D(self.dic, self.pipe_3d,
                                                  (0, 2, 1))
        # self.pipe_3d and self.dic are now REALLY ZYX order
        # now prep pipe_3d for slicing and make idic the iterator dictionary
        self.i = -1     # counter, advanced before each plane is returned
        if self.in_lead == "x":
            # leave as is Z(YX)
            self.needs_pack_complex = False
            self.idic = dict(self.dic)
            self.i_max = int(self.pipe_3d.shape[0])
        elif self.in_lead == "y":
            # transpose to Z(XY)
            self.idic, self.pipe_3d = transpose_3D(self.dic, self.pipe_3d,
                                                   (0, 2, 1))
            self.needs_pack_complex = False
            self.i_max = int(self.pipe_3d.shape[0])
        elif self.in_lead == "z":
            # transpose to Y(XZ)
            self.idic, self.pipe_3d = transpose_3D(self.dic, self.pipe_3d,
                                                   (1, 2, 0))
            # after the transpose the (new) last axis may hold interleaved
            # R,I points which must be packed into complex when iterating
            fn = "FDF" + str(int(self.idic["FDDIMORDER1"]))
            if self.idic[fn + "QUADFLAG"] != 1.0:   # z axis is complex
                self.needs_pack_complex = True
            else:
                self.needs_pack_complex = False
            self.i_max = int(self.pipe_3d.shape[0])
        else:
            raise ValueError("Invalid in_lead")  # this should never be raised.

    def __iter__(self):
        """
        x.__iter__() <==> iter(x)
        """
        return self

    def __next__(self):
        """ next iterator. """
        # Python 3 protocol method; delegates to the Python 2 style next()
        return self.next()

    def next(self):
        """
        Return the next dic, plane or raise StopIteration
        """
        self.i = self.i + 1
        if self.i >= self.i_max:
            raise StopIteration
        else:
            plane = self.pipe_3d[self.i]
            if self.needs_pack_complex:
                plane = pack_complex(plane)
            # return a copy of the dictionary so callers may modify it
            return (dict(self.idic), plane)

    def reinitialize(self):
        """
        Restart iterator at first dic,plane.
        """
        self.i = -1

    def write(self, filemask, plane, dic):
        """
        Write out current plane.
        """
        # make the plane a 3D array
        plane = plane.reshape(1, plane.shape[0], plane.shape[1])
        if self.in_lead != self.out_lead:
            # transpose the last two axes
            dic, plane = transpose_3D(dic, plane, (0, 2, 1))
        if self.in_lead == "x" or self.in_lead == "y":
            # writing one Z plane of the full 3D matrix
            shape = (self.i_max, plane.shape[1], plane.shape[2])
            sz = slice(self.i, self.i + 1, 1)
            sx = slice(None)
            sy = slice(None)
        elif self.in_lead == "z":
            # reorder from YXZ -> ZYX
            dic, plane = transpose_3D(dic, plane, (2, 0, 1))
            # turn scale flag off
            dic["FDSCALEFLAG"] = 0.0
            # the Y size is incorrect
            dic["FDSPECNUM"] = self.i_max
            # update the file count
            # XXX these could be done better
            dic["FDFILECOUNT"] = plane.shape[0]
            dic["FDF3SIZE"] = plane.shape[0]
            shape = (plane.shape[0], self.i_max, plane.shape[2])
            sx = slice(None)
            sy = slice(self.i, self.i + 1, 1)
            sz = slice(None)
        else:
            raise ValueError("invalid in_lead")  # this should never be raised
        # DEBUGGING
        # print("Writing out slice :",self.i)
        # print("shape:",shape)
        # print("plane.shape",plane.shape)
        # print("sx,sy,sz",sx,sy,sz)
        # print(dic["FDFILECOUNT"])
        write_slice_3D(filemask, dic, plane, shape, (sz, sy, sx))
#####################
# Shaping functions #
#####################
def find_shape(dic):
    """
    Find the shape (tuple) of data in a NMRPipe file from parameters.

    1-tuple is returned for 1D data, 2-tuple for 2D and non-stream 3D/4D
    data, 3-tuple or 4-tuple for stream 3D/4D data.

    The last dimension of the tuple is length of the data in the file,
    the actual length of the data matrix may be half of this if the data
    is complex.
    """
    if dic["FDDIMCOUNT"] == 1:  # 1D Data
        # real data stores FDSIZE points, complex twice that (R then I)
        if dic["FDF2QUADFLAG"] == 1:
            multi = 1.0
        else:
            multi = 2.0
        dim1 = int(dic["FDSIZE"] * multi)
        return (dim1, )     # 1-tuple, not a bare int
    else:  # 2D+ Data
        if dic["FDF1QUADFLAG"] == 1 and dic["FDTRANSPOSED"] == 1:
            multi = 1.0
        elif dic["FDF2QUADFLAG"] == 1 and dic["FDTRANSPOSED"] == 0:
            multi = 1.0
        else:
            multi = 2.0
        dim1 = int(dic["FDSIZE"] * multi)
        dim2 = int(dic["FDSPECNUM"])
        # when the direct dim is singular and the indirect
        # dim is complex FDSPECNUM is half of the correct value
        if dic["FDQUADFLAG"] == 0 and multi == 1.0:
            dim2 = dim2 * 2
        # check for 3D/4D data stream format files (made using xyz2pipe)
        if dic["FDDIMCOUNT"] == 3 and dic["FDPIPEFLAG"] != 0:
            dim3 = int(dic["FDF3SIZE"])
            return (dim3, dim2, dim1)
        if dic["FDDIMCOUNT"] == 4 and dic["FDPIPEFLAG"] != 0:
            dim3 = int(dic["FDF3SIZE"])
            dim4 = int(dic["FDF4SIZE"])
            return (dim4, dim3, dim2, dim1)
        return (dim2, dim1)
def reshape_data(data, shape):
    """
    Reshape data to the requested shape, or warn and return the
    original (1D) data when the shapes are incompatible.
    """
    try:
        reshaped = data.reshape(shape)
    except ValueError:
        warn(str(data.shape) + "cannot be shaped into" + str(shape))
        return data
    return reshaped
def unshape_data(data):
    """
    Return a flattened (1D) copy of data.
    """
    flat = data.flatten()
    return flat
def unappend_data(data):
    """
    Return complex data with last axis (-1) unappended.

    Data should have imaginary data vector appended to real data vector
    along the last axis.
    """
    # integer half-length; a float here would raise TypeError when slicing
    h = data.shape[-1] // 2
    return np.array(data[..., :h] + data[..., h:] * 1.j, dtype="complex64")
def append_data(data):
    """
    Return data with last axis (-1) appended.

    Data should be complex; the imaginary vector is concatenated after
    the real vector along the last axis.
    """
    real_part = data.real
    imag_part = data.imag
    return np.concatenate((real_part, imag_part), axis=-1)
###################
# fdata functions #
###################
def fdata2dic(fdata):
    """
    Convert a fdata array to fdata dictionary.

    Converts the raw 512x4-byte NMRPipe header into a python dictionary
    with keys as given in fdatap.h
    """
    dic = dict()
    # Populate the dictionary with FDATA which contains numbers;
    # fdata_dic maps parameter names to word offsets in the header
    for key in fdata_dic.keys():
        dic[key] = float(fdata[int(fdata_dic[key])])
    # make the FDDIMORDER
    dic["FDDIMORDER"] = [dic["FDDIMORDER1"], dic["FDDIMORDER2"],
                         dic["FDDIMORDER3"], dic["FDDIMORDER4"]]

    def _unpack_str(fmt, d):
        # unpack a null-padded byte string from raw header words
        return struct.unpack(fmt, d)[0].decode().strip('\x00')
    # Populate the dictionary with FDATA which contains strings.
    # Slice indices are in 4-byte words, so e.g. words 16-17 hold the
    # 8 bytes of FDF2LABEL and words 297-311 hold the 60-byte FDTITLE.
    dic["FDF2LABEL"] = _unpack_str('8s', fdata[16:18])
    dic["FDF1LABEL"] = _unpack_str('8s', fdata[18:20])
    dic["FDF3LABEL"] = _unpack_str('8s', fdata[20:22])
    dic["FDF4LABEL"] = _unpack_str('8s', fdata[22:24])
    dic["FDSRCNAME"] = _unpack_str('16s', fdata[286:290])
    dic["FDUSERNAME"] = _unpack_str('16s', fdata[290:294])
    dic["FDTITLE"] = _unpack_str('60s', fdata[297:312])
    dic["FDCOMMENT"] = _unpack_str('160s', fdata[312:352])
    dic["FDOPERNAME"] = _unpack_str('32s', fdata[464:472])
    return dic
def dic2fdata(dic):
    """
    Converts a NMRPipe dictionary into an array.
    """
    # A 512 4-byte array to hold the nmrPipe header data
    fdata = np.zeros(512, 'float32')
    # Populate the array with the simple numbers; fdata_dic maps each
    # parameter name to its word offset in the header
    for key in fdata_nums.keys():
        fdata[int(fdata_dic[key])] = float(dic[key])
    # Check that FDDIMORDER didn't overwrite FDDIMORDER1
    fdata[int(fdata_dic["FDDIMORDER1"])] = dic["FDDIMORDER1"]
    # Pack the various strings into terminated strings of the correct length
    # then into floats in the fdata array.  struct.pack('Ns', ...) null-pads
    # each string to N bytes, which are reinterpreted as N/4 float32 words.
    fdata[16:18] = struct.unpack(
        '2f', struct.pack('8s', dic["FDF2LABEL"].encode()))
    fdata[18:20] = struct.unpack(
        '2f', struct.pack('8s', dic["FDF1LABEL"].encode()))
    fdata[20:22] = struct.unpack(
        '2f', struct.pack('8s', dic["FDF3LABEL"].encode()))
    fdata[22:24] = struct.unpack(
        '2f', struct.pack('8s', dic["FDF4LABEL"].encode()))
    # and the longer strings (typically blank)
    fdata[286:290] = struct.unpack(
        '4f', struct.pack('16s', dic["FDSRCNAME"].encode()))
    fdata[290:294] = struct.unpack(
        '4f', struct.pack('16s', dic["FDUSERNAME"].encode()))
    fdata[297:312] = struct.unpack(
        '15f', struct.pack('60s', dic["FDTITLE"].encode()))
    fdata[312:352] = struct.unpack(
        '40f', struct.pack('160s', dic["FDCOMMENT"].encode()))
    fdata[464:472] = struct.unpack(
        '8f', struct.pack('32s', dic["FDOPERNAME"].encode()))
    return fdata
#################################
# raw reading of data from file #
#################################
def get_fdata(filename):
    """
    Get an array of length 512 (2048 bytes) holding NMRPipe header.
    """
    fdata = np.fromfile(filename, 'float32', 512)
    # fdata[2] should be 2.345; any other value means the file was written
    # with the opposite byte order.  Compare with abs() so a swapped value
    # that happens to be *smaller* than 2.345 is also detected.
    if abs(fdata[2] - 2.345) > 1e-6:
        fdata = fdata.byteswap()
    return fdata
def get_data(filename):
    """
    Get array of data (512-word header excluded).
    """
    data = np.fromfile(filename, 'float32')
    # data[2] should be 2.345; use abs() so byteswapped files whose
    # reinterpreted marker is below 2.345 are also detected
    if abs(data[2] - 2.345) > 1e-6:  # check for byteswap
        data = data.byteswap()
    return data[512:]
def get_fdata_data(filename):
    """
    Get fdata and data array, return (fdata, data)
    """
    data = np.fromfile(filename, 'float32')
    # data[2] should be 2.345; use abs() so byteswapped files whose
    # reinterpreted marker is below 2.345 are also detected
    if abs(data[2] - 2.345) > 1e-6:  # check for byteswap
        data = data.byteswap()
    return data[:512], data[512:]
##############################################
# low memory numpy.ndarray emulating objects #
##############################################
def get_trace(fhandle, ntrace, pts, bswap, cplex):
    """
    Read a single trace from an open NMRPipe file.

    Parameters
    ----------
    fhandle : file object
        File object of open NMRPipe file.
    ntrace : int
        Trace number (starting from 0).
    pts : int
        Number of points in trace, R|I.
    bswap : bool
        True to perform byteswap on trace.
    cplex : bool
        True to unappend imaginary data.
    """
    # complex traces occupy twice as many float32 points (R then I)
    tpts = pts * 2 if cplex else pts
    # skip the 512-word header plus the preceding whole traces
    fhandle.seek(4 * (512 + ntrace * tpts))
    trace = np.fromfile(fhandle, 'float32', tpts)
    if bswap:
        trace = trace.byteswap()
    if cplex:
        return unappend_data(trace)
    return trace
class pipe_2d(fileiobase.data_nd):
    """
    Emulate a ndarray objects without loading data into memory for low memory
    reading of 2D NMRPipe files.

    * slicing operations return ndarray objects.
    * can iterate over with expected results.
    * transpose and swapaxes methods create a new objects with correct axes
      ordering.
    * has ndim, shape, and dtype attributes.

    Parameters
    ----------
    filename : str
        Filename of 2D NMRPipe file.
    order : tuple
        Ordering of axes against file.
    """
    def __init__(self, filename, order=(0, 1)):
        """
        Create and set up object
        """
        # read and parse the NMRPipe header
        fdata = get_fdata(filename)  # get the header data
        # NOTE(review): this comparison lacks abs(), so a byteswapped
        # marker smaller than 2.345 would not set bswap — confirm
        if fdata[2] - 2.345 > 1e-6:  # check if byteswapping will be necessary
            self.bswap = True
        else:
            self.bswap = False
        dic = fdata2dic(fdata)  # create the dictionary
        fshape = list(find_shape(dic))
        # set object attributes
        self.filename = filename
        self.order = order
        # check last axis quadrature
        fn = "FDF" + str(int(dic["FDDIMORDER1"]))
        if dic[fn + "QUADFLAG"] == 1.0:
            self.cplex = False
            self.dtype = np.dtype('float32')
        else:
            self.cplex = True
            self.dtype = np.dtype('complex64')
            # file stores R|I pairs, the complex matrix is half as long
            fshape[1] = fshape[1] // 2
        # finalize
        self.fshape = tuple(fshape)
        self.__setdimandshape__()   # set ndim and shape attributes

    def __fcopy__(self, order):
        """
        Create a copy
        """
        n = pipe_2d(self.filename, order)
        return n

    def __fgetitem__(self, slices):
        """
        Return ndarray of selected values.

        (sY, sX) is a well formated tuple of slices
        """
        sY, sX = slices
        f = open(self.filename, 'rb')  # open the file for reading
        # determine which objects should be selected
        lenY, lenX = self.fshape
        xch = range(lenX)[sX]
        ych = range(lenY)[sY]
        # create an empty array to store the selected slice
        out = np.empty((len(ych), len(xch)), dtype=self.dtype)
        # read in the data trace by trace
        for yi, y in enumerate(ych):
            ntrace = y
            trace = get_trace(f, ntrace, lenX, self.bswap, self.cplex)
            out[yi] = trace[sX]
        f.close()
        return out
# There are two types of NMRPipe 3D files:
# 1) streams which are single file data sets made with xyz2pipe.
# 2) multiple file data sets, named test%03d.ft3, etc.
# Low memory objects exist for both, choose the correct one, or let read
# do it for you.
class pipe_3d(fileiobase.data_nd):
    """
    Emulate a ndarray objects without loading data into memory for low memory
    reading of 3D NMRPipe files (multiple file data sets).

    * slicing operations return ndarray objects.
    * can iterate over with expected results.
    * transpose and swapaxes methods create a new objects with correct axes
      ordering.
    * has ndim, shape, and dtype attributes.

    Parameters
    ----------
    filemask : str
        Filename of 3D NMRPipe file. Should contain one formatter '%'
        operator.
    order : tuple
        Ordering of axes against file.
    fcheck : bool, optional.
        True to perform a basic check to see if all files expected for the data
        set exist. Raises a IOError if files are missing. Default is False.
    """
    def __init__(self, filemask, order=(0, 1, 2), fcheck=False):
        """
        Create and set up object, check that files exist if fcheck is True
        """
        filename = filemask % 1     # plane files are numbered from 1
        # read and parse the NMRPipe header in the first file of the 3D
        fdata = get_fdata(filename)  # get the header data
        # NOTE(review): comparison lacks abs(); see pipe_2d
        if fdata[2] - 2.345 > 1e-6:  # check if byteswapping will be necessary
            self.bswap = True
        else:
            self.bswap = False
        # find the shape of the first two dimensions
        dic = fdata2dic(fdata)  # create the dictionary
        fshape = list(find_shape(dic))[-2:]
        # find the length of the third dimension
        f3 = "FDF" + str(int(dic["FDDIMORDER3"]))
        lenZ = int(dic[f3 + "SIZE"])
        fshape.insert(0, lenZ)  # insert as leading size of fshape
        # check that all files exist if fcheck is set
        if fcheck:
            for i in range(1, lenZ + 1):
                if os.path.exists(filemask % i) is False:
                    raise IOError("File not found: " + str(filemask % i))
        # check last axis quadrature
        fn = "FDF" + str(int(dic["FDDIMORDER1"]))
        if dic[fn + "QUADFLAG"] == 1.0:
            self.cplex = False
            self.dtype = np.dtype('float32')
        else:
            self.cplex = True
            self.dtype = np.dtype('complex64')
            # file stores R|I pairs, the complex matrix is half as long
            fshape[2] = fshape[2] // 2
        # finalize
        self.filemask = filemask
        self.order = order
        self.fshape = fshape
        self.__setdimandshape__()   # set ndim and shape attributes

    def __fcopy__(self, order):
        """
        Create a copy
        """
        n = pipe_3d(self.filemask, order)
        return n

    def __fgetitem__(self, slices):
        """
        Return ndarray of selected values

        (sZ, sY, sX) is a well formated tuple of slices
        """
        sZ, sY, sX = slices
        # determine which objects should be selected
        lenZ, lenY, lenX = self.fshape
        xch = range(lenX)[sX]
        ych = range(lenY)[sY]
        zch = range(lenZ)[sZ]
        # create an empty array to store the selected slice
        out = np.empty((len(zch), len(ych), len(xch)), dtype=self.dtype)
        # read in the data file by file and trace by trace
        for zi, z in enumerate(zch):
            # open the Z axis file (numbered from 1)
            f = open(self.filemask % (z + 1), 'rb')
            for yi, y in enumerate(ych):
                ntrace = y
                trace = get_trace(f, ntrace, lenX, self.bswap, self.cplex)
                out[zi, yi] = trace[sX]
            f.close()
        return out
class pipestream_3d(fileiobase.data_nd):
    """
    Emulate a ndarray objects without loading data into memory for low memory
    reading of 3D NMRPipe data stream files (one file data sets).

    * slicing operations return ndarray objects.
    * can iterate over with expected results.
    * transpose and swapaxes methods create a new objects with correct axes
      ordering.
    * has ndim, shape, and dtype attributes.

    Parameters
    ----------
    filename : str
        Filename of 3D NMRPipe stream file.
    order : tuple
        Ordering of axes against file.
    """
    def __init__(self, filename, order=(0, 1, 2)):
        """
        Create and set up object
        """
        # read and parse the NMRPipe header
        fdata = get_fdata(filename)  # get the header data
        # NOTE(review): comparison lacks abs(); see pipe_2d
        if fdata[2] - 2.345 > 1e-6:  # check if byteswapping will be necessary
            self.bswap = True
        else:
            self.bswap = False
        dic = fdata2dic(fdata)  # create the dictionary
        fshape = list(find_shape(dic))
        # check last axis quadrature
        fn = "FDF" + str(int(dic["FDDIMORDER1"]))
        if dic[fn + "QUADFLAG"] == 1.0:
            self.cplex = False
            self.dtype = np.dtype('float32')
        else:
            self.cplex = True
            self.dtype = np.dtype('complex64')
            # file stores R|I pairs, the complex matrix is half as long
            fshape[2] = fshape[2] // 2
        # finalize
        self.filename = filename
        self.order = order
        self.fshape = tuple(fshape)
        self.__setdimandshape__()   # set ndim and shape attributes

    def __fcopy__(self, order):
        """
        Create a copy
        """
        n = pipestream_3d(self.filename, order)
        return n

    def __fgetitem__(self, slices):
        """
        Return ndarray of selected values

        (sZ, sY, sX) is a well formated tuple of slices
        """
        sZ, sY, sX = slices
        f = open(self.filename, 'rb')  # open the file for reading
        # determine which objects should be selected
        lenZ, lenY, lenX = self.fshape
        xch = range(lenX)[sX]
        ych = range(lenY)[sY]
        zch = range(lenZ)[sZ]
        # create an empty array to store the selected slice
        out = np.empty((len(zch), len(ych), len(xch)), dtype=self.dtype)
        # read in the data trace by trace; traces are stored in (z, y) order
        for zi, z in enumerate(zch):
            for yi, y in enumerate(ych):
                ntrace = y + z * lenY
                trace = get_trace(f, ntrace, lenX, self.bswap, self.cplex)
                out[zi, yi] = trace[sX]
        f.close()
        return out
# There are three types of NMRPipe 4D files:
# 1) streams which are single file data sets made with xyz2pipe.
# 2) single index multiple file data sets, named test%03d.ft4, etc.
# 3) two index multiple file data sets, named test%02d%03d.ft2, made with
# pipe2xyz and conversion binary.
# Low memory objects exist for all three, choose the correct one, or let read
# do it for you.
class pipe_4d(fileiobase.data_nd):
    """
    Emulate a ndarray objects without loading data into memory for low memory
    reading of single/two index 4D NMRPipe data files.

    * slicing operations return ndarray objects.
    * can iterate over with expected results.
    * transpose and swapaxes methods create a new objects with correct axes
      ordering.
    * has ndim, shape, and dtype attributes.

    Parameters
    ----------
    filemask : str
        Filename of 4D NMRPipe file with one or two formatter (%) operators.
    order : tuple
        Ordering of axes against file.
    fcheck : bool, optional.
        True to perform a basic check to see if all files expected for the data
        set exist. Raises a IOError if files are missing. Default is False.
    """
    def __init__(self, filemask, order=(0, 1, 2, 3), fcheck=False):
        """
        Create and set up object, check that files exist if fcheck is True
        """
        # one formatter means consecutively numbered plane files, two
        # formatters means (a, z) indexed plane files
        if filemask.count("%") == 1:
            self.singleindex = True
            filename = filemask % (1)
        elif filemask.count("%") == 2:
            self.singleindex = False
            filename = filemask % (1, 1)
        else:
            raise ValueError("bad filemask")
        # read and parse the NMRPipe header in the first file of the 3D
        fdata = get_fdata(filename)  # get the header data
        # NOTE(review): comparison lacks abs(); see pipe_2d
        if fdata[2] - 2.345 > 1e-6:  # check if byteswapping will be necessary
            self.bswap = True
        else:
            self.bswap = False
        # find the shape of the first two dimensions
        dic = fdata2dic(fdata)  # create the dictionary
        fshape = list(find_shape(dic))[-2:]
        # find the length of the third dimension
        f3 = "FDF" + str(int(dic["FDDIMORDER3"]))
        lenZ = int(dic[f3 + "SIZE"])
        fshape.insert(0, lenZ)  # insert as leading size of fshape
        # find the length of the fourth dimension
        f4 = "FDF" + str(int(dic["FDDIMORDER4"]))
        lenA = int(dic[f4 + "SIZE"])
        fshape.insert(0, lenA)  # insert as leading size of fshape
        # check that all files exist if fcheck is set
        if fcheck:
            # NOTE(review): ai/zi run from 1 here while __fgetitem__
            # computes filenames from 0-based indices — these index
            # expressions look off by one; confirm against the writer.
            for ai in range(1, lenA + 1):
                for zi in range(1, lenZ + 1):
                    if self.singleindex:
                        fname = filemask % (ai * lenZ + zi + 1)
                    else:
                        fname = filemask % (ai + 1, zi + 1)
                    if os.path.exists(fname) is False:
                        raise IOError("File not found: " + str(fname))
        # check last axis quadrature
        fn = "FDF" + str(int(dic["FDDIMORDER1"]))
        if dic[fn + "QUADFLAG"] == 1.0:
            self.cplex = False
            self.dtype = np.dtype('float32')
        else:
            self.cplex = True
            self.dtype = np.dtype('complex64')
            # file stores R|I pairs, the complex matrix is half as long
            fshape[3] = fshape[3] // 2
        # finalize
        self.filemask = filemask
        self.order = order
        self.fshape = fshape
        self.__setdimandshape__()   # set ndim and shape attributes

    def __fcopy__(self, order):
        """
        Create a copy
        """
        n = pipe_4d(self.filemask, order)
        return n

    def __fgetitem__(self, slices):
        """
        Return ndarray of selected values

        (sA, sZ, sY, sX) is a well formated tuple of slices
        """
        sA, sZ, sY, sX = slices
        # determine which objects should be selected
        lenA, lenZ, lenY, lenX = self.fshape
        xch = range(lenX)[sX]
        ych = range(lenY)[sY]
        zch = range(lenZ)[sZ]
        ach = range(lenA)[sA]
        # create an empty array to store the selected slice
        out = np.empty((len(ach), len(zch), len(ych), len(xch)),
                       dtype=self.dtype)
        # read in the data file by file, trace by trace
        for ai, a in enumerate(ach):
            for zi, z in enumerate(zch):
                if self.singleindex:    # single index
                    f = open(self.filemask % (a * lenZ + z + 1), 'rb')
                else:   # two index
                    f = open(self.filemask % (a + 1, z + 1), 'rb')
                for yi, y in enumerate(ych):
                    ntrace = y
                    trace = get_trace(f, ntrace, lenX, self.bswap, self.cplex)
                    out[ai, zi, yi] = trace[sX]
                f.close()
        return out
class pipestream_4d(fileiobase.data_nd):
    """
    Emulate a ndarray objects without loading data into memory for low memory
    reading of 4D NMRPipe data steams (one file 4D data sets).

    * slicing operations return ndarray objects.
    * can iterate over with expected results.
    * transpose and swapaxes methods create a new objects with correct axes
      ordering.
    * has ndim, shape, and dtype attributes.

    Parameters
    ----------
    filename : str
        Filename of 4D NMRPipe stream file.
    order : tuple
        Ordering of axes against file.
    """
    def __init__(self, filename, order=(0, 1, 2, 3)):
        """
        Create and set up object
        """
        # read and parse the NMRPipe header
        fdata = get_fdata(filename)  # get the header data
        # NOTE(review): comparison lacks abs(); see pipe_2d
        if fdata[2] - 2.345 > 1e-6:  # check if byteswapping will be necessary
            self.bswap = True
        else:
            self.bswap = False
        dic = fdata2dic(fdata)  # create the dictionary
        fshape = list(find_shape(dic))
        # set object attributes
        self.filename = filename
        self.order = order
        # check last axis quadrature
        fn = "FDF" + str(int(dic["FDDIMORDER1"]))
        if dic[fn + "QUADFLAG"] == 1.0:
            self.cplex = False
            self.dtype = np.dtype('float32')
        else:
            self.cplex = True
            self.dtype = np.dtype('complex64')
            # file stores R|I pairs, the complex matrix is half as long
            fshape[3] = fshape[3] // 2
        # finalize
        self.fshape = tuple(fshape)
        self.__setdimandshape__()   # set ndim and shape attributes

    def __fcopy__(self, order):
        """
        Create a copy
        """
        n = pipestream_4d(self.filename, order)
        return n

    def __fgetitem__(self, slices):
        """
        Return ndarray of selected values

        (sA, sZ, sY, sX) is a well formated tuple of slices
        """
        sA, sZ, sY, sX = slices
        f = open(self.filename, 'rb')  # open the file for reading
        # determine which objects should be selected
        lenA, lenZ, lenY, lenX = self.fshape
        xch = range(lenX)[sX]
        ych = range(lenY)[sY]
        zch = range(lenZ)[sZ]
        ach = range(lenA)[sA]
        # create an empty array to store the selected slice
        out = np.empty((len(ach), len(zch), len(ych), len(xch)),
                       dtype=self.dtype)
        # read in the data trace by trace; traces are in (a, z, y) order
        for ai, a in enumerate(ach):
            for zi, z in enumerate(zch):
                for yi, y in enumerate(ych):
                    ntrace = y + z * lenY + a * lenY * lenZ
                    trace = get_trace(f, ntrace, lenX, self.bswap, self.cplex)
                    out[ai, zi, yi] = trace[sX]
        f.close()
        return out
# data, see fdata.h
# Maps NMRPipe header parameter names to their index (stored as a string)
# within the 512-float header block.  NOTE(review): appears to be the
# numeric-valued subset of fdata_dic below (no label/title/text entries) --
# confirm the intended distinction.
fdata_nums = {
    'FDF4CENTER': '82', 'FDF2P0': '109', 'FDF2P1': '110', 'FDF1P1': '246',
    'FDF2X1': '257', 'FDF1P0': '245', 'FDF3AQSIGN': '476', 'FDDISPMAX': '251',
    'FDF4FTFLAG': '31', 'FDF3X1': '261', 'FDRANK': '180', 'FDF2C1': '418',
    'FDF2QUADFLAG': '56', 'FDSLICECOUNT': '443', 'FDFILECOUNT': '442',
    'FDMIN': '248', 'FDF3OBS': '10', 'FDF4APODQ2': '407', 'FDF4APODQ1': '406',
    'FDF3FTSIZE': '200', 'FDF1LB': '243', 'FDF4C1': '409',
    'FDF4QUADFLAG': '54', 'FDF1SW': '229', 'FDTRANSPOSED': '221',
    'FDSECS': '285', 'FDF1APOD': '428', 'FDF2APODCODE': '413',
    'FDPIPECOUNT': '75', 'FDPEAKBLOCK': '362', 'FDREALSIZE': '97',
    'FDF4SIZE': '32', 'FDF4SW': '29', 'FDF4ORIG': '30', 'FDF3XN': '262',
    'FDF1OBS': '218', 'FDDISPMIN': '252', 'FDF2XN': '258', 'FDF3P1': '61',
    'FDF3P0': '60', 'FDF1ORIG': '249', 'FDF2FTFLAG': '220',
    'FDF1TDSIZE': '387', 'FDLASTPLANE': '78', 'FDF1ZF': '437',
    'FDF4FTSIZE': '201', 'FDF3C1': '404', 'FDFLTFORMAT': '1', 'FDF4CAR': '69',
    'FDF1FTFLAG': '222', 'FDF2OFFPPM': '480', 'FDSIZE': '99',
    'FDYEAR': '296', 'FDF1C1': '423', 'FDUSER3': '72', 'FDF1FTSIZE': '98',
    'FDMINS': '284', 'FDSCALEFLAG': '250', 'FDF3TDSIZE': '388',
    'FDPARTITION': '65', 'FDF3FTFLAG': '13', 'FDF2APODQ1': '415',
    'FD2DVIRGIN': '399', 'FDF2APODQ3': '417', 'FDF2APODQ2': '416',
    'FD2DPHASE': '256', 'FDMAX': '247', 'FDF3SW': '11', 'FDF4TDSIZE': '389',
    'FDPIPEFLAG': '57', 'FDDAY': '295', 'FDF2UNITS': '152',
    'FDF4APODQ3': '408', 'FDFIRSTPLANE': '77', 'FDF3SIZE': '15',
    'FDF3ZF': '438', 'FDF3ORIG': '12', 'FD1DBLOCK': '365',
    'FDF1AQSIGN': '475', 'FDF2OBS': '119', 'FDF1XN': '260',
    'FDF4UNITS': '59', 'FDDIMCOUNT': '9', 'FDF4XN': '264', 'FDUSER2': '71',
    'FDF4APODCODE': '405', 'FDUSER1': '70', 'FDMCFLAG': '135',
    'FDFLTORDER': '2', 'FDUSER5': '74', 'FDF3QUADFLAG': '51',
    'FDUSER4': '73', 'FDTEMPERATURE': '157', 'FDF2APOD': '95',
    'FDMONTH': '294', 'FDF4OFFPPM': '483', 'FDF3OFFPPM': '482',
    'FDF3CAR': '68', 'FDF4P0': '62', 'FDF4P1': '63', 'FDF1OFFPPM': '481',
    'FDF4APOD': '53', 'FDF4X1': '263', 'FDLASTBLOCK': '359',
    'FDPLANELOC': '14', 'FDF2FTSIZE': '96', 'FDF1X1': '259',
    'FDF3CENTER': '81', 'FDF1CAR': '67', 'FDMAGIC': '0',
    'FDF2ORIG': '101', 'FDSPECNUM': '219', 'FDF2AQSIGN': '64',
    'FDF1UNITS': '234', 'FDF2LB': '111', 'FDF4AQSIGN': '477', 'FDF4ZF': '439',
    'FDTAU': '199', 'FDNOISE': '153', 'FDF3APOD': '50',
    'FDF1APODCODE': '414', 'FDF2SW': '100', 'FDF4OBS': '28',
    'FDQUADFLAG': '106', 'FDF2TDSIZE': '386', 'FDHISTBLOCK': '364',
    'FDBASEBLOCK': '361', 'FDF1APODQ2': '421', 'FDF1APODQ3': '422',
    'FDF1APODQ1': '420', 'FDF1QUADFLAG': '55', 'FDF3UNITS': '58',
    'FDF2ZF': '108', 'FDCONTBLOCK': '360', 'FDDIMORDER4': '27',
    'FDDIMORDER3': '26', 'FDDIMORDER2': '25', 'FDDIMORDER1': '24',
    'FDF2CAR': '66', 'FDF3APODCODE': '400', 'FDHOURS': '283',
    'FDF1CENTER': '80', 'FDF3APODQ1': '401', 'FDF3APODQ2': '402',
    'FDF3APODQ3': '403', 'FDBMAPBLOCK': '363', 'FDF2CENTER': '79'
}
# Full mapping of NMRPipe header parameter names to header indices, including
# the text-valued entries (labels, title, comment, operator/user names) that
# are absent from fdata_nums above.
fdata_dic = {
    'FDF4CENTER': '82', 'FDF2P0': '109', 'FDF2P1': '110', 'FDF1P1': '246',
    'FDF2X1': '257', 'FDF1P0': '245', 'FDF3AQSIGN': '476', 'FDDISPMAX': '251',
    'FDF4FTFLAG': '31', 'FDF3X1': '261', 'FDRANK': '180', 'FDF2C1': '418',
    'FDF2QUADFLAG': '56', 'FDSLICECOUNT': '443', 'FDFILECOUNT': '442',
    'FDMIN': '248', 'FDF3OBS': '10', 'FDF4APODQ2': '407', 'FDF4APODQ1': '406',
    'FDF3FTSIZE': '200', 'FDF1LB': '243', 'FDF4C1': '409',
    'FDF4QUADFLAG': '54', 'FDF1SW': '229', 'FDTRANSPOSED': '221',
    'FDSECS': '285', 'FDF1APOD': '428', 'FDF2APODCODE': '413',
    'FDPIPECOUNT': '75', 'FDOPERNAME': '464',
    'FDF3LABEL': '20', 'FDPEAKBLOCK': '362', 'FDREALSIZE': '97',
    'FDF4SIZE': '32', 'FDF4SW': '29', 'FDF4ORIG': '30', 'FDF3XN': '262',
    'FDF1OBS': '218', 'FDDISPMIN': '252', 'FDF2XN': '258', 'FDF3P1': '61',
    'FDF3P0': '60', 'FDF1ORIG': '249', 'FDF2FTFLAG': '220',
    'FDF1TDSIZE': '387', 'FDLASTPLANE': '78', 'FDF1ZF': '437',
    'FDF4FTSIZE': '201', 'FDF3C1': '404', 'FDFLTFORMAT': '1', 'FDF4CAR': '69',
    'FDF1FTFLAG': '222', 'FDF2OFFPPM': '480', 'FDF1LABEL': '18',
    'FDSIZE': '99', 'FDYEAR': '296', 'FDF1C1': '423', 'FDUSER3': '72',
    'FDF1FTSIZE': '98', 'FDMINS': '284', 'FDSCALEFLAG': '250',
    'FDF3TDSIZE': '388', 'FDTITLE': '297', 'FDPARTITION': '65',
    'FDF3FTFLAG': '13', 'FDF2APODQ1': '415', 'FD2DVIRGIN': '399',
    'FDF2APODQ3': '417', 'FDF2APODQ2': '416', 'FD2DPHASE': '256',
    'FDMAX': '247', 'FDF3SW': '11', 'FDF4TDSIZE': '389', 'FDPIPEFLAG': '57',
    'FDDAY': '295', 'FDF2UNITS': '152', 'FDF4APODQ3': '408',
    'FDFIRSTPLANE': '77', 'FDF3SIZE': '15', 'FDF3ZF': '438',
    'FDDIMORDER': '24', 'FDF3ORIG': '12', 'FD1DBLOCK': '365',
    'FDF1AQSIGN': '475', 'FDF2OBS': '119', 'FDF1XN': '260',
    'FDF4UNITS': '59', 'FDDIMCOUNT': '9', 'FDF4XN': '264', 'FDUSER2': '71',
    'FDF4APODCODE': '405', 'FDUSER1': '70', 'FDMCFLAG': '135',
    'FDFLTORDER': '2', 'FDUSER5': '74', 'FDCOMMENT': '312',
    'FDF3QUADFLAG': '51', 'FDUSER4': '73', 'FDTEMPERATURE': '157',
    'FDF2APOD': '95', 'FDMONTH': '294', 'FDF4OFFPPM': '483',
    'FDF3OFFPPM': '482', 'FDF3CAR': '68', 'FDF4P0': '62',
    'FDF4P1': '63', 'FDF1OFFPPM': '481', 'FDF4APOD': '53', 'FDF4X1': '263',
    'FDLASTBLOCK': '359', 'FDPLANELOC': '14', 'FDF2FTSIZE': '96',
    'FDUSERNAME': '290', 'FDF1X1': '259', 'FDF3CENTER': '81',
    'FDF1CAR': '67', 'FDMAGIC': '0', 'FDF2ORIG': '101', 'FDSPECNUM': '219',
    'FDF2LABEL': '16', 'FDF2AQSIGN': '64', 'FDF1UNITS': '234',
    'FDF2LB': '111', 'FDF4AQSIGN': '477', 'FDF4ZF': '439', 'FDTAU': '199',
    'FDF4LABEL': '22', 'FDNOISE': '153', 'FDF3APOD': '50',
    'FDF1APODCODE': '414', 'FDF2SW': '100', 'FDF4OBS': '28',
    'FDQUADFLAG': '106', 'FDF2TDSIZE': '386', 'FDHISTBLOCK': '364',
    'FDSRCNAME': '286', 'FDBASEBLOCK': '361', 'FDF1APODQ2': '421',
    'FDF1APODQ3': '422', 'FDF1APODQ1': '420', 'FDF1QUADFLAG': '55',
    'FDF3UNITS': '58', 'FDF2ZF': '108', 'FDCONTBLOCK': '360',
    'FDDIMORDER4': '27', 'FDDIMORDER3': '26', 'FDDIMORDER2': '25',
    'FDDIMORDER1': '24', 'FDF2CAR': '66', 'FDF3APODCODE': '400',
    'FDHOURS': '283', 'FDF1CENTER': '80', 'FDF3APODQ1': '401',
    'FDF3APODQ2': '402', 'FDF3APODQ3': '403', 'FDBMAPBLOCK': '363',
    'FDF2CENTER': '79'
}
| {
"content_hash": "45090af7dbcdac5887f71cce9e80ce48",
"timestamp": "",
"source": "github",
"line_count": 2258,
"max_line_length": 79,
"avg_line_length": 30.82860938883968,
"alnum_prop": 0.5639338610277111,
"repo_name": "atomman/nmrglue",
"id": "642a4893534efb9fd268c8afcbe2bd187c840d32",
"size": "69611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nmrglue/fileio/pipe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "25386"
},
{
"name": "Python",
"bytes": "837166"
},
{
"name": "Shell",
"bytes": "10372"
}
],
"symlink_target": ""
} |
from django.views import generic
from .base import TemplateNameResolverMixin
# Public API of this module.  NOTE: ListViewMixin is deliberately not
# exported here.
__all__ = (
    'ListView',
)
class ListViewMixin(TemplateNameResolverMixin):
    """Mixin combining template-name resolution with list views.

    Presumably TemplateNameResolverMixin uses ``template_suffixes`` to build
    candidate template names -- confirm in ``.base``.
    """
    template_suffixes = ('list',)
class ListView(ListViewMixin, generic.ListView):
    """Django generic ListView with resolver-based template naming."""
    pass
| {
"content_hash": "d36afc0e5ad438d1d71ae6896895e42c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 16.125,
"alnum_prop": 0.7209302325581395,
"repo_name": "jleeothon/canclon",
"id": "801068b0d1252eb55167cbc49c86ec4c9c1c0bae",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canclon/multiple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3895"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# -- stdlib --
import json
import os
import socket
import sys
import time
# -- third party --
# -- own --
# -- code --
# Emit a falcon-style metric reporting whether `docker ps` hangs.
ts = int(time.time())
# Docker not installed at all: report an empty metric list and exit cleanly.
# (Parenthesized print keeps Python 2 behavior while parsing under Python 3.)
if os.system('which docker > /dev/null'):
    print('[]')
    sys.exit(0)
# A non-zero exit from the timed-out command means `docker ps` did not
# return within 10 seconds, i.e. the docker daemon is stuck.
stuck = bool(os.system("timeout -k 10s 10s sudo docker ps > /dev/null 2>&1"))
rst = [{
    'metric': 'docker.stuck',
    'timestamp': ts,
    'step': 60,
    'value': int(stuck),
}]
print(json.dumps(rst))
| {
"content_hash": "b978e8f8df48a096d1f16d1d916ef483",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 15.766666666666667,
"alnum_prop": 0.587737843551797,
"repo_name": "kadashu/satori",
"id": "1ed9f709cffa0f4b8b7b6ad061ea7407aad65fd4",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori-rules/plugin/docker/60_docker_ps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189613"
},
{
"name": "Clojure",
"bytes": "52043"
},
{
"name": "Go",
"bytes": "102479"
},
{
"name": "HTML",
"bytes": "10254"
},
{
"name": "JavaScript",
"bytes": "16585"
},
{
"name": "Python",
"bytes": "4195260"
},
{
"name": "Ruby",
"bytes": "2312"
},
{
"name": "Shell",
"bytes": "18923"
},
{
"name": "Smarty",
"bytes": "4807"
}
],
"symlink_target": ""
} |
import csv, sys
from setup import db
from models.trawler import Trawler
def reload_data(db):
    """Reload source URLs from the CSV file into the ``sources`` collection.

    Clears any existing documents, then inserts one document per data row of
    ``models/sources/urls.csv``.  The CSV header row is skipped.

    Args:
        db: database handle exposing a ``sources`` collection with
            ``remove()`` and ``insert(doc)``.
    """
    db.sources.remove()
    # Context manager fixes the original's leaked file handle.
    with open('models/sources/urls.csv') as csv_file:
        reader = csv.reader(csv_file)
        next(reader, None)  # skip the header row
        for url in reader:
            db.sources.insert({'url': url[0],
                               'tags': url[1].split(','),
                               'domain': url[2],
                               'weight': 1,
                               'has_rules': False
                               })
def trawl_sources(db):
    """Run the trawler over every configured source."""
    trawler = Trawler(db)
    trawler.trawl()
if __name__ == '__main__':
    # Dispatch on the single command-line argument.  Parenthesized prints
    # keep Python 2 output identical while also parsing under Python 3.
    try:
        if sys.argv[1] == 'trawl':
            trawl_sources(db)
        elif sys.argv[1] == 'reload':
            reload_data(db)
        else:
            print('I pity you fool!')
    except IndexError:
        # No argument supplied at all.
        print('Supply correct arg fool!')
| {
"content_hash": "7e6dd0fa7aa90f02744b376bf39d62c7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 24.161290322580644,
"alnum_prop": 0.5100133511348465,
"repo_name": "damilare/mitiri",
"id": "34d0548ac53041c32a70a72727dbc8df762a9be8",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4651"
},
{
"name": "JavaScript",
"bytes": "1646"
},
{
"name": "Python",
"bytes": "8052"
}
],
"symlink_target": ""
} |
from random import randint
def roll_dice(num_rolls=None, num_sides=None):
    """Simulate rolling a die repeatedly.

    Generalized from the original global-only version: both arguments
    default to the module-level ``times`` and ``sides`` globals, so the
    existing parameterless ``roll_dice()`` call keeps working, while
    callers may now pass explicit values.

    Returns:
        list of int: one result in [1, num_sides] per roll.
    """
    if num_rolls is None:
        num_rolls = times
    if num_sides is None:
        num_sides = sides
    return [randint(1, num_sides) for _ in range(num_rolls)]
def frequencies(x, num_sides=None):
    """Count how often each face value occurs in ``x``.

    Generalized from the original global-only version: ``num_sides``
    defaults to the module-level ``sides`` global so the existing
    single-argument call keeps working.

    Returns:
        list of int: occurrence count for each face 1..num_sides, in order.
    """
    if num_sides is None:
        num_sides = sides
    return [x.count(face) for face in range(1, num_sides + 1)]
# define the die sides and roll times
sides = 6
times = 1000
# calculate results and the frequency of each side
roll_results = roll_dice()
# print(roll_results)
# NOTE: this rebinds the name `frequencies` from the function to its result,
# shadowing the function for the rest of the script.
frequencies = frequencies(roll_results)
print(frequencies)
# visualize using pygal
import pygal
# plot the chart using bars
freq_visual = pygal.Bar()
# optimize the chart
freq_visual.title = 'Rolling Results of 1,000 times'
freq_visual.x_labels = [str(x) for x in range(1, 7)]
freq_visual.x_title = 'Results'
freq_visual.y_title = 'Frequency'
# plot and save to file
freq_visual.add('6-side Dice', frequencies)
freq_visual.render_to_file('dice.svg') | {
"content_hash": "b56856294ab4b13095111fc8908bf1c3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 52,
"avg_line_length": 23.72093023255814,
"alnum_prop": 0.6843137254901961,
"repo_name": "littleocub/python_practice",
"id": "fe5c825bc9b58e3d06eeed8837c03a8bd4197f50",
"size": "1020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roll_dice_pygal/dice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5090"
}
],
"symlink_target": ""
} |
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
    """Stack-machine interpreter for sequencing PIL image operations.

    Tokens are pushed onto the evaluation stack; any string token naming a
    ``do_<token>`` method is popped and the method executed.  See the module
    docstring for the full command reference.
    """

    verbose = 0

    def do_verbose(self):
        """usage: verbose <int:num>
        Set verbosity flag from top of stack.
        """
        self.verbose = int(self.do_pop())

    # The evaluation stack (internal only).  A class-level default is safe
    # here because every mutation rebinds self.stack rather than mutating
    # the shared list in place.
    stack = []          # Stack of pending operations

    def push(self, item):
        "Push an argument onto the evaluation stack."
        self.stack = [item] + self.stack

    def top(self):
        "Return the top-of-stack element."
        return self.stack[0]

    # Stack manipulation (callable)

    def do_clear(self):
        """usage: clear
        Clear the stack.
        """
        self.stack = []

    def do_pop(self):
        """usage: pop
        Discard and return the top element on the stack.
        """
        top = self.stack[0]
        self.stack = self.stack[1:]
        return top

    def do_dup(self):
        """usage: dup
        Duplicate the top-of-stack item.
        """
        # BUG FIX: the original tested hasattr(self, 'format'), which is
        # always false for the driver object itself, so images were never
        # actually copied.  Test the top-of-stack item instead.
        if isinstance(self.stack[0], Image.Image):
            dup = self.stack[0].copy()  # If it's an image, do a real copy
        else:
            dup = self.stack[0]
        self.stack = [dup] + self.stack

    def do_swap(self):
        """usage: swap
        Swap the top-of-stack item with the next one down.
        """
        self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]

    # Image module functions (callable)

    def do_new(self):
        """usage: new <int:xsize> <int:ysize> <int:color>:
        Create and push a greyscale image of given size and color.
        """
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        color = int(self.do_pop())
        self.push(Image.new("L", (xsize, ysize), color))

    def do_open(self):
        """usage: open <string:filename>
        Open the indicated image, read it, push the image on the stack.
        """
        self.push(Image.open(self.do_pop()))

    def do_blend(self):
        """usage: blend <image:pic1> <image:pic2> <float:alpha>
        Replace two images and an alpha with the blended image.
        """
        image1 = self.do_pop()
        image2 = self.do_pop()
        alpha = float(self.do_pop())
        self.push(Image.blend(image1, image2, alpha))

    def do_composite(self):
        """usage: composite <image:pic1> <image:pic2> <image:mask>
        Replace two images and a mask with their composite.
        """
        image1 = self.do_pop()
        image2 = self.do_pop()
        mask = self.do_pop()
        self.push(Image.composite(image1, image2, mask))

    def do_merge(self):
        """usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]
        Merge top-of stack images in a way described by the mode.
        """
        mode = self.do_pop()
        bandlist = []
        # one image popped per band letter of the mode string
        for _band in mode:
            bandlist.append(self.do_pop())
        self.push(Image.merge(mode, bandlist))

    # Image class methods

    def do_convert(self):
        """usage: convert <string:mode> <image:pic1>
        Convert the top image to the given mode.
        """
        mode = self.do_pop()
        image = self.do_pop()
        self.push(image.convert(mode))

    def do_copy(self):
        """usage: copy <image:pic1>
        Make and push a true copy of the top image.
        """
        # BUG FIX: the original called self.dup(), which does not exist;
        # the method is do_dup().
        self.do_dup()

    def do_crop(self):
        """usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1>
        Crop and push a rectangular region from the current image.
        """
        left = int(self.do_pop())
        upper = int(self.do_pop())
        right = int(self.do_pop())
        lower = int(self.do_pop())
        image = self.do_pop()
        self.push(image.crop((left, upper, right, lower)))

    def do_draft(self):
        """usage: draft <string:mode> <int:xsize> <int:ysize> <image:pic1>
        Configure the loader of the top image for a given mode and size.
        """
        mode = self.do_pop()
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        # BUG FIX: the original called self.draft(), which does not exist on
        # the driver; draft() belongs to the image on top of the stack.
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))
        self.push(image)

    def do_filter(self):
        """usage: filter <string:filtername> <image:pic1>
        Process the top image with the given filter.
        """
        from PIL import ImageFilter
        # Use getattr instead of eval on a user-supplied token.
        image_filter = getattr(ImageFilter, self.do_pop().upper())
        image = self.do_pop()
        self.push(image.filter(image_filter))

    def do_getbbox(self):
        """usage: getbbox
        Push left, upper, right, and lower pixel coordinates of the top image.
        """
        bounding_box = self.do_pop().getbbox()
        self.push(bounding_box[3])
        self.push(bounding_box[2])
        self.push(bounding_box[1])
        self.push(bounding_box[0])

    def do_getextrema(self):
        """usage: getextrema
        Push minimum and maximum pixel values of the top image.
        """
        # BUG FIX: Image objects have getextrema(), not extrema().
        extrema = self.do_pop().getextrema()
        self.push(extrema[1])
        self.push(extrema[0])

    def do_offset(self):
        """usage: offset <int:xoffset> <int:yoffset> <image:pic1>
        Offset the pixels in the top image.
        """
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        image = self.do_pop()
        # NOTE(review): Image.offset was removed in modern Pillow; the
        # replacement is ImageChops.offset -- confirm the targeted version.
        self.push(image.offset(xoff, yoff))

    def do_paste(self):
        """usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground>
        Paste figure image into ground with upper left at given offsets.
        """
        figure = self.do_pop()
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        ground = self.do_pop()
        if figure.mode == "RGBA":
            # use the alpha channel as the paste mask
            ground.paste(figure, (xoff, yoff), figure)
        else:
            ground.paste(figure, (xoff, yoff))
        self.push(ground)

    def do_resize(self):
        """usage: resize <int:xsize> <int:ysize> <image:pic1>
        Resize the top image.
        """
        ysize = int(self.do_pop())
        xsize = int(self.do_pop())
        image = self.do_pop()
        self.push(image.resize((xsize, ysize)))

    def do_rotate(self):
        """usage: rotate <int:angle> <image:pic1>
        Rotate image through a given angle
        """
        angle = int(self.do_pop())
        image = self.do_pop()
        self.push(image.rotate(angle))

    def do_save(self):
        """usage: save <string:filename> <image:pic1>
        Save image with default options.
        """
        filename = self.do_pop()
        image = self.do_pop()
        image.save(filename)

    def do_save2(self):
        """usage: save2 <string:filename> <string:options> <image:pic1>
        Save image with specified options.
        """
        filename = self.do_pop()
        options = self.do_pop()
        image = self.do_pop()
        image.save(filename, None, options)

    def do_show(self):
        """usage: show <image:pic1>
        Display and pop the top image.
        """
        self.do_pop().show()

    def do_thumbnail(self):
        """usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
        Modify the top image in the stack to contain a thumbnail of itself.
        """
        ysize = int(self.do_pop())
        xsize = int(self.do_pop())
        self.top().thumbnail((xsize, ysize))

    def do_transpose(self):
        """usage: transpose <string:operator> <image:pic1>
        Transpose the top image.
        """
        transpose = self.do_pop().upper()
        image = self.do_pop()
        self.push(image.transpose(transpose))

    # Image attributes

    def do_format(self):
        """usage: format <image:pic1>
        Push the format of the top image onto the stack.
        """
        self.push(self.do_pop().format)

    def do_mode(self):
        """usage: mode <image:pic1>
        Push the mode of the top image onto the stack.
        """
        self.push(self.do_pop().mode)

    def do_size(self):
        """usage: size <image:pic1>
        Push the image size on the stack as (y, x).
        """
        size = self.do_pop().size
        self.push(size[0])
        self.push(size[1])

    # ImageChops operations

    def do_invert(self):
        """usage: invert <image:pic1>
        Invert the top image.
        """
        from PIL import ImageChops
        self.push(ImageChops.invert(self.do_pop()))

    def do_lighter(self):
        """usage: lighter <image:pic1> <image:pic2>
        Pop the two top images, push an image of the lighter pixels of both.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.lighter(image1, image2))

    def do_darker(self):
        """usage: darker <image:pic1> <image:pic2>
        Pop the two top images, push an image of the darker pixels of both.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.darker(image1, image2))

    def do_difference(self):
        """usage: difference <image:pic1> <image:pic2>
        Pop the two top images, push the difference image
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.difference(image1, image2))

    def do_multiply(self):
        """usage: multiply <image:pic1> <image:pic2>
        Pop the two top images, push the multiplication image.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.multiply(image1, image2))

    def do_screen(self):
        """usage: screen <image:pic1> <image:pic2>
        Pop the two top images, superimpose their inverted versions.
        """
        from PIL import ImageChops
        image2 = self.do_pop()
        image1 = self.do_pop()
        self.push(ImageChops.screen(image1, image2))

    def do_add(self):
        """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
        Pop the two top images, produce the scaled sum with offset.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.add(image1, image2, scale, offset))

    def do_subtract(self):
        """usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
        Pop the two top images, produce the scaled difference with offset.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.subtract(image1, image2, scale, offset))

    # ImageEnhance classes

    def do_color(self):
        """usage: color <float:factor> <image:pic1>
        Enhance color in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Color(image)
        self.push(enhancer.enhance(factor))

    def do_contrast(self):
        """usage: contrast <float:factor> <image:pic1>
        Enhance contrast in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Contrast(image)
        self.push(enhancer.enhance(factor))

    def do_brightness(self):
        """usage: brightness <float:factor> <image:pic1>
        Enhance brightness in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Brightness(image)
        self.push(enhancer.enhance(factor))

    def do_sharpness(self):
        """usage: sharpness <float:factor> <image:pic1>
        Enhance sharpness in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Sharpness(image)
        self.push(enhancer.enhance(factor))

    # The interpreter loop

    def execute(self, list):
        # NOTE: the parameter shadows the builtin `list`; kept for interface
        # compatibility with existing callers.  The caller's list is consumed
        # (reversed in place), matching the original behavior.
        "Interpret a list of PILDriver commands."
        list.reverse()
        while len(list) > 0:
            self.push(list[0])
            list = list[1:]
            if self.verbose:
                print("Stack: " + repr(self.stack))
            top = self.top()
            if not isinstance(top, str):
                continue
            funcname = "do_" + top
            if not hasattr(self, funcname):
                # unknown token: leave it on the stack as data
                continue
            else:
                self.do_pop()
                func = getattr(self, funcname)
                func()
if __name__ == '__main__':
    import sys
    # If we see command-line arguments, interpret them as a stack state
    # and execute.  Otherwise go interactive.
    driver = PILDriver()
    if len(sys.argv[1:]) > 0:
        driver.execute(sys.argv[1:])
    else:
        print("PILDriver says hello.")
        while True:
            try:
                # raw_input only exists on Python 2; input() is its
                # Python 3 equivalent.
                if sys.version_info[0] >= 3:
                    line = input('pildriver> ')
                else:
                    line = raw_input('pildriver> ')
            except EOFError:
                print("\nPILDriver says goodbye.")
                break
            driver.execute(line.split())
            # echo the resulting stack state after each command line
            print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
| {
"content_hash": "899de647fd71bd11977fb8abf989c82c",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 95,
"avg_line_length": 29.549618320610687,
"alnum_prop": 0.5833763885300955,
"repo_name": "WillisXChen/django-oscar",
"id": "ce2ecdfee30eeb9fa8adbce066a264fb7abf2a60",
"size": "15526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/bin/pildriver.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "78"
},
{
"name": "C",
"bytes": "5979"
},
{
"name": "C++",
"bytes": "572"
},
{
"name": "CSS",
"bytes": "694578"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "21346"
},
{
"name": "HTML",
"bytes": "708061"
},
{
"name": "JavaScript",
"bytes": "1433937"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Makefile",
"bytes": "6656"
},
{
"name": "Python",
"bytes": "47548581"
},
{
"name": "Shell",
"bytes": "6790"
},
{
"name": "Smarty",
"bytes": "21023"
},
{
"name": "TeX",
"bytes": "56837"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
} |
"""Data generators for translation data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
# Dependency imports
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import translate
from tensor2tensor.utils import registry
import tensorflow as tf
# Training corpora: each entry is [archive URL, (en member path, de member
# path)] within the downloaded tarball.
_ENDE_TRAIN_DATASETS = [
    [
        "http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",  # pylint: disable=line-too-long
        ("training/news-commentary-v12.de-en.en",
         "training/news-commentary-v12.de-en.de")
    ],
    [
        "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
        ("commoncrawl.de-en.en", "commoncrawl.de-en.de")
    ],
    [
        "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
        ("training/europarl-v7.de-en.en", "training/europarl-v7.de-en.de")
    ],
]
# Evaluation corpus (newstest2013), same [URL, (en, de)] layout as above.
_ENDE_TEST_DATASETS = [
    [
        "http://data.statmt.org/wmt17/translation-task/dev.tgz",
        ("dev/newstest2013.en", "dev/newstest2013.de")
    ],
]
def _get_wmt_ende_bpe_dataset(directory, filename):
  """Extract the WMT en-de corpus `filename` to directory unless it's there."""
  train_path = os.path.join(directory, filename)
  # Both language sides must already be present to skip the download.
  have_de = tf.gfile.Exists(train_path + ".de")
  have_en = tf.gfile.Exists(train_path + ".en")
  if not (have_de and have_en):
    url = ("https://drive.google.com/uc?export=download&id="
           "0B_bZck-ksdkpM25jRUN2X2UxMm8")
    corpus_file = generator_utils.maybe_download_from_drive(
        directory, "wmt16_en_de.tar.gz", url)
    with tarfile.open(corpus_file, "r:gz") as corpus_tar:
      corpus_tar.extractall(directory)
  return train_path
@registry.register_problem
class TranslateEndeWmtBpe32k(translate.TranslateProblem):
  """Problem spec for WMT En-De translation, BPE version."""

  @property
  def approx_vocab_size(self):
    # size of the pre-built BPE vocabulary
    return 32000

  @property
  def vocab_filename(self):
    return "vocab.bpe.%d" % self.approx_vocab_size

  def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
    # Returns an encoder for the fixed BPE vocab; never builds one.
    # NOTE(review): when force_get is False a missing vocab file still
    # reaches TokenTextEncoder -- confirm that is intended.
    vocab_filename = os.path.join(data_dir, self.vocab_filename)
    if not tf.gfile.Exists(vocab_filename) and force_get:
      raise ValueError("Vocab %s not found" % vocab_filename)
    return text_encoder.TokenTextEncoder(vocab_filename, replace_oov="UNK")

  def generate_samples(self, data_dir, tmp_dir, dataset_split):
    """Instance of token generator for the WMT en->de task, training set."""
    train = dataset_split == problem.DatasetSplit.TRAIN
    dataset_path = ("train.tok.clean.bpe.32000"
                    if train else "newstest2013.tok.bpe.32000")
    train_path = _get_wmt_ende_bpe_dataset(tmp_dir, dataset_path)
    # Vocab: copy from tmp_dir into data_dir, prepending the reserved
    # <pad>/<EOS> tokens and appending UNK.
    token_path = os.path.join(data_dir, self.vocab_filename)
    if not tf.gfile.Exists(token_path):
      token_tmp_path = os.path.join(tmp_dir, self.vocab_filename)
      tf.gfile.Copy(token_tmp_path, token_path)
      with tf.gfile.GFile(token_path, mode="r") as f:
        vocab_data = "<pad>\n<EOS>\n" + f.read() + "UNK\n"
      with tf.gfile.GFile(token_path, mode="w") as f:
        f.write(vocab_data)
    return text_problems.text2text_txt_iterator(train_path + ".en",
                                                train_path + ".de")
@registry.register_problem
class TranslateEndeWmt8k(translate.TranslateProblem):
  """Problem spec for WMT En-De translation."""

  @property
  def approx_vocab_size(self):
    return 2**13  # 8192

  @property
  def vocab_filename(self):
    return "vocab.ende.%d" % self.approx_vocab_size

  def source_data_files(self, dataset_split):
    # training uses the parallel corpora; everything else uses newstest2013
    train = dataset_split == problem.DatasetSplit.TRAIN
    return _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS
@registry.register_problem
class TranslateEndeWmt32k(TranslateEndeWmt8k):
  """WMT En-De with a larger (~32k) approximate vocabulary."""

  @property
  def approx_vocab_size(self):
    return 2**15  # 32768
@registry.register_problem
class TranslateEndeWmt32kPacked(TranslateEndeWmt32k):
  """Packed variant of the 32k problem (sequences packed to length 256)."""

  @property
  def packed_length(self):
    return 256
@registry.register_problem
class TranslateEndeWmtCharacters(translate.TranslateProblem):
  """Problem spec for WMT En-De translation, character-level vocabulary."""

  @property
  def vocab_type(self):
    return text_problems.VocabType.CHARACTER
| {
"content_hash": "c8475c3300921dfd19a4e31b5208dad8",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 118,
"avg_line_length": 32.44117647058823,
"alnum_prop": 0.6915231187669991,
"repo_name": "rsepassi/tensor2tensor",
"id": "b493ec5c9d44e405ceb019d97f121976f4cabedd",
"size": "5018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/data_generators/translate_ende.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "34646"
},
{
"name": "JavaScript",
"bytes": "78396"
},
{
"name": "Jupyter Notebook",
"bytes": "2328225"
},
{
"name": "Python",
"bytes": "1702690"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
} |
from toee import *
import char_class_utils
import char_editor
###################################################
def GetConditionName(): # used by API
    """Name of the class condition exposed to the engine API."""
    return "Bard"
# def GetSpellCasterConditionName():
# return "Bard Spellcasting"
def GetCategory():
    """UI grouping for this class."""
    return "Core 3.5 Ed Classes"
def GetClassDefinitionFlags():
    """Marks this as a base class and a core-rules class."""
    return CDF_BaseClass | CDF_CoreClass
def GetClassHelpTopic():
    """Help-system tag for this class."""
    return "TAG_BARDS"
# Engine enum identifying this class.
classEnum = stat_level_bard
###################################################
# Feats automatically granted, keyed by class level.
class_feats = {
    1: (feat_simple_weapon_proficiency_bard, feat_shield_proficiency,
        feat_bardic_music, feat_bardic_knowledge)
    #feat_martial_weapon_proficiency_composite_longbow,
    #feat_martial_weapon_proficiency_longbow, feat_martial_weapon_proficiency_longsword,
    #feat_martial_weapon_proficiency_rapier, feat_martial_weapon_proficiency_sap,
    #feat_martial_weapon_proficiency_composite_shortbow,
    #feat_martial_weapon_proficiency_short_sword,feat_martial_weapon_proficiency_shortbow )
}
# Class skill list.  Fixed: the original listed skill_decipher_script and
# skill_diplomacy twice each; duplicates are redundant for the membership
# test performed on this tuple.
class_skills = (skill_alchemy, skill_appraise, skill_balance, skill_bluff,
                skill_climb, skill_concentration, skill_craft,
                skill_decipher_script, skill_diplomacy, skill_disguise,
                skill_escape_artist, skill_gather_information, skill_hide,
                skill_jump, skill_knowledge_nature, skill_knowledge_all,
                skill_listen, skill_move_silently, skill_perform,
                skill_profession, skill_sense_motive, skill_pick_pocket,
                skill_spellcraft, skill_swim, skill_tumble,
                skill_use_magic_device)
# Base spell slots per day, keyed by class level.  Tuple index is the spell
# level (0-based); value is the slot count at that spell level.
spells_per_day = {
    1: (2,),
    2: (3, 0),
    3: (3, 1),
    4: (3, 2, 0),
    5: (3, 3, 1),
    6: (3, 3, 2),
    7: (3, 3, 2, 0),
    8: (3, 3, 3, 1),
    9: (3, 3, 3, 2),
    10: (3, 3, 3, 2, 0),
    11: (3, 3, 3, 3, 1),
    12: (3, 3, 3, 3, 2),
    13: (3, 3, 3, 3, 2, 0),
    14: (4, 3, 3, 3, 3, 1),
    15: (4, 4, 3, 3, 3, 2),
    16: (4, 4, 4, 3, 3, 2, 0),
    17: (4, 4, 4, 4, 3, 3, 1),
    18: (4, 4, 4, 4, 4, 3, 2),
    19: (4, 4, 4, 4, 4, 4, 3),
    20: (4, 4, 4, 4, 4, 4, 4)
    #lvl 0  1  2  3  4  5  6  7  8  9
}
# Number of spells known, keyed by bard class level.
# Index within each tuple is the spell level (0 = cantrips).
spells_known = {
    1: (4,),
    2: (5, 2),
    3: (6, 3),
    4: (6, 3, 2),
    5: (6, 4, 3),
    6: (6, 4, 3),
    7: (6, 4, 4, 2),
    8: (6, 4, 4, 3),
    9: (6, 4, 4, 3),
    10: (6, 4, 4, 4, 2),
    11: (6, 4, 4, 4, 3),
    12: (6, 4, 4, 4, 3),
    13: (6, 4, 4, 4, 4, 2),
    14: (6, 4, 4, 4, 4, 3),
    15: (6, 4, 4, 4, 4, 3),
    16: (6, 5, 4, 4, 4, 4, 2),
    17: (6, 5, 5, 4, 4, 4, 3),
    18: (6, 5, 5, 5, 4, 4, 3),
    19: (6, 5, 5, 5, 5, 4, 4),
    20: (6, 5, 5, 5, 5, 5, 4)
    #lvl 0  1  2  3  4  5  6  7  8  9
}
# Simple class-table getters queried by the levelup engine.

def GetHitDieType():
    return 6 # d6

def GetSkillPtsPerLevel():
    return 6

def GetBabProgression():
    return base_attack_bonus_type_semi_martial

def IsFortSaveFavored():
    return 0

def IsRefSaveFavored():
    return 1

def IsWillSaveFavored():
    return 1

# Spell casting
def HasArmoredArcaneCasterFeature():
    # Bards may cast in light armor without arcane spell failure
    return 1

def GetSpellListType():
    return spell_list_type_bardic

def GetSpellSourceType():
    return spell_source_type_arcane

def GetSpellReadyingType():
    # Spontaneous caster: no spell preparation
    return spell_readying_innate

def GetSpellsPerDay():
    return spells_per_day

# Caster level equals class level for all 20 levels
caster_levels = range(1, 21)

def GetCasterLevels():
    return caster_levels

def GetSpellDeterminingStat():
    return stat_charisma

def IsClassSkill(skillEnum):
    return char_class_utils.IsClassSkill(class_skills, skillEnum)

def IsClassFeat(featEnum):
    return char_class_utils.IsClassFeat(class_feats, featEnum)

def GetClassFeats():
    return class_feats
def IsAlignmentCompatible( alignment):
    # Bards must be non-lawful: accept any alignment without the lawful bit.
    if (alignment & ALIGNMENT_LAWFUL) == 0:
        return 1
    return 0
def ObjMeetsPrereqs( obj ):
    # Taking a bard level requires Charisma 11 or higher.
    charisma = obj.stat_base_get(stat_charisma)
    return 1 if charisma > 10 else 0
## Levelup callbacks
def IsSelectingSpellsOnLevelup( obj ):
    # Bards choose their spells known at levelup, so show the spell UI.
    return 1
def InitSpellSelection( obj, classLvlNew = -1, classLvlIncrement = 1):
    """Populates the levelup UI's available-spell and spell-slot lists.

    obj: the character being leveled up.
    classLvlNew: the bard level being attained; defaults to current level + 1.
    classLvlIncrement: how many effective spell-list levels are being added.
    Always returns 0.
    """
    classLvl = obj.stat_level_get(classEnum)
    if classLvlNew <= 0:
        classLvlNew = classLvl + 1
    maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew ) # this regards spell list extension by stuff like Mystic Theurge (no need to use spellListLvl as below)

    # Available Spells
    spAvail = char_editor.get_learnable_spells(obj, classEnum, maxSpellLvl)
    # add spell level labels
    for p in range(0,maxSpellLvl+1):
        spAvail.append(char_editor.KnownSpellInfo(spell_label_level_0 + p, 0, classEnum))
    spAvail.sort()
    char_editor.append_available_spells(spAvail)

    # Spell Slots
    if classLvlNew == 1: #newly taken class
        spEnums = []
        # 4 cantrips
        spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0, 0, classEnum)) # add "Level 0" label
        for p in range(0,4):
            spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0, 3, classEnum))
        char_editor.append_spell_enums(spEnums)
        return 0

    # Incrementing class level
    spellListLvl = obj.stat_level_get(stat_spell_list_level, classEnum) + classLvlIncrement # the effective level for getting the number of spells known
    spEnums = char_editor.get_known_class_spells(obj, classEnum) # get all spells known for this class
    for spellLvl in range(0, maxSpellLvl+1):
        spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0 + spellLvl, 0, classEnum)) # add label
        # add newly gained spells-known slots for this spell level
        newSpellsKnownCount = char_class_utils.GetSpellsKnownAddedCount( spells_known , spellListLvl, spellLvl)
        for q in range(0, newSpellsKnownCount):
            spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0 + spellLvl, 3, classEnum))

    isReplacing = 0
    if spellListLvl >= 5 and ((spellListLvl - 5) % 3) == 0: # spell replacement
        isReplacing = 1
    if char_editor.get_class_code() != classEnum: #grant this benefit only for strict levelup (also to prevent some headache...)
        isReplacing = 0

    if isReplacing == 0:
        spEnums.sort()
        char_editor.append_spell_enums(spEnums)
        return 0

    # mark as replaceable (skip labels and new-slot placeholders; only spells
    # at least two levels below the current maximum may be swapped out)
    for p in range(0,len(spEnums)):
        spEnum = spEnums[p].spell_enum
        if spell_vacant <= spEnum <= spell_label_level_9:
            continue
        if spell_new_slot_lvl_0 <= spEnum <= spell_new_slot_lvl_9:
            continue
        if char_editor.get_spell_level(spEnum, classEnum) <= maxSpellLvl-2:
            spEnums[p].spell_status = 1

    spEnums.sort()
    char_editor.append_spell_enums(spEnums)
    return 0
def LevelupCheckSpells( obj):
    """Returns 0 while any bard spell slot is still vacant, else 1."""
    for info in char_editor.get_spell_enums():
        if info.get_casting_class() != stat_level_bard:
            continue
        if info.spell_enum == spell_vacant:
            return 0
    return 1
def LevelupSpellsFinalize( obj, classLvlNew = -1 ):
    # Commit the spell selection made in the levelup UI.
    spEnums = char_editor.get_spell_enums()
    char_editor.spell_known_add(spEnums) # internally takes care of duplicates and the labels/vacant slots
    return
return | {
"content_hash": "9e5b86debda16ecf9d51816c9e8d778a",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 508,
"avg_line_length": 28.41850220264317,
"alnum_prop": 0.6840799875988219,
"repo_name": "GrognardsFromHell/TemplePlus",
"id": "2ea4e489687236929c02038fc1579b2863565f2d",
"size": "6451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tpdatasrc/tpgamefiles/rules/char_class/class008_bard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "629718"
},
{
"name": "C#",
"bytes": "167885"
},
{
"name": "C++",
"bytes": "10018792"
},
{
"name": "CMake",
"bytes": "91980"
},
{
"name": "CSS",
"bytes": "1292"
},
{
"name": "HLSL",
"bytes": "18884"
},
{
"name": "HTML",
"bytes": "433942"
},
{
"name": "PowerShell",
"bytes": "5374"
},
{
"name": "Python",
"bytes": "2850350"
}
],
"symlink_target": ""
} |
from quipp import *
def run():
    """Builds the sampling function for a small factor-analysis example.

    Returns a zero-argument `sample` function producing a tuple of
    (latent components, observed point).
    """
    num_components = 2
    point_dim = 5
    # NOTE(review): Vector/Double/rand_function/normal come from `quipp`;
    # presumably these declare fixed-size double-vector types and a random
    # function between them -- confirm against the quipp API.
    ComponentsType = Vector(num_components, Double)
    PointType = Vector(point_dim, Double)
    get_point = rand_function(ComponentsType, PointType)
    def sample():
        # Latent components drawn independently from normal(0, 1)
        components = [normal(0, 1) for i in range(num_components)]
        return (components, get_point(components))
    return sample

# Run the factor-analysis example using the model factory defined above.
run_factor_analysis_example(run)
| {
"content_hash": "8546f47f82b035404bc2768379ca9c81",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 21.88888888888889,
"alnum_prop": 0.7081218274111675,
"repo_name": "jessica-taylor/quipp2",
"id": "a5ff85c3e3e4dfd0adc4a12b0c286a9798e73d24",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/examples/factor_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "91745"
},
{
"name": "Python",
"bytes": "21372"
},
{
"name": "TeX",
"bytes": "26187"
}
],
"symlink_target": ""
} |
__author__ = 'eperez'
from datetime import datetime
from bson import ObjectId
from flask import request
from eduid_userdb.actions import Action
from eduid_userdb.actions.tou import ToUUser, ToUUserDB
from eduid_userdb.tou import ToUEvent
from eduid_webapp.actions.action_abc import ActionPlugin
from eduid_webapp.actions.app import current_actions_app as current_app
from eduid_webapp.actions.helpers import ActionsMsg
class Plugin(ActionPlugin):
    """Actions plugin asking the user to accept the Terms of Use (ToU).

    `perform_step` records the acceptance in the ToU user database and
    requests a sync by the Attribute Manager.
    """

    PLUGIN_NAME = 'tou'
    PACKAGE_NAME = 'eduid_webapp.actions.actions.tou'
    steps = 1  # single-step action

    def __init__(self):
        super(Plugin, self).__init__()
        # This import has to happen _after_ eduid_am has been initialized
        from eduid_am.tasks import update_attributes_keep_result
        self._update_attributes = update_attributes_keep_result

    @classmethod
    def includeme(cls, app):
        """Attach the ToU user database to the app at initialization time."""
        app.tou_db = ToUUserDB(app.conf.mongo_uri)

    def get_config_for_bundle(self, action: Action):
        """Returns front-end config: the ToU texts for the requested version.

        Raises ActionError(no_tou) when no ToU texts could be loaded.
        """
        tous = current_app.get_tous(version=action.params['version'])
        if not tous:
            current_app.logger.error('Could not load any TOUs')
            raise self.ActionError(ActionsMsg.no_tou)
        return {
            'version': action.params['version'],
            'tous': tous,
            'available_languages': current_app.conf.available_languages,
        }

    def perform_step(self, action: Action):
        """Records the user's acceptance of the ToU version in *action*.

        Raises ActionError(must_accept) when the posted JSON lacks a truthy
        'accept' key, and ActionError(sync_problem) when the Attribute
        Manager sync fails.
        """
        if not request.get_json().get('accept', ''):
            raise self.ActionError(ActionsMsg.must_accept)
        eppn = action.eppn
        central_user = current_app.central_userdb.get_user_by_eppn(eppn)
        user = ToUUser.from_user(central_user, current_app.tou_db)
        current_app.logger.debug('Loaded ToUUser {} from db'.format(user))
        version = action.params['version']
        existing_tou = user.tou.find(version)
        if existing_tou:
            # Re-acceptance: only the modification timestamp is updated.
            current_app.logger.info('ToU version {} reaccepted by user {}'.format(version, user))
            existing_tou.modified_ts = datetime.utcnow()
        else:
            current_app.logger.info('ToU version {} accepted by user {}'.format(version, user))
            user.tou.add(
                ToUEvent(
                    version=version,
                    created_by='eduid_tou_plugin',
                    created_ts=datetime.utcnow(),
                    modified_ts=datetime.utcnow(),
                    event_id=str(ObjectId()),
                )
            )
        # NOTE(review): check_sync=False presumably skips the out-of-sync
        # guard on save -- confirm against ToUUserDB semantics.
        current_app.tou_db.save(user, check_sync=False)
        current_app.logger.debug("Asking for sync of {} by Attribute Manager".format(user))
        rtask = self._update_attributes.delay('eduid_tou', str(user.user_id))
        try:
            result = rtask.get(timeout=10)
            current_app.logger.debug("Attribute Manager sync result: {!r}".format(result))
            # Sync succeeded: the action is complete and can be removed.
            current_app.actions_db.remove_action_by_id(action.action_id)
            current_app.logger.info('Removed completed action {}'.format(action))
            return {}
        except Exception as e:
            current_app.logger.error("Failed Attribute Manager sync request: " + str(e))
            raise self.ActionError(ActionsMsg.sync_problem)
| {
"content_hash": "e543ce1930abfdb1acc6666e6c87c575",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 97,
"avg_line_length": 37.88095238095238,
"alnum_prop": 0.6275927089880579,
"repo_name": "SUNET/eduid-webapp",
"id": "ee299e5a564bcdcac55214153139c82118422154",
"size": "4774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/eduid_webapp/actions/actions/tou.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "433"
},
{
"name": "HTML",
"bytes": "46956"
},
{
"name": "Python",
"bytes": "1041956"
},
{
"name": "Shell",
"bytes": "577"
}
],
"symlink_target": ""
} |
"""Python implementation of PVL (Parameter Value Language), with upcoming
features.
If you currently use::
import pvl
you can change to::
import pvl.new as pvl
And then use all of the pvl functions as you usually would. You
will also need to have the 3rd party multidict library
(https://github.com/aio-libs/multidict, conda installable) installed.
But then, any objects that are returned by the load functions will
be the new PVLMultiDict objects.
"""
# Copyright 2015, 2017, 2019-2021, ``pvl`` library authors.
#
# Reuse is permitted under the terms of the license.
# The AUTHORS file and the LICENSE file are at the
# top level of this library.
import inspect
import io
import urllib.request
from pathlib import Path
try: # noqa: C901
# In order to access super class attributes for our derived class, we must
# import the native Python version, instead of the default Cython version.
from multidict._multidict_py import MultiDict # noqa: F401
except ImportError as err:
raise ImportError(
"The multidict library is not present, so the new PVLMultiDict is not "
"available, and pvl.new can't be imported. In order to do so, install "
"the multidict package",
ImportWarning,
) from err
from pvl import * # noqa: F401,F403
from pvl import get_text_from, decode_by_char
from .encoder import PDSLabelEncoder, PVLEncoder
from .parser import PVLParser, OmniParser
from .collections import PVLModuleNew, PVLGroupNew, PVLObjectNew
__all__ = [
"PVLModuleNew",
"PVLGroupNew",
"PVLObjectNew",
]
def load(path, parser=None, grammar=None, decoder=None, **kwargs):
    """Parse the PVL text found at *path* and return the resulting object.

    :param path: an :class:`os.PathLike` which presumably has a
        PVL Module in it to parse. If it is not an :class:`os.PathLike`,
        it is assumed to be an already-opened file object and ``.read()``
        is applied to extract the text. If the contents are decodable
        text followed by undecodable bytes (e.g. an ISIS cube file),
        only the decodable text is extracted and parsed.
    :param parser: defaults to :class:`pvl.parser.OmniParser()`.
    :param grammar: defaults to :class:`pvl.grammar.OmniGrammar()`.
    :param decoder: defaults to :class:`pvl.decoder.OmniDecoder()`.
    :param ``**kwargs``: forwarded to :func:`loads()`.
    """
    text = get_text_from(path)
    return loads(
        text, parser=parser, grammar=grammar, decoder=decoder, **kwargs)
def loadu(url, parser=None, grammar=None, decoder=None, **kwargs):
    """Parse the PVL text found at *url* and return the resulting object.

    :param url: passed to :func:`urllib.request.urlopen`; may be a string
        or a :class:`urllib.request.Request` object. Any scheme that
        urlopen understands works: http, https, file, ftp, and more.
    :param parser: defaults to :class:`pvl.parser.OmniParser()`.
    :param grammar: defaults to :class:`pvl.grammar.OmniGrammar()`.
    :param decoder: defaults to :class:`pvl.decoder.OmniDecoder()`.
    :param ``**kwargs``: keyword arguments accepted by
        :func:`urllib.request.urlopen` are extracted and used for the
        request; all remaining ones are forwarded to :func:`loads()`.
    """
    # Split kwargs: names appearing in urlopen's signature go to urlopen.
    urlopen_params = inspect.signature(urllib.request.urlopen).parameters
    url_args = {name: kwargs.pop(name)
                for name in urlopen_params if name in kwargs}
    # The response object always exposes .read() returning bytes, so
    # decode it into a string before parsing.
    with urllib.request.urlopen(url, **url_args) as resp:
        text = decode_by_char(resp)
    return loads(text, parser=parser, grammar=grammar, decoder=decoder, **kwargs)
def loads(s: str, parser=None, grammar=None, decoder=None, **kwargs):
    """Deserialize the string, *s*, as a Python object.

    :param s: contains some PVL to parse.
    :param parser: defaults to :class:`pvl.parser.OmniParser()`, which will
        return the new PVLMultiDict-derived objects.
    :param grammar: defaults to :class:`pvl.grammar.OmniGrammar()`.
    :param decoder: defaults to :class:`pvl.decoder.OmniDecoder()`.
    :param ``**kwargs``: the keyword arguments to pass to the *parser* class
        if *parser* is none.
    :raises TypeError: if *parser* is given but is not a
        :class:`pvl.PVLParser` instance.
    """
    if isinstance(s, bytes):
        # Someone passed us an old-style bytes sequence. Although it isn't
        # a string, we can deal with it:
        s = s.decode()
    if parser is None:
        # Default parser is configured to emit the new multidict-based
        # module/group/object classes.
        parser = OmniParser(
            grammar=grammar,
            decoder=decoder,
            module_class=PVLModuleNew,
            group_class=PVLGroupNew,
            object_class=PVLObjectNew,
            **kwargs
        )
    elif not isinstance(parser, PVLParser):
        raise TypeError("The parser must be an instance of pvl.PVLParser.")
    return parser.parse(s)
def dump(module, path, **kwargs):
    """Serialize *module* as PVL text to the provided *path*.

    :param module: a ``PVLModule`` or ``dict``-like object to serialize.
    :param path: an :class:`os.PathLike`
    :param ``**kwargs``: the keyword arguments to pass to :func:`dumps()`.

    If *path* is an :class:`os.PathLike`, it will attempt to be opened
    and the serialized module will be written into that file via
    the :func:`pathlib.Path.write_text()` function, and will return
    what that function returns.

    If *path* is not an :class:`os.PathLike`, it will be assumed to be an
    already-opened file object, and ``.write()`` will be applied
    on that object to write the serialized module, and will return
    what that function returns.
    """
    # EAFP: first assume *path* is path-like; a TypeError (typically from
    # the Path() constructor) routes us to the file-object branch.
    try:
        p = Path(path)
        return p.write_text(dumps(module, **kwargs))
    except TypeError:
        # Not an os.PathLike, maybe it is an already-opened file object
        try:
            if isinstance(path, io.TextIOBase):
                # Text-mode file: write the string as-is.
                return path.write(dumps(module, **kwargs))
            else:
                # Binary-mode (or unknown) file: encode to bytes first.
                return path.write(dumps(module, **kwargs).encode())
        except AttributeError:
            # Not a path, not an already-opened file.
            raise TypeError(
                "Expected an os.PathLike or an already-opened "
                "file object for writing, but got neither."
            )
def dumps(module, encoder=None, grammar=None, decoder=None, **kwargs) -> str:
    """Serialize *module* to a string of PVL syntax and return it.

    :param module: a ``PVLModule`` or ``dict`` like object to serialize.
    :param encoder: defaults to :class:`pvl.parser.PDSLabelEncoder()`.
    :param grammar: defaults to :class:`pvl.grammar.ODLGrammar()`.
    :param decoder: defaults to :class:`pvl.decoder.ODLDecoder()`.
    :param ``**kwargs``: keyword arguments for the encoder class, used
        only when *encoder* is None.
    :raises TypeError: if *encoder* is given but is not a
        :class:`pvl.PVLEncoder` instance.
    """
    if encoder is not None:
        if not isinstance(encoder, PVLEncoder):
            raise TypeError("The encoder must be an instance of pvl.PVLEncoder.")
        return encoder.encode(module)
    # No encoder supplied: build the default, emitting the new
    # multidict-based group/object classes.
    default_encoder = PDSLabelEncoder(
        grammar=grammar,
        decoder=decoder,
        group_class=PVLGroupNew,
        object_class=PVLObjectNew,
        **kwargs)
    return default_encoder.encode(module)
| {
"content_hash": "9fa693769b018ddb13072169e11d7ee4",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 36.95192307692308,
"alnum_prop": 0.6567785584179027,
"repo_name": "planetarypy/pvl",
"id": "4d6ac9b9d85883a7f30518f176b71673213aff9f",
"size": "7710",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pvl/new.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "344634"
}
],
"symlink_target": ""
} |
"""Class decorator for adding properties for arguments"""
from functools import partial
__all__ = []
__private__ = ['properties_for_args']
def properties_for_args(cls, arg_names='_arg_names'):
    """For a class with an attribute `arg_names` containing a list of names,
    add a property for every name in that list.

    It is assumed that there is an instance attribute ``self._<arg_name>``,
    which is returned by the `arg_name` property. The decorator also adds a
    class attribute :attr:`_has_properties_for_args` that may be used to
    ensure that a class is decorated.
    """
    from qnet.algebra.core.scalar_algebra import Scalar

    scalar_args = getattr(cls, '_scalar_args', False)

    def _make_getter(name):
        # Bind `name` here to avoid the late-binding closure pitfall.
        def _getter(self):
            value = getattr(self, "_%s" % name)
            if scalar_args:
                assert isinstance(value, Scalar)
            return value
        return _getter

    for arg_name in getattr(cls, arg_names):
        prop = property(_make_getter(arg_name))
        if scalar_args:
            prop.__doc__ = (
                "The `%s` argument, as a :class:`.Scalar` instance." % arg_name)
        else:
            prop.__doc__ = "The `%s` argument." % arg_name
        setattr(cls, arg_name, prop)
    cls._has_properties_for_args = True
    return cls
| {
"content_hash": "ce150d341e2e819035da34a2c8e8c26a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 36.361111111111114,
"alnum_prop": 0.6096256684491979,
"repo_name": "mabuchilab/QNET",
"id": "9d0e3adf669d191c11a7132f77b32370eee3328c",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/qnet/utils/properties_for_args.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3915"
},
{
"name": "Python",
"bytes": "1100786"
}
],
"symlink_target": ""
} |
"""Imagenet datasets."""
import io
import os
import tarfile
from absl import logging
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
ILSVRC 2012, commonly known as 'ImageNet' is an image dataset organized
according to the WordNet hierarchy. Each meaningful concept in WordNet,
possibly described by multiple words or word phrases, is called a "synonym set"
or "synset". There are more than 100,000 synsets in WordNet, majority of them
are nouns (80,000+). In ImageNet, we aim to provide on average 1000 images to
illustrate each synset. Images of each concept are quality-controlled and
human-annotated. In its completion, we hope ImageNet will offer tens of
millions of cleanly sorted images for most of the concepts in the WordNet
hierarchy.
The test split contains 100K images but no labels because no labels have been
publicly released. We provide support for the test split from 2012 with the
minor patch released on October 10, 2019. In order to manually download this
data, a user must perform the following operations:
1. Download the 2012 test split available [here](https://image-net.org/challenges/LSVRC/2012/2012-downloads.php#Images).
2. Download the October 10, 2019 patch. There is a Google Drive link to the
patch provided on the same page.
3. Combine the two tar-balls, manually overwriting any images in the original
archive with images from the patch. According to the instructions on
image-net.org, this procedure overwrites just a few images.
The resulting tar-ball may then be processed by TFDS.
To assess the accuracy of a model on the ImageNet test split, one must run
inference on all images in the split, export those results to a text file that
must be uploaded to the ImageNet evaluation server. The maintainers of the
ImageNet evaluation server permits a single user to submit up to 2 submissions
per week in order to prevent overfitting.
To evaluate the accuracy on the test split, one must first create an account at
image-net.org. This account must be approved by the site administrator. After
the account is created, one can submit the results to the test server at
https://image-net.org/challenges/LSVRC/eval_server.php
The submission consists of several ASCII text files corresponding to multiple
tasks. The task of interest is "Classification submission (top-5 cls error)".
A sample of an exported text file looks like the following:
```
771 778 794 387 650
363 691 764 923 427
737 369 430 531 124
755 930 755 59 168
```
The export format is described in full in "readme.txt" within the 2013
development kit available here:
https://image-net.org/data/ILSVRC/2013/ILSVRC2013_devkit.tgz
Please see the section entitled "3.3 CLS-LOC submission format". Briefly, the
format of the text file is 100,000 lines corresponding to each image in the test
split. Each line of integers correspond to the rank-ordered, top 5 predictions
for each test image. The integers are 1-indexed corresponding to the line number
in the corresponding labels file. See imagenet2012_labels.txt.
"""
# Web-site is asking to cite paper from 2015.
# https://image-net.org/challenges/LSVRC/2012/index#cite
_CITATION = """\
@article{ILSVRC15,
Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
Title = {{ImageNet Large Scale Visual Recognition Challenge}},
Year = {2015},
journal = {International Journal of Computer Vision (IJCV)},
doi = {10.1007/s11263-015-0816-y},
volume={115},
number={3},
pages={211-252}
}
"""
_LABELS_FNAME = 'image_classification/imagenet2012_labels.txt'
# This file contains the validation labels, in the alphabetic order of
# corresponding image names (and not in the order they have been added to the
# tar file).
_VALIDATION_LABELS_FNAME = 'image_classification/imagenet2012_validation_labels.txt'
# From https://github.com/cytsai/ilsvrc-cmyk-image-list
CMYK_IMAGES = [
'n01739381_1309.JPEG',
'n02077923_14822.JPEG',
'n02447366_23489.JPEG',
'n02492035_15739.JPEG',
'n02747177_10752.JPEG',
'n03018349_4028.JPEG',
'n03062245_4620.JPEG',
'n03347037_9675.JPEG',
'n03467068_12171.JPEG',
'n03529860_11437.JPEG',
'n03544143_17228.JPEG',
'n03633091_5218.JPEG',
'n03710637_5125.JPEG',
'n03961711_5286.JPEG',
'n04033995_2932.JPEG',
'n04258138_17003.JPEG',
'n04264628_27969.JPEG',
'n04336792_7448.JPEG',
'n04371774_5854.JPEG',
'n04596742_4225.JPEG',
'n07583066_647.JPEG',
'n13037406_4650.JPEG',
]
PNG_IMAGES = ['n02105855_2933.JPEG']
def get_validation_labels(val_path):
  """Returns labels for validation.

  Args:
    val_path: path to TAR file containing validation images. It is used to
      retrieve the name of pictures and associate them to labels.

  Returns:
    dict, mapping from image name (str) to label (str).
  """
  labels_path = tfds.core.tfds_path(_VALIDATION_LABELS_FNAME)
  with tf.io.gfile.GFile(os.fspath(labels_path)) as labels_f:
    # `splitlines` to remove trailing `\r` in Windows
    labels = labels_f.read().strip().splitlines()
  with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
    # Fix: close the tarfile handle deterministically instead of leaking it.
    with tarfile.open(mode='r:', fileobj=tar_f_obj) as tar:
      images = sorted(tar.getnames())
  # Labels file is ordered by the alphabetically-sorted image names.
  return dict(zip(images, labels))
def generate_examples_validation(archive, labels):
  """Yields (file_name, example) pairs for the validation split."""
  for fname, fobj in archive:
    yield fname, {
        'file_name': fname,
        'image': fobj,
        'label': labels[fname],
    }
def generate_examples_test(archive):
  """Yields (file_name, example) pairs with dummy label -1 (no test labels)."""
  for fname, fobj in archive:
    example = {'file_name': fname, 'image': fobj, 'label': -1}
    yield fname, example
class Imagenet2012(tfds.core.GeneratorBasedBuilder):
  """Imagenet 2012, aka ILSVRC 2012."""

  VERSION = tfds.core.Version('5.1.0')
  SUPPORTED_VERSIONS = [
      tfds.core.Version('5.0.0'),
  ]
  RELEASE_NOTES = {
      '5.1.0':
          'Added test split.',
      '5.0.0':
          'New split API (https://tensorflow.org/datasets/splits)',
      '4.0.0':
          '(unpublished)',
      '3.0.0':
          """
      Fix colorization on ~12 images (CMYK -> RGB).
      Fix format for consistency (convert the single png image to Jpeg).
      Faster generation reading directly from the archive.
      """,
      '2.0.1':
          'Encoding fix. No changes from user point of view.',
      '2.0.0':
          'Fix validation labels.',
  }
  MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  manual_dir should contain two files: ILSVRC2012_img_train.tar and
  ILSVRC2012_img_val.tar.
  You need to register on https://image-net.org/download-images in order
  to get the link to download the dataset.
  """

  def _info(self):
    """Declares the dataset schema (features, supervised keys, citation)."""
    names_file = tfds.core.tfds_path(_LABELS_FNAME)
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            'image': tfds.features.Image(encoding_format='jpeg'),
            'label': tfds.features.ClassLabel(names_file=names_file),
            'file_name': tfds.features.Text(),  # Eg: 'n15075141_54.JPEG'
        }),
        supervised_keys=('image', 'label'),
        homepage='https://image-net.org/',
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Builds whichever of the train/validation/test splits were
    manually downloaded; raises if none of the archives are present."""
    train_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_train.tar')
    val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')
    test_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_test.tar')
    splits = []
    _add_split_if_exists(
        split_list=splits,
        split=tfds.Split.TRAIN,
        split_path=train_path,
        dl_manager=dl_manager,
    )
    _add_split_if_exists(
        split_list=splits,
        split=tfds.Split.VALIDATION,
        split_path=val_path,
        dl_manager=dl_manager,
        validation_labels=get_validation_labels(val_path),
    )
    _add_split_if_exists(
        split_list=splits,
        split=tfds.Split.TEST,
        split_path=test_path,
        dl_manager=dl_manager,
        labels_exist=False,
    )
    if not splits:
      raise AssertionError(
          'ImageNet requires manual download of the data. Please download '
          'the data and place them into:\n'
          f' * train: {train_path}\n'
          f' * test: {test_path}\n'
          f' * validation: {val_path}\n'
          'At least one of the split should be available.')
    return splits

  def _fix_image(self, image_fname, image):
    """Fix image color system and format starting from v 3.0.0."""
    if self.version < '3.0.0':
      return image
    if image_fname in CMYK_IMAGES:
      image = io.BytesIO(tfds.core.utils.jpeg_cmyk_to_rgb(image.read()))
    elif image_fname in PNG_IMAGES:
      image = io.BytesIO(tfds.core.utils.png_to_jpeg(image.read()))
    return image

  def _generate_examples(self,
                         archive,
                         validation_labels=None,
                         labels_exist=True):
    """Yields examples."""
    if not labels_exist:  # Test split
      for key, example in generate_examples_test(archive):
        yield key, example
      # NOTE(review): no early return here; presumably `archive` is an
      # exhausted iterator at this point so the loops below are no-ops --
      # confirm against dl_manager.iter_archive semantics.
    if validation_labels:  # Validation split
      for key, example in generate_examples_validation(archive,
                                                       validation_labels):
        yield key, example
    # Training split. Main archive contains archives names after a synset noun.
    # Each sub-archive contains pictures associated to that synset.
    for fname, fobj in archive:
      label = fname[:-4]  # fname is something like 'n01632458.tar'
      # TODO(b/117643231): in py3, the following lines trigger tarfile module
      # to call `fobj.seekable()`, which Gfile doesn't have. We should find an
      # alternative, as this loads ~150MB in RAM.
      fobj_mem = io.BytesIO(fobj.read())
      for image_fname, image in tfds.download.iter_archive(
          fobj_mem, tfds.download.ExtractMethod.TAR_STREAM):
        image = self._fix_image(image_fname, image)
        record = {
            'file_name': image_fname,
            'image': image,
            'label': label,
        }
        yield image_fname, record
def _add_split_if_exists(split_list, split, split_path, dl_manager, **kwargs):
  """Add split to given list of splits only if the file exists."""
  if tf.io.gfile.exists(split_path):
    gen_kwargs = {'archive': dl_manager.iter_archive(split_path), **kwargs}
    split_list.append(
        tfds.core.SplitGenerator(name=split, gen_kwargs=gen_kwargs))
  else:
    logging.warning(
        'ImageNet 2012 Challenge %s split not found at %s. '
        'Proceeding with data generation anyways but the split will be '
        'missing from the dataset...',
        str(split),
        split_path,
    )
| {
"content_hash": "8237766a3463088510eb1806d442ff0a",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 220,
"avg_line_length": 36.61666666666667,
"alnum_prop": 0.6747382794720073,
"repo_name": "tensorflow/datasets",
"id": "2aa2fd6ffa72be82a464201d7a30a4309b24d9a2",
"size": "11597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/image_classification/imagenet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
} |
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.110
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import sys # noqa: F401
import six # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelNormal,
date,
datetime,
int,
str,
)
try:
from onshape_client.oas.models import body_part
except ImportError:
body_part = sys.modules["onshape_client.oas.models.body_part"]
try:
from onshape_client.oas.models import body_part_media_type
except ImportError:
body_part_media_type = sys.modules["onshape_client.oas.models.body_part_media_type"]
try:
from onshape_client.oas.models import content_disposition
except ImportError:
content_disposition = sys.modules["onshape_client.oas.models.content_disposition"]
try:
from onshape_client.oas.models import form_data_body_part
except ImportError:
form_data_body_part = sys.modules["onshape_client.oas.models.form_data_body_part"]
try:
from onshape_client.oas.models import multi_part
except ImportError:
multi_part = sys.modules["onshape_client.oas.models.multi_part"]
try:
from onshape_client.oas.models import parameterized_header
except ImportError:
parameterized_header = sys.modules["onshape_client.oas.models.parameterized_header"]
class FormDataMultiPart(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-restricted values and no constraints.
    allowed_values = {}
    validations = {}
    # None: properties not listed in openapi_types() are not accepted.
    additional_properties_type = None
    @staticmethod
    def openapi_types():
        """Return the declared property types for this model.
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import.
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "body_parts": ([body_part.BodyPart],), # noqa: E501
            "content_disposition": (
                content_disposition.ContentDisposition,
            ), # noqa: E501
            # free-form value: any JSON-compatible type is accepted
            "entity": (
                bool,
                date,
                datetime,
                dict,
                float,
                int,
                list,
                str,
            ), # noqa: E501
            "fields": ({str: ([form_data_body_part.FormDataBodyPart],)},), # noqa: E501
            "headers": ({str: ([str],)},), # noqa: E501
            "media_type": (body_part_media_type.BodyPartMediaType,), # noqa: E501
            # free-form value: any JSON-compatible type is accepted
            "message_body_workers": (
                bool,
                date,
                datetime,
                dict,
                float,
                int,
                list,
                str,
            ), # noqa: E501
            "parameterized_headers": (
                {str: ([parameterized_header.ParameterizedHeader],)},
            ), # noqa: E501
            "parent": (multi_part.MultiPart,), # noqa: E501
            # free-form value: any JSON-compatible type is accepted
            "providers": (
                bool,
                date,
                datetime,
                dict,
                float,
                int,
                list,
                str,
            ), # noqa: E501
        }
    @staticmethod
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None
    # Maps python attribute names to the JSON keys in the API definition.
    attribute_map = {
        "body_parts": "bodyParts", # noqa: E501
        "content_disposition": "contentDisposition", # noqa: E501
        "entity": "entity", # noqa: E501
        "fields": "fields", # noqa: E501
        "headers": "headers", # noqa: E501
        "media_type": "mediaType", # noqa: E501
        "message_body_workers": "messageBodyWorkers", # noqa: E501
        "parameterized_headers": "parameterizedHeaders", # noqa: E501
        "parent": "parent", # noqa: E501
        "providers": "providers", # noqa: E501
    }
    @staticmethod
    def _composed_schemas():
        # Not composed via allOf/oneOf/anyOf.
        return None
    # Internal bookkeeping attributes set directly in __init__ rather than
    # routed through __setattr__/_data_store.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )
    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ): # noqa: E501
        """form_data_multi_part.FormDataMultiPart - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            body_parts ([body_part.BodyPart]): [optional] # noqa: E501
            content_disposition (content_disposition.ContentDisposition): [optional] # noqa: E501
            entity (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
            fields ({str: ([form_data_body_part.FormDataBodyPart],)}): [optional] # noqa: E501
            headers ({str: ([str],)}): [optional] # noqa: E501
            media_type (body_part_media_type.BodyPartMediaType): [optional] # noqa: E501
            message_body_workers (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
            parameterized_headers ({str: ([parameterized_header.ParameterizedHeader],)}): [optional] # noqa: E501
            parent (multi_part.MultiPart): [optional] # noqa: E501
            providers (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Route every supplied property through setattr so the ModelNormal
        # machinery can type-check it and record it in _data_store.
        for var_name, var_value in six.iteritems(kwargs):
            setattr(self, var_name, var_value)
| {
"content_hash": "47f8a72f1dd7bc94c87dd1255029e0d5",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 114,
"avg_line_length": 36.8755980861244,
"alnum_prop": 0.5710393149085247,
"repo_name": "onshape-public/onshape-clients",
"id": "75f4b03c82c6ebe57defb473f3b610ffb4d25dc9",
"size": "7724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/form_data_multi_part.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
} |
import sys
import os
import re
import string
import imp
from tkinter import *
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
import traceback
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import idlever
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
def _sphinx_version():
    """Return the Sphinx-style version string for this interpreter.
    E.g. '312', '3124', or '312rc1'; used to locate the installed chm docs.
    """
    info = sys.version_info
    parts = [str(info[0]), str(info[1])]
    if info[2]:
        parts.append(str(info[2]))
    level, serial = info[3], info[4]
    if level == 'candidate':
        parts.append('rc%s' % (serial,))
    elif level != 'final':
        parts.append(level[0] + str(serial))
    return ''.join(parts)
def _find_module(fullname, path=None):
    """Version of imp.find_module() that handles hierarchical module names.
    Walks each dotted component of *fullname*, loading intermediate
    packages so their __path__ can seed the next search.  Returns the
    (file, filename, descr) triple for the final component; the caller
    is responsible for closing the returned file object.
    Raises ImportError if a component cannot be found, or if an
    intermediate component is not a package (has no __path__).
    """
    file = None
    for tgt in fullname.split('.'):
        if file is not None:
            file.close() # close intermediate files
        # may raise ImportError if tgt is not found along path
        (file, filename, descr) = imp.find_module(tgt, path)
        if descr[2] == imp.PY_SOURCE:
            break # find but not load the source file
        module = imp.load_module(tgt, file, filename, descr)
        try:
            path = module.__path__
        except AttributeError:
            # intermediate component is a plain module: nothing below it
            raise ImportError('No source for module ' + module.__name__)
    return file, filename, descr
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
    def __init__(self, flist=None, filename=None, key=None, root=None):
        """Create one editor window.
        Args:
            flist: FileList tracking all open editor windows (may be None).
            filename: file to load into the new window, if any.
            key: key under which this window is registered in flist.dict.
            root: Tk root; defaults to flist.root.
        """
        # Locate the docs once per process and cache the URL on the class.
        if EditorWindow.help_url is None:
            dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
            if sys.platform.count('linux'):
                # look for html docs in a couple of standard places
                pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
                if os.path.isdir('/var/www/html/python/'): # "python2" rpm
                    dochome = '/var/www/html/python/index.html'
                else:
                    basepath = '/usr/share/doc/' # standard location
                    dochome = os.path.join(basepath, pyver,
                                           'Doc', 'index.html')
            elif sys.platform[:3] == 'win':
                chmfile = os.path.join(sys.prefix, 'Doc',
                                       'Python%s.chm' % _sphinx_version())
                if os.path.isfile(chmfile):
                    dochome = chmfile
            elif macosxSupport.runningAsOSXApp():
                # documentation is stored inside the python framework
                dochome = os.path.join(sys.prefix,
                        'Resources/English.lproj/Documentation/index.html')
            dochome = os.path.normpath(dochome)
            if os.path.isfile(dochome):
                EditorWindow.help_url = dochome
                if sys.platform == 'darwin':
                    # Safari requires real file:-URLs
                    EditorWindow.help_url = 'file://' + EditorWindow.help_url
            else:
                EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2]
        # NOTE(review): currentTheme is assigned but never used in this method.
        currentTheme=idleConf.CurrentTheme()
        self.flist = flist
        root = root or flist.root
        self.root = root
        # Guarantee sys.ps1 exists, as it would in an interactive interpreter.
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = '>>> '
        self.menubar = Menu(root)
        self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
        if flist:
            self.tkinter_vars = flist.vars
            #self.top.instance_dict makes flist.inversedict avalable to
            #configDialog.py so it can access all EditorWindow instaces
            self.top.instance_dict = flist.inversedict
        else:
            self.tkinter_vars = {} # keys: Tkinter event names
                                   # values: Tkinter variable instances
            self.top.instance_dict = {}
        self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
                'recent-files.lst')
        # Build the text widget and its scrollbar.
        self.text_frame = text_frame = Frame(top)
        self.vbar = vbar = Scrollbar(text_frame, name='vbar')
        self.width = idleConf.GetOption('main','EditorWindow','width')
        text_options = {
                'name': 'text',
                'padx': 5,
                'wrap': 'none',
                'width': self.width,
                'height': idleConf.GetOption('main', 'EditorWindow', 'height')}
        if TkVersion >= 8.5:
            # Starting with tk 8.5 we have to set the new tabstyle option
            # to 'wordprocessor' to achieve the same display of tabs as in
            # older tk versions.
            text_options['tabstyle'] = 'wordprocessor'
        self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
        self.top.focused_widget = self.text
        self.createmenubar()
        self.apply_bindings()
        self.top.protocol("WM_DELETE_WINDOW", self.close)
        self.top.bind("<<close-window>>", self.close_event)
        if macosxSupport.runningAsOSXApp():
            # Command-W on editorwindows doesn't work without this.
            text.bind('<<close-window>>', self.close_event)
            # Some OS X systems have only one mouse button,
            # so use control-click for pulldown menus there.
            #  (Note, AquaTk defines <2> as the right button if
            #   present and the Tk Text widget already binds <2>.)
            text.bind("<Control-Button-1>",self.right_menu_event)
        else:
            # Elsewhere, use right-click for pulldown menus.
            text.bind("<3>",self.right_menu_event)
        # Wire every editor virtual event to its handler method.
        text.bind("<<cut>>", self.cut)
        text.bind("<<copy>>", self.copy)
        text.bind("<<paste>>", self.paste)
        text.bind("<<center-insert>>", self.center_insert_event)
        text.bind("<<help>>", self.help_dialog)
        text.bind("<<python-docs>>", self.python_docs)
        text.bind("<<about-idle>>", self.about_dialog)
        text.bind("<<open-config-dialog>>", self.config_dialog)
        text.bind("<<open-module>>", self.open_module)
        text.bind("<<do-nothing>>", lambda event: "break")
        text.bind("<<select-all>>", self.select_all)
        text.bind("<<remove-selection>>", self.remove_selection)
        text.bind("<<find>>", self.find_event)
        text.bind("<<find-again>>", self.find_again_event)
        text.bind("<<find-in-files>>", self.find_in_files_event)
        text.bind("<<find-selection>>", self.find_selection_event)
        text.bind("<<replace>>", self.replace_event)
        text.bind("<<goto-line>>", self.goto_line_event)
        text.bind("<<smart-backspace>>",self.smart_backspace_event)
        text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
        text.bind("<<smart-indent>>",self.smart_indent_event)
        text.bind("<<indent-region>>",self.indent_region_event)
        text.bind("<<dedent-region>>",self.dedent_region_event)
        text.bind("<<comment-region>>",self.comment_region_event)
        text.bind("<<uncomment-region>>",self.uncomment_region_event)
        text.bind("<<tabify-region>>",self.tabify_region_event)
        text.bind("<<untabify-region>>",self.untabify_region_event)
        text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
        text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
        text.bind("<Left>", self.move_at_edge_if_selection(0))
        text.bind("<Right>", self.move_at_edge_if_selection(1))
        text.bind("<<del-word-left>>", self.del_word_left)
        text.bind("<<del-word-right>>", self.del_word_right)
        text.bind("<<beginning-of-line>>", self.home_callback)
        # Register with the file list and add multi-window bindings.
        if flist:
            flist.inversedict[self] = key
            if key:
                flist.dict[key] = self
            text.bind("<<open-new-window>>", self.new_callback)
            text.bind("<<close-all-windows>>", self.flist.close_all_callback)
            text.bind("<<open-class-browser>>", self.open_class_browser)
            text.bind("<<open-path-browser>>", self.open_path_browser)
        self.set_status_bar()
        vbar['command'] = text.yview
        vbar.pack(side=RIGHT, fill=Y)
        text['yscrollcommand'] = vbar.set
        fontWeight = 'normal'
        if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
            fontWeight='bold'
        text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
                          idleConf.GetOption('main', 'EditorWindow', 'font-size'),
                          fontWeight))
        text_frame.pack(side=LEFT, fill=BOTH, expand=1)
        text.pack(side=TOP, fill=BOTH, expand=1)
        text.focus_set()
        # usetabs true  -> literal tab characters are used by indent and
        #                  dedent cmds, possibly mixed with spaces if
        #                  indentwidth is not a multiple of tabwidth,
        #                  which will cause Tabnanny to nag!
        #          false -> tab characters are converted to spaces by indent
        #                  and dedent cmds, and ditto TAB keystrokes
        # Although use-spaces=0 can be configured manually in config-main.def,
        # configuration of tabs v. spaces is not supported in the configuration
        # dialog.  IDLE promotes the preferred Python indentation: use spaces!
        usespaces = idleConf.GetOption('main', 'Indent', 'use-spaces', type='bool')
        self.usetabs = not usespaces
        # tabwidth is the display width of a literal tab character.
        # CAUTION:  telling Tk to use anything other than its default
        # tab setting causes it to use an entirely different tabbing algorithm,
        # treating tab stops as fixed distances from the left margin.
        # Nobody expects this, so for now tabwidth should never be changed.
        self.tabwidth = 8 # must remain 8 until Tk is fixed.
        # indentwidth is the number of screen characters per indent level.
        # The recommended Python indentation is four spaces.
        self.indentwidth = self.tabwidth
        self.set_notabs_indentwidth()
        # If context_use_ps1 is true, parsing searches back for a ps1 line;
        # else searches for a popular (if, def, ...) Python stmt.
        self.context_use_ps1 = False
        # When searching backwards for a reliable place to begin parsing,
        # first start num_context_lines[0] lines back, then
        # num_context_lines[1] lines back if that didn't work, and so on.
        # The last value should be huge (larger than the # of lines in a
        # conceivable file).
        # Making the initial values larger slows things down more often.
        self.num_context_lines = 50, 500, 5000000
        # Percolator/undo/io plumbing between the Text widget and IDLE.
        self.per = per = self.Percolator(text)
        self.undo = undo = self.UndoDelegator()
        per.insertfilter(undo)
        text.undo_block_start = undo.undo_block_start
        text.undo_block_stop = undo.undo_block_stop
        undo.set_saved_change_hook(self.saved_change_hook)
        # IOBinding implements file I/O and printing functionality
        self.io = io = self.IOBinding(self)
        io.set_filename_change_hook(self.filename_change_hook)
        self.good_load = False
        self.set_indentation_params(False)
        self.color = None # initialized below in self.ResetColorizer
        if filename:
            if os.path.exists(filename) and not os.path.isdir(filename):
                if io.loadfile(filename):
                    self.good_load = True
                    is_py_src = self.ispythonsource(filename)
                    self.set_indentation_params(is_py_src)
                    if is_py_src:
                        self.color = color = self.ColorDelegator()
                        per.insertfilter(color)
            else:
                io.set_filename(filename)
        self.ResetColorizer()
        self.saved_change_hook()
        self.update_recent_files_list()
        self.load_extensions()
        # Remember where the static part of the Windows menu ends;
        # postwindowsmenu() appends/deletes window entries after wmenu_end.
        menu = self.menudict.get('windows')
        if menu:
            end = menu.index("end")
            if end is None:
                end = -1
            if end >= 0:
                menu.add_separator()
                end = end + 1
            self.wmenu_end = end
            WindowList.register_callback(self.postwindowsmenu)
        # Some abstractions so IDLE extensions are cross-IDE
        self.askyesno = tkMessageBox.askyesno
        self.askinteger = tkSimpleDialog.askinteger
        self.showerror = tkMessageBox.showerror
def _filename_to_unicode(self, filename):
"""convert filename to unicode in order to display it in Tk"""
if isinstance(filename, str) or not filename:
return filename
else:
try:
return filename.decode(self.filesystemencoding)
except UnicodeDecodeError:
# XXX
try:
return filename.decode(self.encoding)
except UnicodeDecodeError:
# byte-to-byte conversion
return filename.decode('iso8859-1')
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
    def home_callback(self, event):
        """Smart Home: toggle between column 0 and the first non-blank column.
        With Shift held, the selection is extended to the destination;
        otherwise any selection is cleared.  Ctrl/Alt-Home falls through
        to the class binding.
        """
        if (event.state & 12) != 0 and event.keysym == "Home":
            # state&1==shift, state&4==control, state&8==alt
            return # <Modifier-Home>; fall back to class binding
        # In a shell-like buffer, jump to the prompt (iomark) when the
        # cursor is on the iomark's line.
        if self.text.index("iomark") and \
           self.text.compare("iomark", "<=", "insert lineend") and \
           self.text.compare("insert linestart", "<=", "iomark"):
            insertpt = int(self.text.index("iomark").split(".")[1])
        else:
            # Find the first non-blank column of the current line.
            line = self.text.get("insert linestart", "insert lineend")
            for insertpt in range(len(line)):
                if line[insertpt] not in (' ','\t'):
                    break
            else:
                insertpt=len(line)
        lineat = int(self.text.index("insert").split('.')[1])
        if insertpt == lineat:
            # Already at the smart-home column: go to column 0 instead.
            insertpt = 0
        dest = "insert linestart+"+str(insertpt)+"c"
        if (event.state&1) == 0:
            # shift not pressed
            self.text.tag_remove("sel", "1.0", "end")
        else:
            if not self.text.index("sel.first"):
                self.text.mark_set("anchor","insert")
            first = self.text.index(dest)
            last = self.text.index("anchor")
            if self.text.compare(first,">",last):
                first,last = last,first
            self.text.tag_remove("sel", "1.0", "end")
            self.text.tag_add("sel", first, last)
        self.text.mark_set("insert", dest)
        self.text.see("insert")
        return "break"
    def set_status_bar(self):
        """Create the Ln/Col status bar and refresh it on cursor movement."""
        self.status_bar = self.MultiStatusBar(self.top)
        if macosxSupport.runningAsOSXApp():
            # Insert some padding to avoid obscuring some of the statusbar
            # by the resize widget.
            self.status_bar.set_label('_padding1', '    ', side=RIGHT)
        self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
        self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
        self.status_bar.pack(side=BOTTOM, fill=X)
        # Update after any key or mouse button release.
        self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
        self.text.event_add("<<set-line-and-column>>",
                            "<KeyRelease>", "<ButtonRelease>")
        self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
    # (internal name, menu label) pairs; '_' precedes the underlined char.
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("format", "F_ormat"),
        ("run", "_Run"),
        ("options", "_Options"),
        ("windows", "_Windows"),
        ("help", "_Help"),
    ]
    if macosxSupport.runningAsOSXApp():
        # On Aqua: drop the Options menu and rename Windows to the
        # singular "Window".
        del menu_specs[-3]
        menu_specs[-2] = ("windows", "_Window")
    def createmenubar(self):
        """Build the menubar from menu_specs and populate the submenus."""
        mbar = self.menubar
        self.menudict = menudict = {}
        for name, label in self.menu_specs:
            underline, label = prepstr(label)
            menudict[name] = menu = Menu(mbar, name=name)
            mbar.add_cascade(label=label, menu=menu, underline=underline)
        if macosxSupport.isCarbonAquaTk(self.root):
            # Insert the application menu
            menudict['application'] = menu = Menu(mbar, name='apple')
            mbar.add_cascade(label='IDLE', menu=menu)
        self.fill_menus()
        # Recent Files lives as a cascade inside the File menu; its entries
        # are rebuilt by update_recent_files_list().
        self.recent_files_menu = Menu(self.menubar)
        self.menudict['file'].insert_cascade(3, label='Recent Files',
                                             underline=0,
                                             menu=self.recent_files_menu)
        # Remember the static length of Help so extra entries can be reset.
        self.base_helpmenu_length = self.menudict['help'].index(END)
        self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Windows menu exists
menu = self.menudict['windows']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
WindowList.add_windows_to_menu(menu)
    rmenu = None # context menu, built lazily by make_rmenu()
    def right_menu_event(self, event):
        """Post the right-click context menu at the mouse position."""
        self.text.tag_remove("sel", "1.0", "end")
        self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
        if not self.rmenu:
            self.make_rmenu()
        rmenu = self.rmenu
        self.event = event
        iswin = sys.platform[:3] == 'win'
        if iswin:
            self.text.config(cursor="arrow")
        rmenu.tk_popup(event.x_root, event.y_root)
        if iswin:
            # restore the editing cursor after the popup is dismissed
            self.text.config(cursor="ibeam")
    rmenu_specs = [
        # ("Label", "<<virtual-event>>"), ...
        ("Close", "<<close-window>>"), # Example
    ]
    def make_rmenu(self):
        """Build the context menu from rmenu_specs and cache it."""
        rmenu = Menu(self.text, tearoff=0)
        for label, eventname in self.rmenu_specs:
            # Default args freeze text/eventname per menu entry.
            def command(text=self.text, eventname=eventname):
                text.event_generate(eventname)
            rmenu.add_command(label=label, command=command)
        self.rmenu = rmenu
    def about_dialog(self, event=None):
        """Show the About IDLE dialog."""
        aboutDialog.AboutDialog(self.top,'About IDLE')
    def config_dialog(self, event=None):
        """Open the Settings (configuration) dialog."""
        configDialog.ConfigDialog(self.top,'Settings')
def help_dialog(self, event=None):
fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
textView.view_file(self.top,'Help',fn)
def python_docs(self, event=None):
if sys.platform[:3] == 'win':
try:
os.startfile(self.help_url)
except WindowsError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(self.help_url)
return "break"
    def cut(self,event):
        """Generate <<Cut>> on the Text widget."""
        self.text.event_generate("<<Cut>>")
        return "break"
def copy(self,event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return
self.text.event_generate("<<Copy>>")
return "break"
def paste(self,event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def remove_selection(self, event=None):
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
    def move_at_edge_if_selection(self, edge_index):
        """Cursor move begins at start or end of selection
        When a left/right cursor key is pressed create and return to Tkinter a
        function which causes a cursor move from the associated edge of the
        selection.
        edge_index is 0 for the left edge (Left key) and 1 for the right
        edge (Right key).
        """
        self_text_index = self.text.index
        self_text_mark_set = self.text.mark_set
        # +1c/-1c: the subsequent default key action moves one more char,
        # landing the cursor exactly on the selection edge.
        edges_table = ("sel.first+1c", "sel.last-1c")
        def move_at_edge(event):
            if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
                try:
                    self_text_index("sel.first")
                    self_text_mark_set("insert", edges_table[edge_index])
                except TclError:
                    # no selection: fall through to the default key action
                    pass
        return move_at_edge
    def del_word_left(self, event):
        """Delete the word left of the cursor via the <Meta-Delete> binding."""
        self.text.event_generate('<Meta-Delete>')
        return "break"
    def del_word_right(self, event):
        """Delete the word right of the cursor via the <Meta-d> binding."""
        self.text.event_generate('<Meta-d>')
        return "break"
    def find_event(self, event):
        """Open the Find dialog for this window's text."""
        SearchDialog.find(self.text)
        return "break"
    def find_again_event(self, event):
        """Repeat the previous search."""
        SearchDialog.find_again(self.text)
        return "break"
    def find_selection_event(self, event):
        """Search for the currently selected text."""
        SearchDialog.find_selection(self.text)
        return "break"
    def find_in_files_event(self, event):
        """Open the Find in Files (grep) dialog."""
        GrepDialog.grep(self.text, self.io, self.flist)
        return "break"
    def replace_event(self, event):
        """Open the Replace dialog."""
        ReplaceDialog.replace(self.text)
        return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
    def open_module(self, event=None):
        """Prompt for a module name, locate its source on sys.path, open it.
        The current selection (if any) seeds the prompt.
        """
        # XXX Shouldn't this be in IOBinding?
        try:
            name = self.text.get("sel.first", "sel.last")
        except TclError:
            name = ""
        else:
            name = name.strip()
        name = tkSimpleDialog.askstring("Module",
                 "Enter the name of a Python module\n"
                 "to search on sys.path and open:",
                 parent=self.text, initialvalue=name)
        if name:
            name = name.strip()
        if not name:
            return
        # XXX Ought to insert current file's directory in front of path
        try:
            (f, file, (suffix, mode, type)) = _find_module(name)
        except (NameError, ImportError) as msg:
            tkMessageBox.showerror("Import error", str(msg), parent=self.text)
            return
        if type != imp.PY_SOURCE:
            # e.g. a C extension or compiled module: nothing to edit
            tkMessageBox.showerror("Unsupported type",
                "%s is not a source module" % name, parent=self.text)
            return
        if f:
            f.close()
        if self.flist:
            self.flist.open(file)
        else:
            self.io.loadfile(file)
    def open_class_browser(self, event=None):
        """Open a class browser on the current file, if it has a name."""
        filename = self.io.filename
        if not filename:
            tkMessageBox.showerror(
                "No filename",
                "This buffer has no associated filename",
                master=self.text)
            self.text.focus_set()
            return None
        head, tail = os.path.split(filename)
        base, ext = os.path.splitext(tail)
        from idlelib import ClassBrowser
        ClassBrowser.ClassBrowser(self.flist, base, [head])
    def open_path_browser(self, event=None):
        """Open the path browser window."""
        from idlelib import PathBrowser
        PathBrowser.PathBrowser(self.flist)
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
line = self.text.get('1.0', '1.0 lineend')
return line.startswith('#!') and 'python' in line
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
    def set_close_hook(self, close_hook):
        """Install *close_hook*, called from _close() during teardown."""
        self.close_hook = close_hook
    def filename_change_hook(self):
        """React to a filename change: update file list, title, colorizer."""
        if self.flist:
            self.flist.filename_changed_edit(self)
        self.saved_change_hook()
        self.top.update_windowlist_registry(self)
        self.ResetColorizer()
    def _addcolorizer(self):
        """Insert a syntax colorizer filter if appropriate and not present."""
        if self.color:
            return
        if self.ispythonsource(self.io.filename):
            self.color = self.ColorDelegator()
        # can add more colorizers here...
        if self.color:
            # Remove and re-insert undo so it ends up above the colorizer
            # in the percolator filter stack.
            self.per.removefilter(self.undo)
            self.per.insertfilter(self.color)
            self.per.insertfilter(self.undo)
    def _rmcolorizer(self):
        """Remove the colorizer filter and its tags, if present."""
        if not self.color:
            return
        self.color.removecolors()
        self.per.removefilter(self.color)
        self.color = None
    def ResetColorizer(self):
        "Update the colour theme"
        # Called from self.filename_change_hook and from configDialog.py
        # Rebuild the colorizer so it picks up the current theme.
        self._rmcolorizer()
        self._addcolorizer()
        theme = idleConf.GetOption('main','Theme','name')
        normal_colors = idleConf.GetHighlight(theme, 'normal')
        cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
        select_colors = idleConf.GetHighlight(theme, 'hilite')
        self.text.config(
            foreground=normal_colors['foreground'],
            background=normal_colors['background'],
            insertbackground=cursor_color,
            selectforeground=select_colors['foreground'],
            selectbackground=select_colors['background'],
            )
    IDENTCHARS = string.ascii_letters + string.digits + "_"
    def colorize_syntax_error(self, text, pos):
        """Tag the error location with ERROR and park the cursor beside it."""
        text.tag_add("ERROR", pos)
        char = text.get(pos)
        if char and char in self.IDENTCHARS:
            # extend the ERROR tag back over the whole identifier
            text.tag_add("ERROR", pos + " wordstart", pos)
        if '\n' == text.get(pos): # error at line end
            text.mark_set("insert", pos)
        else:
            text.mark_set("insert", pos + "+1c")
        text.see(pos)
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configDialog.py
fontWeight='normal'
if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
fontWeight='bold'
self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
idleConf.GetOption('main','EditorWindow','font-size'),
fontWeight))
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
    def ApplyKeybindings(self):
        """Update the keybindings after they are changed.
        Called from configDialog.py.  Re-applies the standard and
        extension key sets, then refreshes menu accelerators.
        """
        self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
        self.apply_bindings()
        for extensionName in self.get_standard_extension_names():
            xkeydefs = idleConf.GetExtensionBindings(extensionName)
            if xkeydefs:
                self.apply_bindings(xkeydefs)
        #update menu accelerators
        # Map menu name -> {item label: virtual event} from the menu defs.
        menuEventDict = {}
        for menu in self.Bindings.menudefs:
            menuEventDict[menu[0]] = {}
            for item in menu[1]:
                if item:
                    menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
        # Walk every command entry that already shows an accelerator and
        # replace it with the accelerator for the (possibly new) binding.
        for menubarItem in self.menudict:
            menu = self.menudict[menubarItem]
            end = menu.index(END) + 1
            for index in range(0, end):
                if menu.type(index) == 'command':
                    accel = menu.entrycget(index, 'accelerator')
                    if accel:
                        itemName = menu.entrycget(index, 'label')
                        event = ''
                        if menubarItem in menuEventDict:
                            if itemName in menuEventDict[menubarItem]:
                                event = menuEventDict[menubarItem][itemName]
                        if event:
                            accel = get_accelerator(keydefs, event)
                            menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configDialog.py
if not self.usetabs:
self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
type='int')
    def reset_help_menu_entries(self):
        "Update the additional help entries on the Help menu"
        help_list = idleConf.GetAllExtraHelpSourcesList()
        helpmenu = self.menudict['help']
        # first delete the extra help entries, if any
        helpmenu_length = helpmenu.index(END)
        if helpmenu_length > self.base_helpmenu_length:
            helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
        # then rebuild them
        if help_list:
            helpmenu.add_separator()
            for entry in help_list:
                # entry is (label, helpfile); freeze helpfile per callback
                cmd = self.__extra_help_callback(entry[1])
                helpmenu.add_command(label=entry[0], command=cmd)
        # and update the menu dictionary
        self.menudict['help'] = helpmenu
    def __extra_help_callback(self, helpfile):
        "Create a callback with the helpfile value frozen at definition time"
        def display_extra_help(helpfile=helpfile):
            if not helpfile.startswith(('www', 'http')):
                # local file path: normalize separators for the platform
                helpfile = os.path.normpath(helpfile)
            if sys.platform[:3] == 'win':
                try:
                    os.startfile(helpfile)
                except WindowsError as why:
                    tkMessageBox.showerror(title='Document Start Failure',
                                           message=str(why), parent=self.text)
            else:
                webbrowser.open(helpfile)
        return display_extra_help
def update_recent_files_list(self, new_file=None):
"Load and update the recent files list and menus"
rf_list = []
if os.path.exists(self.recent_files_path):
rf_list_file = open(self.recent_files_path,'r',
encoding='utf_8', errors='replace')
try:
rf_list = rf_list_file.readlines()
finally:
rf_list_file.close()
if new_file:
new_file = os.path.abspath(new_file) + '\n'
if new_file in rf_list:
rf_list.remove(new_file) # move to top
rf_list.insert(0, new_file)
# clean and save the recent files list
bad_paths = []
for path in rf_list:
if '\0' in path or not os.path.exists(path[0:-1]):
bad_paths.append(path)
rf_list = [path for path in rf_list if path not in bad_paths]
ulchars = "1234567890ABCDEFGHIJK"
rf_list = rf_list[0:len(ulchars)]
rf_file = open(self.recent_files_path, 'w',
encoding='utf_8', errors='replace')
try:
rf_file.writelines(rf_list)
finally:
rf_file.close()
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict:
menu = instance.recent_files_menu
menu.delete(1, END) # clear, and rebuild:
for i, file_name in enumerate(rf_list):
file_name = file_name.rstrip() # zap \n
# make unicode string to display non-ASCII chars correctly
ufile_name = self._filename_to_unicode(file_name)
callback = instance.__recent_file_callback(file_name)
menu.add_command(label=ulchars[i] + " " + ufile_name,
command=callback,
underline=0)
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long
elif short:
title = short
elif long:
title = long
else:
title = "Untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
    def get_saved(self):
        """Return the undo delegator's saved flag."""
        return self.undo.get_saved()
    def set_saved(self, flag):
        """Set the undo delegator's saved flag."""
        self.undo.set_saved(flag)
    def reset_undo(self):
        """Clear the undo stack via the undo delegator."""
        self.undo.reset_undo()
def short_title(self):
filename = self.io.filename
if filename:
filename = os.path.basename(filename)
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(filename)
    def long_title(self):
        """Return the full path of the current file ('' if none)."""
        # return unicode string to display non-ASCII chars correctly
        return self._filename_to_unicode(self.io.filename or "")
    def center_insert_event(self, event):
        """Center the view on the insert cursor."""
        self.center()
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
text = self.text
top = self.getlineno("@0,0")
bot = self.getlineno("@0,65535")
if top == bot and text.winfo_height() == 1:
# Geometry manager hasn't run yet
height = int(text['height'])
bot = top + height - 1
return top, bot
    def getlineno(self, mark="insert"):
        """Return the 1-based line number of *mark*."""
        text = self.text
        return int(float(text.index(mark)))
def get_geometry(self):
"Return (width, height, x, y)"
geom = self.top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
return list(map(int, m.groups()))
    def close_event(self, event):
        """Handle the <<close-window>> virtual event."""
        self.close()
    def maybesave(self):
        """If the buffer is unsaved, raise the window and offer to save.
        Returns the IOBinding.maybesave() reply; close() treats the
        string "cancel" as aborting the close.
        """
        if self.io:
            if not self.get_saved():
                if self.top.state()!='normal':
                    self.top.deiconify()
                self.top.lower()
                self.top.lift()
            return self.io.maybesave()
def close(self):
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
def _close(self):
    """Tear the window down: record the file, detach helpers, break cycles."""
    if self.io.filename:
        self.update_recent_files_list(new_file=self.io.filename)
    WindowList.unregister_callback(self.postwindowsmenu)
    self.unload_extensions()
    # Drop references so the editor object can be garbage collected;
    # the order matters (io/undo before widgets, percolator before top).
    self.io.close()
    self.io = None
    self.undo = None
    if self.color:
        self.color.close(False)
        self.color = None
    self.text = None
    self.tkinter_vars = None
    self.per.close()
    self.per = None
    self.top.destroy()
    if self.close_hook:
        # unless override: unregister from flist, terminate if last window
        self.close_hook()
def load_extensions(self):
    """Reset the extension table and load all configured extensions."""
    self.extensions = {}
    self.load_standard_extensions()
def unload_extensions(self):
    """Close every loaded extension that supports close(), then drop them all."""
    for extension in list(self.extensions.values()):
        close = getattr(extension, "close", None)
        if close is not None:
            close()
    self.extensions = {}
def load_standard_extensions(self):
    """Load every configured extension, reporting (not propagating) failures."""
    for name in self.get_standard_extension_names():
        try:
            self.load_extension(name)
        except Exception:
            # A broken extension must not take down the editor, but the
            # original bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit raised during startup; catch only real errors.
            print("Failed to load extension", repr(name))
            traceback.print_exc()
def get_standard_extension_names(self):
    """Return the names of extensions enabled in the IDLE configuration."""
    return idleConf.GetExtensions(editor_only=True)
def load_extension(self, name):
    """Import extension *name*, instantiate it, and wire up menus/bindings."""
    try:
        mod = __import__(name, globals(), locals(), [])
    except ImportError:
        print("\nFailed to import extension: ", name)
        raise
    # By convention the extension class has the same name as its module.
    cls = getattr(mod, name)
    keydefs = idleConf.GetExtensionBindings(name)
    if hasattr(cls, "menudefs"):
        self.fill_menus(cls.menudefs, keydefs)
    ins = cls(self)
    self.extensions[name] = ins
    if keydefs:
        self.apply_bindings(keydefs)
        for vevent in keydefs:
            # "<<foo-bar>>" -> "foo_bar_event"; bind it if the extension
            # defines a method of that name.
            methodname = vevent.replace("-", "_")
            while methodname[:1] == '<':
                methodname = methodname[1:]
            while methodname[-1:] == '>':
                methodname = methodname[:-1]
            methodname = methodname + "_event"
            if hasattr(ins, methodname):
                self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
    """Register key sequences for virtual events on the text widget."""
    if keydefs is None:
        keydefs = self.Bindings.default_keydefs
    widget = self.text
    widget.keydefs = keydefs
    for event, keys in keydefs.items():
        if keys:
            widget.event_add(event, *keys)
def fill_menus(self, menudefs=None, keydefs=None):
    """Add appropriate entries to the menus and submenus

    Menus that are absent or None in self.menudict are ignored.
    """
    if menudefs is None:
        menudefs = self.Bindings.menudefs
    if keydefs is None:
        keydefs = self.Bindings.default_keydefs
    menudict = self.menudict
    text = self.text
    for mname, entrylist in menudefs:
        menu = menudict.get(mname)
        if not menu:
            continue
        for entry in entrylist:
            if not entry:
                # A None/empty entry marks a separator line.
                menu.add_separator()
            else:
                label, eventname = entry
                # A leading '!' marks a checkbutton entry.
                checkbutton = (label[:1] == '!')
                if checkbutton:
                    label = label[1:]
                underline, label = prepstr(label)
                accelerator = get_accelerator(keydefs, eventname)
                # Default args bind the current text/eventname per iteration
                # (a plain closure would see only the last loop values).
                def command(text=text, eventname=eventname):
                    text.event_generate(eventname)
                if checkbutton:
                    var = self.get_var_obj(eventname, BooleanVar)
                    menu.add_checkbutton(label=label, underline=underline,
                                         command=command,
                                         accelerator=accelerator,
                                         variable=var)
                else:
                    menu.add_command(label=label, underline=underline,
                                     command=command,
                                     accelerator=accelerator)
def getvar(self, name):
    """Return the value of the Tkinter variable *name*; NameError if absent."""
    var = self.get_var_obj(name)
    if not var:
        raise NameError(name)
    return var.get()
def setvar(self, name, value, vartype=None):
    """Set Tkinter variable *name* to *value*, creating it if *vartype* given."""
    var = self.get_var_obj(name, vartype)
    if not var:
        raise NameError(name)
    var.set(value)
def get_var_obj(self, name, vartype=None):
    """Return the cached Tkinter variable for *name*, creating one if needed."""
    var = self.tkinter_vars.get(name)
    if not var and vartype:
        # create a Tkinter variable object with self.text as master
        var = vartype(self.text)
        self.tkinter_vars[name] = var
    return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
def is_char_in_string(self, text_index):
    """Return falsy only if the char at *text_index* is surely not in a string."""
    if not self.color:
        # The colorizer is missing: assume the worst
        return 1
    # True iff the colorizer hasn't (re)processed this far yet, or the
    # character is tagged as being part of a string.
    return (self.text.tag_prevrange("TODO", text_index) or
            "STRING" in self.text.tag_names(text_index))
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
    """Return (start, end) Tk indices of the selection, or (None, None)."""
    try:
        # Tk raises TclError when no selection exists (EAFP).
        return self.text.index("sel.first"), self.text.index("sel.last")
    except TclError:
        return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tk_tabwidth(self):
    """Return the widget's tab stop width (equivalent width in spaces)."""
    return int(self.text['tabs'] or TK_TABWIDTH_DEFAULT)
# Set the text widget's current view of what a tab stop means.
def set_tk_tabwidth(self, newtabwidth):
    """Set the text widget's tab stops to *newtabwidth* character widths."""
    widget = self.text
    if self.get_tk_tabwidth() == newtabwidth:
        return
    # Measure newtabwidth 'n' characters in the widget's font and use the
    # resulting pixel width as the tab stop.
    pixels = widget.tk.call("font", "measure", widget["font"],
                            "-displayof", widget.master,
                            "n" * newtabwidth)
    widget.configure(tabs=pixels)
### begin autoindent code ### (configuration was moved to beginning of class)
def set_indentation_params(self, is_py_src, guess=True):
    """Configure indent width / tab usage, optionally guessed from the source."""
    if is_py_src and guess:
        guessed = self.guess_indent()
        if 2 <= guessed <= 8:
            self.indentwidth = guessed
        # An indent width different from the tab width implies spaces-only.
        if self.indentwidth != self.tabwidth:
            self.usetabs = False
    self.set_tk_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
    """Backspace handler: delete selection, one char, or back to a tab stop."""
    text = self.text
    first, last = self.get_selection_indices()
    if first and last:
        text.delete(first, last)
        text.mark_set("insert", first)
        return "break"
    # Delete whitespace left, until hitting a real char or closest
    # preceding virtual tab stop.
    chars = text.get("insert linestart", "insert")
    if chars == '':
        if text.compare("insert", ">", "1.0"):
            # easy: delete preceding newline
            text.delete("insert-1c")
        else:
            text.bell()  # at start of buffer
        return "break"
    if chars[-1] not in " \t":
        # easy: delete preceding real char
        text.delete("insert-1c")
        return "break"
    # Ick.  It may require *inserting* spaces if we back up over a
    # tab character!  This is written to be clear, not fast.
    tabwidth = self.tabwidth
    have = len(chars.expandtabs(tabwidth))
    assert have > 0
    # Target column: previous multiple of indentwidth.
    want = ((have - 1) // self.indentwidth) * self.indentwidth
    # Debug prompt is multilined....
    # NOTE(review): sys.ps1 only exists in interactive contexts — presumably
    # the shell window sets it; verify for plain editor windows.
    last_line_of_prompt = sys.ps1.split('\n')[-1]
    ncharsdeleted = 0
    while 1:
        if chars == last_line_of_prompt:
            break
        chars = chars[:-1]
        ncharsdeleted = ncharsdeleted + 1
        have = len(chars.expandtabs(tabwidth))
        if have <= want or chars[-1] not in " \t":
            break
    text.undo_block_start()
    text.delete("insert-%dc" % ncharsdeleted, "insert")
    if have < want:
        # Backed up over a tab: pad with spaces to reach the tab stop.
        text.insert("insert", ' ' * (want - have))
    text.undo_block_stop()
    return "break"
def smart_indent_event(self, event):
    """Tab handler: delete/indent the selection or indent the current line."""
    # if intraline selection:
    #     delete it
    # elif multiline selection:
    #     do indent-region
    # else:
    #     indent one level
    text = self.text
    first, last = self.get_selection_indices()
    text.undo_block_start()
    try:
        if first and last:
            if index2line(first) != index2line(last):
                return self.indent_region_event(event)
            text.delete(first, last)
            text.mark_set("insert", first)
        prefix = text.get("insert linestart", "insert")
        raw, effective = classifyws(prefix, self.tabwidth)
        if raw == len(prefix):
            # only whitespace to the left
            self.reindent_to(effective + self.indentwidth)
        else:
            # tab to the next 'stop' within or to right of line's text:
            if self.usetabs:
                pad = '\t'
            else:
                effective = len(prefix.expandtabs(self.tabwidth))
                n = self.indentwidth
                pad = ' ' * (n - effective % n)
            text.insert("insert", pad)
        text.see("insert")
        return "break"
    finally:
        text.undo_block_stop()
def newline_and_indent_event(self, event):
    """Return-key handler: insert a newline and auto-indent the new line.

    Uses PyParse to classify the statement in progress (open bracket,
    backslash continuation, unterminated string, or complete) and chooses
    the indentation of the new line accordingly.
    """
    text = self.text
    first, last = self.get_selection_indices()
    text.undo_block_start()
    try:
        if first and last:
            text.delete(first, last)
            text.mark_set("insert", first)
        line = text.get("insert linestart", "insert")
        i, n = 0, len(line)
        while i < n and line[i] in " \t":
            i = i+1
        if i == n:
            # the cursor is in or at leading indentation in a continuation
            # line; just inject an empty line at the start
            text.insert("insert linestart", '\n')
            return "break"
        indent = line[:i]
        # strip whitespace before insert point unless it's in the prompt
        i = 0
        # NOTE(review): sys.ps1 assumes an interactive prompt is defined.
        last_line_of_prompt = sys.ps1.split('\n')[-1]
        while line and line[-1] in " \t" and line != last_line_of_prompt:
            line = line[:-1]
            i = i+1
        if i:
            text.delete("insert - %d chars" % i, "insert")
        # strip whitespace after insert point
        while text.get("insert") in " \t":
            text.delete("insert")
        # start new line
        text.insert("insert", '\n')
        # adjust indentation for continuations and block
        # open/close first need to find the last stmt
        lno = index2line(text.index('insert'))
        y = PyParse.Parser(self.indentwidth, self.tabwidth)
        if not self.context_use_ps1:
            # Editor window: scan back a bounded number of context lines
            # for a good place to start parsing.
            for context in self.num_context_lines:
                startat = max(lno - context, 1)
                startatindex = repr(startat) + ".0"
                rawtext = text.get(startatindex, "insert")
                y.set_str(rawtext)
                bod = y.find_good_parse_start(
                          self.context_use_ps1,
                          self._build_char_in_string_func(startatindex))
                if bod is not None or startat == 1:
                    break
            y.set_lo(bod or 0)
        else:
            # Shell window: parse from just after the last console prompt.
            r = text.tag_prevrange("console", "insert")
            if r:
                startatindex = r[1]
            else:
                startatindex = "1.0"
            rawtext = text.get(startatindex, "insert")
            y.set_str(rawtext)
            y.set_lo(0)
        c = y.get_continuation_type()
        if c != PyParse.C_NONE:
            # The current stmt hasn't ended yet.
            if c == PyParse.C_STRING_FIRST_LINE:
                # after the first line of a string; do not indent at all
                pass
            elif c == PyParse.C_STRING_NEXT_LINES:
                # inside a string which started before this line;
                # just mimic the current indent
                text.insert("insert", indent)
            elif c == PyParse.C_BRACKET:
                # line up with the first (if any) element of the
                # last open bracket structure; else indent one
                # level beyond the indent of the line with the
                # last open bracket
                self.reindent_to(y.compute_bracket_indent())
            elif c == PyParse.C_BACKSLASH:
                # if more than one line in this stmt already, just
                # mimic the current indent; else if initial line
                # has a start on an assignment stmt, indent to
                # beyond leftmost =; else to beyond first chunk of
                # non-whitespace on initial line
                if y.get_num_lines_in_stmt() > 1:
                    text.insert("insert", indent)
                else:
                    self.reindent_to(y.compute_backslash_indent())
            else:
                assert 0, "bogus continuation type %r" % (c,)
            return "break"
        # This line starts a brand new stmt; indent relative to
        # indentation of initial line of closest preceding
        # interesting stmt.
        indent = y.get_base_indent_string()
        text.insert("insert", indent)
        if y.is_block_opener():
            self.smart_indent_event(event)
        elif indent and y.is_block_closer():
            self.smart_backspace_event(event)
        return "break"
    finally:
        text.see("insert")
        text.undo_block_stop()
# Our editwin provides a is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
def _build_char_in_string_func(self, startindex):
    """Adapt is_char_in_string (Tk-index based) into a PyParse offset callback."""
    # Default args bind startindex and the bound method at creation time.
    def inner(offset, _startindex=startindex,
              _icis=self.is_char_in_string):
        # Translate the string offset into a Tk index relative to startindex.
        return _icis(_startindex + "+%dc" % offset)
    return inner
def indent_region_event(self, event):
    """Shift every non-empty line of the region one indent level right."""
    head, tail, chars, lines = self.get_region()
    for pos, line in enumerate(lines):
        if line:
            raw, effective = classifyws(line, self.tabwidth)
            # Rebuild the leading whitespace one indentwidth wider.
            lines[pos] = self._make_blanks(effective + self.indentwidth) + line[raw:]
    self.set_region(head, tail, chars, lines)
    return "break"
def dedent_region_event(self, event):
    """Shift every non-empty line of the region one indent level left."""
    head, tail, chars, lines = self.get_region()
    for pos, line in enumerate(lines):
        if line:
            raw, effective = classifyws(line, self.tabwidth)
            # Never dedent past column zero.
            lines[pos] = self._make_blanks(max(effective - self.indentwidth, 0)) + line[raw:]
    self.set_region(head, tail, chars, lines)
    return "break"
def comment_region_event(self, event):
    """Prefix each line of the region with '##'."""
    head, tail, chars, lines = self.get_region()
    # range(len - 1): skips the final list element, the (empty) segment
    # after the region's trailing newline.
    for pos in range(len(lines) - 1):
        lines[pos] = '##' + lines[pos]
    self.set_region(head, tail, chars, lines)
def uncomment_region_event(self, event):
    """Remove one leading '##' (or single '#') from each line of the region."""
    head, tail, chars, lines = self.get_region()
    for pos, line in enumerate(lines):
        if not line:
            continue
        if line.startswith('##'):
            line = line[2:]
        elif line.startswith('#'):
            line = line[1:]
        lines[pos] = line
    self.set_region(head, tail, chars, lines)
def tabify_region_event(self, event):
    """Convert leading whitespace in the region to tabs plus residual spaces."""
    head, tail, chars, lines = self.get_region()
    tabwidth = self._asktabwidth()
    for pos, line in enumerate(lines):
        if line:
            raw, effective = classifyws(line, tabwidth)
            ntabs, nspaces = divmod(effective, tabwidth)
            lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
    self.set_region(head, tail, chars, lines)
def untabify_region_event(self, event):
    """Expand all tabs in the region to spaces."""
    head, tail, chars, lines = self.get_region()
    tabwidth = self._asktabwidth()
    lines[:] = [line.expandtabs(tabwidth) for line in lines]
    self.set_region(head, tail, chars, lines)
def toggle_tabs_event(self, event):
    """Toggle tab/space indentation after confirming with the user."""
    message = ("Turn tabs " + ("on", "off")[self.usetabs] +
               "?\nIndent width " +
               ("will be", "remains at")[self.usetabs] + " 8." +
               "\n Note: a tab is always 8 columns")
    if self.askyesno("Toggle tabs", message, parent=self.text):
        self.usetabs = not self.usetabs
        # Try to prevent inconsistent indentation.
        # User must change indent width manually after using tabs.
        self.indentwidth = 8
    return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
def change_indentwidth_event(self, event):
    """Prompt for a new indent width and apply it (ignored when using tabs)."""
    new = self.askinteger(
              "Indent width",
              "New indent width (2-16)\n(Always use 8 when using tabs)",
              parent=self.text,
              initialvalue=self.indentwidth,
              minvalue=2,
              maxvalue=16)
    # None (cancel), unchanged width, or tab mode all leave the setting alone.
    if new and new != self.indentwidth and not self.usetabs:
        self.indentwidth = new
    return "break"
def get_region(self):
    """Return (head, tail, chars, lines) for the selection or current line.

    head/tail are Tk indices spanning whole lines; chars is the raw text
    and lines its newline-split form.
    """
    text = self.text
    first, last = self.get_selection_indices()
    if first and last:
        head = text.index(first + " linestart")
        tail = text.index(last + "-1c lineend +1c")
    else:
        head = text.index("insert linestart")
        tail = text.index("insert lineend +1c")
    chars = text.get(head, tail)
    return head, tail, chars, chars.split("\n")
def set_region(self, head, tail, chars, lines):
    """Replace the text between head and tail with *lines*, keeping it selected."""
    widget = self.text
    newchars = "\n".join(lines)
    if newchars == chars:
        # Nothing changed: beep instead of dirtying the undo stack.
        widget.bell()
        return
    widget.tag_remove("sel", "1.0", "end")
    widget.mark_set("insert", head)
    widget.undo_block_start()
    widget.delete(head, tail)
    widget.insert(head, newchars)
    widget.undo_block_stop()
    widget.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
    """Return whitespace that displays as n leading blanks (tabs if enabled)."""
    if not self.usetabs:
        return ' ' * n
    ntabs, nspaces = divmod(n, self.tabwidth)
    return '\t' * ntabs + ' ' * nspaces
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
def reindent_to(self, column):
    """Replace the current line's prefix with indentation of width *column*.

    Deletes from the beginning of the line to the insert point, then
    reinserts `column` logical spaces (tabs if appropriate).
    """
    text = self.text
    text.undo_block_start()
    if text.compare("insert linestart", "!=", "insert"):
        text.delete("insert linestart", "insert")
    if column:
        text.insert("insert", self._make_blanks(column))
    text.undo_block_stop()
def _asktabwidth(self):
    """Ask the user for a tab width (2-16); fall back to self.tabwidth."""
    answer = self.askinteger(
        "Tab width",
        "Columns per tab? (2-16)",
        parent=self.text,
        initialvalue=self.indentwidth,
        minvalue=2,
        maxvalue=16)
    return answer or self.tabwidth
# Guess indentwidth from text content.
# Return guessed indentwidth. This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
def guess_indent(self):
    """Guess the indent width from the first indented block (0 if none found).

    Callers should only trust the result when it is in a sensible range.
    """
    opener, indented = IndentSearcher(self.text, self.tabwidth).run()
    if not (opener and indented):
        return 0
    _, small = classifyws(opener, self.tabwidth)
    _, large = classifyws(indented, self.tabwidth)
    return large - small
# "line.col" -> line, as an int
def index2line(index):
    """Convert a Tk "line.col" index (or a float) to its integer line number."""
    return int(float(index))
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
# effective # of leading blanks after expanding
# tabs to width tabwidth)
def classifyws(s, tabwidth):
    """Examine the leading whitespace of *s*.

    Return (raw, effective): the number of leading whitespace characters
    and the number of columns they occupy once tabs are expanded to
    *tabwidth*.
    """
    raw = 0
    for ch in s:
        if ch not in " \t":
            break
        raw += 1
    # expandtabs advances each tab to the next multiple of tabwidth,
    # matching a manual column count.
    effective = len(s[:raw].expandtabs(tabwidth))
    return raw, effective
# Keep a private alias to the tokenize module and delete the public name so
# it does not pollute this module's namespace.
import tokenize
_tokenize = tokenize
del tokenize
class IndentSearcher(object):
    # .run() chews over the Text widget, looking for a block opener
    # and the stmt following it.  Returns a pair,
    #     (line containing block opener, line containing stmt)
    # Either or both may be None.

    def __init__(self, text, tabwidth):
        self.text = text
        self.tabwidth = tabwidth
        self.i = self.finished = 0
        self.blkopenline = self.indentedline = None

    def readline(self):
        """Feed the widget to the tokenizer one line at a time."""
        if self.finished:
            return ""
        i = self.i = self.i + 1
        mark = repr(i) + ".0"
        if self.text.compare(mark, ">=", "end"):
            return ""
        return self.text.get(mark, mark + " lineend+1c")

    def tokeneater(self, type, token, start, end, line,
                   INDENT=_tokenize.INDENT,
                   NAME=_tokenize.NAME,
                   OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
        """Record the first block-opener line and the INDENT line after it."""
        if self.finished:
            pass
        elif type == NAME and token in OPENERS:
            self.blkopenline = line
        elif type == INDENT and self.blkopenline:
            self.indentedline = line
            self.finished = 1

    def run(self):
        """Tokenize the widget contents; return (opener_line, indented_line)."""
        # NOTE(review): relies on the private module attribute
        # _tokenize.tabsize; verify it exists on the targeted Python version.
        save_tabsize = _tokenize.tabsize
        _tokenize.tabsize = self.tabwidth
        try:
            try:
                tokens = _tokenize.generate_tokens(self.readline)
                for token in tokens:
                    self.tokeneater(*token)
            except _tokenize.TokenError:
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            _tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline
### end autoindent code ###
def prepstr(s):
    """Extract the underline marker from a menu label.

    e.g. prepstr("Co_py") returns (2, "Copy"): the index of the removed
    '_' (or -1 when absent) and the label without it.
    """
    idx = s.find('_')
    if idx >= 0:
        s = s[:idx] + s[idx+1:]
    return idx, s
# Map Tk keysym names to the printable characters shown in menu accelerators.
keynames = {
 'bracketleft': '[',
 'bracketright': ']',
 'slash': '/',
}
def get_accelerator(keydefs, eventname):
    """Return a human-readable accelerator string for *eventname*, or ''."""
    keylist = keydefs.get(eventname)
    # issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
    # if not keylist:
    if (not keylist) or (macosxSupport.runningAsOSXApp() and eventname in {
                            "<<open-module>>",
                            "<<goto-line>>",
                            "<<change-indentwidth>>"}):
        return ""
    # Pretty-print the first binding, e.g. "<Control-Key-z>" -> "Ctrl+Z".
    # The substitution order below matters.
    s = keylist[0]
    s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
    s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
    s = re.sub("Key-", "", s)
    s = re.sub("Cancel", "Ctrl-Break", s)  # dscherer@cmu.edu
    s = re.sub("Control-", "Ctrl-", s)
    s = re.sub("-", "+", s)
    s = re.sub("><", " ", s)
    s = re.sub("<", "", s)
    s = re.sub(">", "", s)
    return s
def fixwordbreaks(root):
    # Make sure that Tk's double-click and next/previous word
    # operations use our definition of a word (i.e. an identifier)
    tk = root.tk
    tk.call('tcl_wordBreakAfter', 'a b', 0)  # make sure word.tcl is loaded
    tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
    tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
def test():
    """Open a throwaway EditorWindow for manual testing (file from argv[1])."""
    root = Tk()
    fixwordbreaks(root)
    root.withdraw()
    filename = sys.argv[1] if sys.argv[1:] else None
    edit = EditorWindow(root=root, filename=filename)
    edit.set_close_hook(root.quit)
    edit.text.bind("<<close-all-windows>>", edit.close_event)
    root.mainloop()
    root.destroy()
# Run the manual smoke test when executed as a script.
if __name__ == '__main__':
    test()
| {
"content_hash": "c0f74aadcf7bf9ed9810014a47d09227",
"timestamp": "",
"source": "github",
"line_count": 1593,
"max_line_length": 95,
"avg_line_length": 38.64846202134338,
"alnum_prop": 0.5533158997514902,
"repo_name": "MalloyPower/parsing-python",
"id": "6a1e325880c8d06abf9d3405fa4ffc75fa94de3b",
"size": "61567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.2/Lib/idlelib/EditorWindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import urllib2
from bs4 import BeautifulSoup
from parsingLibrary import loadHtmlTags, parseChannel, remove_duplicate_elements
from pymongo import MongoClient
from mongoConfiguration import load_mongo_configuration

# Connect to MongoDB and rebuild the channel collections from scratch:
# each crawl drops and fully replaces the previous channel data.
mongo_address, mongo_port = load_mongo_configuration()
client = MongoClient(mongo_address, mongo_port)
db = client['freeview']
channelCollection = db['tvChannel']
channelCollection.drop()
channelCategoryCollection = db['tvChannelCategory']
channelCategoryCollection.drop()
channelProviderCollection = db['tvChannelProvider']
channelProviderCollection.drop()
def get_channels(url):
    """Scrape the channel names listed on a telegraph.co.uk TV-guide page.

    url -- path fragment appended to the TV-guide base URL.
    Returns a list of channel-name strings as displayed on the page.
    """
    url_channels = 'http://tvguideuk.telegraph.co.uk/' + url
    print url
    a = BeautifulSoup(urllib2.urlopen(url_channels).read())
    channels = a.findAll("div", {"class": "channel_name"})
    list_channels = []
    for channel in channels:
        list_channels.append(channel.text)
    return list_channels
def add_type_to_channel(channels_classified, channels_by_type, key, value):
    """Tag every channel in *channels_by_type* with *value* under *key*.

    Creates a channel entry (with its parsed display name) on first sight;
    afterwards only appends to the per-key list.  Mutates and returns
    *channels_classified*.
    """
    tag = value.upper()
    for channel in channels_by_type:
        entry = channels_classified.get(channel)
        if entry is None:
            # First time we see this channel: create its record and name it.
            channels_classified[channel] = {key: [tag],
                                            'name': parseChannel(channel.upper())}
        else:
            entry.setdefault(key, []).append(tag)
    return channels_classified
def find_channel_classifed(tags):
    """Build {channel: {name, provider[], category[]}} from TV-guide tag URLs.

    Each recognized tag page is scraped for channel names, which are then
    merged into a single classification dict.  All lists are initialized
    up-front so a tag that is absent from *tags* simply contributes no
    channels (previously a missing tag raised NameError on the unbound
    *_channels variable).
    """
    freeview_channels = []
    terrestrial_channels = []
    cable_all_channels = []
    films_channels = []
    sport_channels = []
    news_channels = []
    kids_channels = []
    radio_channels = []
    for tag_url in tags:
        # if 'All' in tag_url:
        #     all_channels = getChannels(tag_url)
        if 'Freeview' in tag_url:
            freeview_channels = get_channels(tag_url)
        if 'Terrestrial' in tag_url:
            terrestrial_channels = get_channels(tag_url)
        if 'Sky & Cable' in tag_url:
            cable_all_channels = get_channels(tag_url)
        if 'Films' in tag_url:
            films_channels = get_channels(tag_url)
        if 'Sport' in tag_url:
            sport_channels = get_channels(tag_url)
        if 'News & Doc' in tag_url:
            news_channels = get_channels(tag_url)
        if 'Kids' in tag_url:
            kids_channels = get_channels(tag_url)
        if 'Radio' in tag_url:
            radio_channels = get_channels(tag_url)
    channels_classified = {}
    add_type_to_channel(channels_classified, freeview_channels, "provider", "FREEVIEW")
    add_type_to_channel(channels_classified, terrestrial_channels, "provider", "TERRESTRIAL")
    add_type_to_channel(channels_classified, cable_all_channels, "provider", "SKY & CABLE")
    add_type_to_channel(channels_classified, films_channels, "category", "FILMS")
    add_type_to_channel(channels_classified, sport_channels, "category", "SPORTS")
    add_type_to_channel(channels_classified, news_channels, "category", "NEWS & DOCUMENTARY")
    add_type_to_channel(channels_classified, kids_channels, "category", "KIDS")
    add_type_to_channel(channels_classified, radio_channels, "category", "RADIO")
    return channels_classified
from datetime import datetime

# Crawl today's schedule at several hours; the channel line-up should be the
# same at every hour, so the extra hours only serve as a consistency check.
day = datetime.now().day
month = datetime.now().month
year = datetime.now().year
hours = ['12am', '2am', '4am', '6am', '8am', '10am', '12pm', '2pm', '4pm', '6pm', '8pm', '10pm']
channels_classified = {}
for hour in hours:
    tags = loadHtmlTags(year, month, day, hour, 'All')
    channels_classified_temp = find_channel_classifed(tags)
    # Single-argument print() is valid in both Python 2 and 3.
    print('-------- ' + hour)
    for channel_classified_temp in channels_classified_temp:
        if channel_classified_temp in channels_classified:
            # Channel already seen at another hour: classifications should match.
            if channels_classified_temp[channel_classified_temp] != channels_classified[channel_classified_temp]:
                print("-------- DIFFERENT")
                print(channels_classified_temp[channel_classified_temp])
                print(channels_classified[channel_classified_temp])
        else:
            channels_classified[channel_classified_temp] = channels_classified_temp[channel_classified_temp]
            print(channel_classified_temp + ' INSERTED')
# Fill in defaults, de-duplicate providers, and persist each channel document.
for channel in channels_classified:
    if 'category' not in channels_classified[channel]:
        channels_classified[channel]['category'] = ['GENERIC']
    if 'provider' in channels_classified[channel]:
        channels_classified[channel]['provider'] = remove_duplicate_elements(channels_classified[channel]['provider'])
    else:
        channels_classified[channel]['provider'] = ['UNKNOWN']
    channelCollection.insert(channels_classified[channel])
# Reference collections for lookups.  "UNKNOWN" was misspelled "UNKOWN" here,
# so it never matched the 'UNKNOWN' default assigned to channels above.
providers = ["FREEVIEW", "TERRESTRIAL", "SKY & CABLE", "UNKNOWN"]
categories = ["FILMS", "SPORTS", "NEWS & DOCUMENTARY", "KIDS", "RADIO", "GENERIC"]
for provider in providers:
    json_to_insert = {}
    json_to_insert['provider'] = provider
    channelProviderCollection.insert(json_to_insert)
for category in categories:
    json_to_insert = {}
    json_to_insert['category'] = category
    channelCategoryCollection.insert(json_to_insert)
| {
"content_hash": "52812114a297d6848565f6c7f3b481f4",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 117,
"avg_line_length": 41.9344262295082,
"alnum_prop": 0.674941360437842,
"repo_name": "tvlive/tv-crawler",
"id": "a70c27c41fb1427d50a0962eb8ae729a30e6f7ef",
"size": "5139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvChannelLoader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24541"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
} |
"""Evaluates precision@k scores for table retriever predictions."""
from absl import app
from absl import flags
from tapas.scripts import eval_table_retriever_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
'prediction_files_local', None,
'A list of files that contain model predictions as a TSV'
'file with headers [table_id, query_rep, table_rep].')
flags.DEFINE_string(
'prediction_files_global', None,
'A list of files that contain model predictions for all'
'of that tables in the corpous. Used as the index to'
'retrieve tables from.')
flags.DEFINE_string(
'retrieval_results_file_path', None,
'A path to file where the best tables candidates and their scores, for each'
'query will be written.')
def main(argv):
    """Run precision@k evaluation, indexing on the global files when provided."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # Without a global prediction set, the local predictions double as the
    # table index to retrieve from.
    index_files = FLAGS.prediction_files_global or FLAGS.prediction_files_local
    eval_table_retriever_utils.eval_precision_at_k(
        FLAGS.prediction_files_local,
        index_files,
        make_tables_unique=True,
        retrieval_results_file_path=FLAGS.retrieval_results_file_path)
if __name__ == '__main__':
    # prediction_files_local is the only mandatory flag.
    flags.mark_flag_as_required('prediction_files_local')
    app.run(main)
| {
"content_hash": "c8db57f780a3afd4f12773efdc663cd9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 31.67391304347826,
"alnum_prop": 0.7048730267673301,
"repo_name": "google-research/tapas",
"id": "ebcb0ab9a91cc1117aa0f3207f87c4982e401e8e",
"size": "2072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tapas/scripts/eval_table_retriever.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "58484"
},
{
"name": "Python",
"bytes": "1182377"
}
],
"symlink_target": ""
} |
import numpy as np
def long_substr(strgs):
    """
    Return the longest common substring sequences shared by all of *strgs*.

    Repeatedly finds the longest substring common to every string, records
    it, removes one occurrence from each string, and continues while an
    equally long common substring remains.
    Based on: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
    """
    strgs = list(strgs)  # work on a copy; the algorithm mutates the list
    if not strgs:
        return []
    if len(strgs) == 1:
        return [strgs[0]] if strgs[0] else []
    if not strgs[0]:
        return []
    found = []
    current = None
    best_len = 1
    while True:
        if current is not None:
            if len(current) < best_len:
                # Shorter than the previous max-length sequence: stop.
                break
            found.append(current)
            best_len = len(current)
            strgs = [s.replace(current, '', 1) for s in strgs]
        # Search the (possibly shrunk) first string for the longest
        # substring present in every string.
        current = ''
        base = strgs[0]
        for i in range(len(base)):
            for j in range(len(base) - i + 1):
                if j > len(current) and all(base[i:i+j] in s for s in strgs):
                    current = base[i:i+j]
    return found
def levenshtein(source, target):
    """Compute the Levenshtein edit distance between two strings."""
    if len(source) < len(target):
        # Keep target as the shorter string so the DP rows stay small.
        source, target = target, source
    if len(target) == 0:
        return len(source)
    # tuple() forces numpy to treat the strings as character sequences
    # ('c', 'a', 't', 's') rather than scalar values.
    src = np.array(tuple(source))
    tgt = np.array(tuple(target))
    # Dynamic programming, keeping only the previous row of the matrix.
    prev = np.arange(tgt.size + 1)
    for ch in src:
        # Insertion (target grows longer than source):
        cur = prev + 1
        # Substitution or match (cost 1 if the characters differ, else 0):
        cur[1:] = np.minimum(cur[1:], prev[:-1] + (tgt != ch))
        # Deletion (target grows shorter than source):
        cur[1:] = np.minimum(cur[1:], cur[:-1] + 1)
        prev = cur
    return prev[-1]
if __name__ == '__main__':
    # Ad-hoc smoke tests (Python 2 print statements).
    s1 = 'Oh, hello, my friend...'
    s2 = 'I prefer Jelly Belly beans...'
    s3 = 'When hell freezes... over!'
    print long_substr([s1, s2, s3])
    print long_substr(['0', 'a'])
    print long_substr(['abba'])
    print long_substr([''])
    print long_substr([])
    print levenshtein(s1, s2)
    print levenshtein(s1, s3)
    print levenshtein(s2, s3)
| {
"content_hash": "6b6203facebfa9d1c095f910fdd81bf6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 115,
"avg_line_length": 37,
"alnum_prop": 0.5555935682517961,
"repo_name": "iuliux/RegExTractor",
"id": "a2f35efb0d16e3651128cd1d112a475636f5cc07",
"size": "2923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12769"
}
],
"symlink_target": ""
} |
import nose.tools as ns
import os
from os.path import join
from tempfile import gettempdir
from relshell.record import Record
from relshell.recorddef import RecordDef
from relshell.batch import Batch
from shellstreaming.core.batch_queue import BatchQueue
from shellstreaming.ostream.localfile import LocalFile
TEST_FILE = join(gettempdir(), 'shellstreaming_test_localfile.txt')
def teardown():
    # nose module-level teardown: remove the temp file the test created.
    os.remove(TEST_FILE)
def test_localfile_usage():
    """End-to-end check: batches pushed to a queue are written as CSV."""
    # prepare input queue
    q = BatchQueue()
    for batch in _create_batches():
        q.push(batch)  # [fix] - Batch's output format has to be customized by user
    # None sentinel tells the ostream there is no more input.
    q.push(None)
    # run ostream
    ostream = LocalFile(TEST_FILE, output_format='csv', input_queue=q)
    ostream.join()
    # check contents: one quoted value per line, in push order
    with open(TEST_FILE) as f:
        ns.eq_(f.read(),
               '''"111"
"222"
"333"
'''
        )
def _create_batches():
    """Build two small single-INT-column batches: (111, 222) and (333,)."""
    rdef = RecordDef([{'name': 'col0', 'type': 'INT'}])
    first = Batch(rdef, (Record(111), Record(222), ))
    second = Batch(rdef, (Record(333), ))
    return (first, second)
| {
"content_hash": "0049d0c172d2177a2da3338588cbd9bd",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.6628571428571428,
"repo_name": "laysakura/shellstreaming",
"id": "f4092ca0133808fc77753d0eabba2e3d539cd41f",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/ostream/test_localfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25465"
},
{
"name": "JavaScript",
"bytes": "49784"
},
{
"name": "Python",
"bytes": "238726"
}
],
"symlink_target": ""
} |
import requests
from flask import Blueprint, request, jsonify, make_response
from app.projects.models import Projects, ProjectsHaulers, ProjectsDebrisbox, ProjectsSchema, TicketsRd, db
from app.facilities.models import FacilitiesSchema
from app.ticketsRd.models import TicketsRdSchema
from app.ticketsSr.models import TicketsSrSchema
from app.materials.models import MaterialsSchema
from app.haulers.models import Haulers
from flask_restful import Api, Resource
from app.helper.helper import Calc
from app.auth.models import token_auth, Security
import json
import datetime
import os
from config import GH_URL
from app.helper.phpserialize import *
from collections import OrderedDict
from sqlalchemy.exc import SQLAlchemyError
from marshmallow import ValidationError
# Flask blueprint + serialization schema for the projects API endpoints.
projects = Blueprint('projects', __name__)
# http://marshmallow.readthedocs.org/en/latest/quickstart.html#declaring-schemas
# https://github.com/marshmallow-code/marshmallow-jsonapi
schema = ProjectsSchema()
api = Api(projects)
def buildResult(query):
    """Build the aggregated report dict for one project (`query` is a Projects row).

    Collects the project's disposal (query.tickets) and source-reduction
    (query.tickets_sr) tickets belonging to the requesting hauler, computes
    weight/recycled/reused totals and the recycling rate, groups tickets by
    facility and by reuse category, and returns the flattened attributes dict
    (not a full JSON-API envelope).
    """
    HAULER_ID = Security.getHaulerId()
    result = schema.dump(query).data
    #group tickets by facility
    tf = {}  # FACILITY_ID -> list of ticket attribute dicts
    m_ids = []  # distinct MATERIAL_IDs seen in any ticket
    tickets_count = 0
    total_weight = 0
    total_recycled = 0
    total_reused = 0
    # --- Pass 1: disposal (RD) tickets owned by this hauler ---
    for ticket in query.tickets:
        ticketDump = TicketsRdSchema().dump(ticket).data
        if ticket.material and ticketDump['data']['attributes']['HAULER_ID'] == HAULER_ID:
            ticket_image = ticket.get_folder(True) + "ticket.jpg"
            tickets_count += 1
            material = MaterialsSchema().dump(ticket.material).data
            material = material['data']['attributes']
            ticketDump = ticketDump['data']['attributes']
            ticketDump['material'] = material['name']
            ticketDump['image'] = ticket_image
            # Keep only the date part of the ISO "YYYY-MM-DDT..." timestamp.
            split_date = ticketDump['thedate'].split('T')
            ticketDump['thedate'] = split_date[0]
            if not material['MATERIAL_ID'] in m_ids:
                m_ids.append(material['MATERIAL_ID'])
            if not ticketDump['FACILITY_ID'] in tf:
                tf[ticketDump['FACILITY_ID']] = []
            tf[ticketDump['FACILITY_ID']].append(ticketDump)
            total_weight += float(ticketDump['weight'])
            total_recycled += float(ticketDump['recycled'])
    # --- Pass 2: source-reduction (SR) tickets, bucketed by reuse category ---
    reused_types_tickets = []
    donatedTickets = []
    reuseTickets = []
    salvageTickets = []
    for ticket_sr in query.tickets_sr:
        ticketSrDump = TicketsSrSchema().dump(ticket_sr).data
        if ticket_sr.material and ticketSrDump['data']['attributes']['HAULER_ID'] == HAULER_ID:
            ticketSrDump = ticketSrDump['data']['attributes']
            tickets_count += 1
            folder = ticket_sr.get_folder()
            # Attach URLs for whichever ticket/material images exist on disk;
            # missing images become empty strings.
            if os.path.isfile(folder + 'ticket.jpg'):
                ticket_image = ticket_sr.get_folder(True) + "ticket.jpg"
            else:
                ticket_image = ''
            if os.path.isfile(folder + 'material.jpg'):
                ticketSrDump['material_image'] = ticket_sr.get_folder(True) + "material.jpg"
            else:
                ticketSrDump['material_image'] = ''
            if os.path.isfile(folder + 'material2.jpg'):
                ticketSrDump['material_image2'] = ticket_sr.get_folder(True) + "material2.jpg"
            else:
                ticketSrDump['material_image2'] = ''
            if os.path.isfile(folder + 'material3.jpg'):
                ticketSrDump['material_image3'] = ticket_sr.get_folder(True) + "material3.jpg"
            else:
                ticketSrDump['material_image3'] = ''
            if os.path.isfile(folder + 'material4.jpg'):
                ticketSrDump['material_image4'] = ticket_sr.get_folder(True) + "material4.jpg"
            else:
                ticketSrDump['material_image4'] = ''
            material = MaterialsSchema().dump(ticket_sr.material).data
            material = material['data']['attributes']
            if ticket_sr.facility:
                facility = FacilitiesSchema().dump(ticket_sr.facility).data
                ticketSrDump['facility'] = facility['data']['attributes']['name']
            else:
                ticketSrDump['facility'] = ''
            inventory = []
            if ticket_sr.inventory:
                # NOTE(review): ticket_sr.inventory is interpolated straight into
                # SQL; if it can hold anything other than a trusted id list this
                # is an injection risk -- consider a parameterized query.
                inventoryQuery = db.engine.execute("SELECT name FROM materials_salvage "+
                "WHERE MATERIAL_SALVAGE_ID IN("+str(ticket_sr.inventory)+")")
                salvage_materials = inventoryQuery.fetchall()
                for m in salvage_materials:
                    inventory.append(m.name)
            ticketSrDump['salvage_materials'] = ', '.join(inventory)
            ticketSrDump['units'] = 'tons'
            ticketSrDump['material'] = material['name']
            ticketSrDump['image'] = ticket_image
            split_date = ticketSrDump['thedate_ticket'].split('T')
            ticketSrDump['thedate_ticket'] = split_date[0]
            if not material['MATERIAL_ID'] in m_ids:
                m_ids.append(material['MATERIAL_ID'])
            # Bucket by construction type id: 18=Donated, 17=Reuse OnSite,
            # 19=Salvage for reuse on other project (magic ids used below too).
            if ticketSrDump['CONSTRUCTION_TYPE_ID'] == 18:
                donatedTickets.append(ticketSrDump)
                ticketSrDump['name'] = 'Donated';
            if ticketSrDump['CONSTRUCTION_TYPE_ID'] == 17:
                reuseTickets.append(ticketSrDump)
                ticketSrDump['name'] = 'Reuse OnSite';
            if ticketSrDump['CONSTRUCTION_TYPE_ID'] == 19:
                salvageTickets.append(ticketSrDump)
                ticketSrDump['name'] = 'Salvage for reuse on other project';
            total_weight += float(ticketSrDump['weight'])
            total_reused += float(ticketSrDump['weight'])
    if len(donatedTickets):
        reused_types_tickets.append({'name': 'Donated', 'CONSTRUCTION_TYPE_ID': 18, 'tickets': donatedTickets})
    if len(reuseTickets):
        reused_types_tickets.append({'name': 'Reuse OnSite', 'CONSTRUCTION_TYPE_ID': 17, 'tickets': reuseTickets})
    if len(salvageTickets):
        reused_types_tickets.append({'name': 'Salvage for reuse on other project', 'CONSTRUCTION_TYPE_ID': 19, 'tickets': salvageTickets})
    # --- Totals and derived figures ---
    result['data']['attributes']['reused_types'] = reused_types_tickets
    result['data']['attributes']['materials_hauled'] = len(m_ids)
    result['data']['attributes']['tickets_count'] = tickets_count
    result['data']['attributes']['total_tons'] = total_weight
    result['data']['attributes']['recycled'] = total_recycled
    result['data']['attributes']['reused'] = total_reused
    result['data']['attributes']['rate'] = Calc.rate(total_weight, total_recycled)
    #append facilities with related tickets to result
    result['data']['attributes']['facilities'] = []
    fids = []
    for ticket in query.tickets:
        ticketFacility = ticket.facility
        if ticketFacility:
            facilities = FacilitiesSchema().dump(ticket.facility).data
            facility = facilities['data']['attributes']
            # prevent adding duplicates; only include facilities that actually
            # have tickets for this hauler (keys of tf).
            if not facility['FACILITY_ID'] in fids and facility['FACILITY_ID'] in tf:
                city = ticketFacility.city
                county = city.county
                fids.append(facility['FACILITY_ID'])
                facility['tickets'] = tf[facility['FACILITY_ID']]
                facility['city'] = city.name
                facility['state'] = county.state
                result['data']['attributes']['facilities'].append(facility)
    city = query.city
    result['data']['attributes']['city'] = city.name
    # Vendor-terms lookup from serialized PHP city config is disabled; the
    # field is currently always blank.
    # if len(city.efields):
    #     try:
    #         print('========START TERMS=======')
    #         res = loads(dumps(city.efields), array_hook=OrderedDict)
    #         print('===1===')
    #         res = loads(res, object_hook=phpobject)
    #         print('===2===')
    #         vendor_terms_key = 'vendor_terms1'.encode("utf-8")
    #         print('===3===')
    #         if vendor_terms_key in res:
    #             print('===4===')
    #             result['data']['attributes']['vendor_terms'] = str(res[vendor_terms_key],'utf-8')
    #         else:
    #             print('===5===')
    #             result['data']['attributes']['vendor_terms'] = 'The City did not provide Terms and Conditions.'
    #     except RuntimeError:
    #         print('===6===')
    #         result['data']['attributes']['vendor_terms'] = 'The City did not provide Terms and Conditions.'
    # else:
    #     print('===7===')
    #     result['data']['attributes']['vendor_terms'] = 'The City did not provide Terms and Conditions.'
    result['data']['attributes']['vendor_terms'] = ''
    return result['data']['attributes']
class ProjectsList(Resource):
    """List every approved project the authenticated hauler participates in."""

    @token_auth.login_required
    def get(self):
        """Return aggregated report dicts for all approved projects of this hauler."""
        hauler_id = Security.getHaulerId()
        # Projects where the hauler is attached directly.
        found = Projects.query.filter(
            ProjectsHaulers.HAULER_ID == hauler_id,
            ProjectsHaulers.PROJECT_ID == Projects.PROJECT_ID,
            Projects.status == 'approved').all()
        known_ids = [p.PROJECT_ID for p in found]
        # Projects reached through a debris box; skip ones already found.
        debris_matches = Projects.query.filter(
            ProjectsDebrisbox.HAULER_ID == hauler_id,
            ProjectsDebrisbox.PROJECT_ID == Projects.PROJECT_ID,
            Projects.status == 'approved').all()
        for candidate in debris_matches:
            if candidate.PROJECT_ID not in known_ids:
                found.append(candidate)
        results = [buildResult(p) for p in found]
        db.session.commit()
        return results
class CompletedList(Resource):
    """List every completed project the authenticated hauler participates in."""

    @token_auth.login_required
    def get(self):
        """Return aggregated report dicts for all completed projects of this hauler."""
        hauler_id = Security.getHaulerId()
        # Projects where the hauler is attached directly.
        found = Projects.query.filter(
            ProjectsHaulers.HAULER_ID == hauler_id,
            ProjectsHaulers.PROJECT_ID == Projects.PROJECT_ID,
            Projects.status == 'completed').all()
        known_ids = [p.PROJECT_ID for p in found]
        # Projects reached through a debris box; skip ones already found.
        debris_matches = Projects.query.filter(
            ProjectsDebrisbox.HAULER_ID == hauler_id,
            ProjectsDebrisbox.PROJECT_ID == Projects.PROJECT_ID,
            Projects.status == 'completed').all()
        for candidate in debris_matches:
            if candidate.PROJECT_ID not in known_ids:
                found.append(candidate)
        results = [buildResult(p) for p in found]
        db.session.commit()
        return results
class CompletedCount(Resource):
    """Count the completed projects visible to the authenticated hauler."""

    @token_auth.login_required
    def get(self):
        """Return the number of completed projects (direct + via debris box)."""
        hauler_id = Security.getHaulerId()
        direct = Projects.query.filter(
            ProjectsHaulers.HAULER_ID == hauler_id,
            ProjectsHaulers.PROJECT_ID == Projects.PROJECT_ID,
            Projects.status == 'completed').all()
        project_ids = [p.PROJECT_ID for p in direct]
        via_debris = Projects.query.filter(
            ProjectsDebrisbox.HAULER_ID == hauler_id,
            ProjectsDebrisbox.PROJECT_ID == Projects.PROJECT_ID,
            Projects.status == 'completed').all()
        # Only add debris-box projects not already counted.
        for candidate in via_debris:
            if candidate.PROJECT_ID not in project_ids:
                project_ids.append(candidate.PROJECT_ID)
        total = len(project_ids)
        db.session.commit()
        return total
class ProjectsUpdate(Resource):
    """Fetch (GET) or update (PATCH) a single project by id."""

    @token_auth.login_required
    def get(self, id):
        """Return the aggregated report for one project.

        Commits the session first so the query sees the latest committed state.
        """
        db.session.commit()
        project = Projects.query.get_or_404(id)
        return buildResult(project)

    @token_auth.login_required
    def patch(self, id):
        """Apply vendor-terms agreement and/or final-status submission.

        Recognized form fields:
          * vendor_terms_agree -- marks the project agreed and records a note.
          * status -- moves the project to 'submitted_for_final'.
        Returns the project id on success, or a 401 JSON error response on
        validation/database failure.
        """
        project = Projects.query.get_or_404(id)
        raw_dict = {"data": {"attributes": request.form, "type": "projects"}}
        try:
            HAULER_ID = Security.getHaulerId()
            hauler = Haulers.query.get_or_404(HAULER_ID)
            schema.validate(raw_dict)
            params_dict = raw_dict['data']['attributes']
            if 'vendor_terms_agree' in params_dict:
                setattr(project, 'vendor_terms_agree', 'true')
                project.update()
                note = 'Vendor {0} has agreed to project terms and has accepted'.format(hauler.name)
                # Parameterized query: hauler.name (and thus `note`) comes from
                # the database/user input and must never be interpolated into
                # raw SQL (the previous string-built INSERT was injectable).
                db.engine.execute(
                    text("INSERT INTO projects_notes (DID, PROJECT_ID, UID, note, thedate) "
                         "VALUES (75, :project_id, :uid, :note, NOW())"),
                    project_id=id, uid=project.UID, note=note)
            if 'status' in params_dict:
                setattr(project, 'status', 'submitted_for_final')
                setattr(project, 'final_HAULER_ID', HAULER_ID)
                setattr(project, 'final_thedate', datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'))
                project.update()
            db.session.commit()
            return id
        except ValidationError as err:
            resp = jsonify({"error": err.messages})
            resp.status_code = 401
            return resp
        except SQLAlchemyError as e:
            db.session.rollback()
            resp = jsonify({"error": str(e)})
            resp.status_code = 401
            return resp
class VendorTerms(Resource):
    """Proxy resource that fetches a city's vendor terms from the GH service."""

    @token_auth.login_required
    def get(self, id):
        """Return the raw terms text for city `id` (debug print removed)."""
        url = '{0}/?func=cities/config/get_terms&CITY_ID={1}'.format(GH_URL, id)
        # SECURITY: TLS certificate verification is disabled (verify=False),
        # which allows man-in-the-middle attacks; enable verification once the
        # GH endpoint presents a valid certificate.
        r = requests.get(url, verify=False)
        return (r.text)
# Route registrations: map each resource class to its URL under the blueprint.
api.add_resource(ProjectsList, '.json')
api.add_resource(CompletedList, '/completed.json')
api.add_resource(CompletedCount, '/completed_count.json')
api.add_resource(ProjectsUpdate, '/<int:id>.json')
api.add_resource(VendorTerms, '/terms/city/<int:id>.json') | {
"content_hash": "004df2784616ae11bfbbee212edcb8af",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 626,
"avg_line_length": 50.88823529411765,
"alnum_prop": 0.53826147266212,
"repo_name": "konstantinKim/vd-backend",
"id": "87218a68e57e9d956f36398a9103064d5fcf2c82",
"size": "17302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/projects/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190141"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
def data_check(data, target):
    """Check data type and normalize input time-series data to a raw array.

    Parameters
    ----------
    data : pd.DataFrame or np.ndarray
        The time series data that will be used.
    target : int or str or None
        Target column; when None, the first column (pandas) or the first
        sub-array (2-D numpy input) is used.

    Returns
    ----------
    transformed_data : np.array
        Raw data array for use in the model
    data_name : str
        Name of the data
    is_pandas : Boolean
        True if pandas data, else numpy
    data_index : np.array
        The time indices for the data

    Raises
    ------
    Exception
        If `data` is neither a pandas DataFrame nor a numpy array.
    """
    if isinstance(data, pd.DataFrame):
        # pd.core.frame.DataFrame IS pd.DataFrame, so a single check suffices
        # (the old double isinstance was redundant).
        data_index = data.index
        if target is None:
            # .ix was deprecated and removed in pandas >= 1.0; .iloc keeps the
            # intended "first column by position" behaviour.
            transformed_data = data.iloc[:, 0].values
            data_name = str(data.columns.values[0])
        else:
            transformed_data = data[target].values
            data_name = str(target)
        is_pandas = True
    elif isinstance(data, np.ndarray):
        data_name = "Series"
        is_pandas = False
        if any(isinstance(i, np.ndarray) for i in data):
            # 2-D input: pick one row as the series.
            if target is None:
                transformed_data = data[0]
                data_index = list(range(len(data[0])))
            else:
                transformed_data = data[target]
                data_index = list(range(len(data[target])))
        else:
            transformed_data = data
            data_index = list(range(len(data)))
    else:
        raise Exception("The data input is not pandas or numpy compatible!")
    return transformed_data, data_name, is_pandas, data_index
def mv_data_check(data, check):
    """Normalize multivariate input data to (array, names, is_pandas, index).

    Multi-column counterpart of data_check; the `check` argument is accepted
    but not referenced.
    """
    if isinstance(data, pd.DataFrame):
        return data.values, data.columns.values, True, data.index
    if isinstance(data, np.ndarray):
        column_count = len(data[0])
        names = np.asarray(range(1, column_count + 1))
        return data, names, False, list(range(column_count))
    raise Exception("The data input is not pandas or numpy compatible!")
| {
"content_hash": "ec2827622da0f1198c15f71c3f87c0d1",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 83,
"avg_line_length": 30.272727272727273,
"alnum_prop": 0.5564135564135564,
"repo_name": "RJT1990/pyflux",
"id": "131a12c0ade1debdcb59b6e436415537898c0bef",
"size": "2331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyflux/data_check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1918616"
}
],
"symlink_target": ""
} |
import sys
from domains.support.lib.common import *
import re
from pprint import pprint
def getRrdFiles():
    """Collect info for every armore-* RRD file under the web server's rrd dir."""
    rrd_dir = "/var/webServer/static/rrd"
    file_list = getFileList(rrd_dir, recursive=True, match='^armore-.*')
    return getRrdFilesInfo(file_list)
def parseInfo(paramList):
    """Extract data-source (ds) names and consolidation functions (cf) from
    `rrdtool info` output, preserving first-seen order and dropping duplicates.
    """
    ds_pattern = re.compile(r'^ds\[(\w+)\]')
    cf_pattern = re.compile(r'.*\.cf = "(\w+)"')
    ds_seen = {}
    cf_seen = {}
    for line in paramList.split("\n"):
        ds_match = ds_pattern.search(line)
        if ds_match:
            ds_seen[ds_match.group(1)] = 1
        cf_match = cf_pattern.search(line)
        if cf_match:
            cf_seen[cf_match.group(1)] = 1
    return list(ds_seen.keys()), list(cf_seen.keys())
def getRrdFilesInfo(rrdFiles):
    """Annotate RRD file info with ds/cf lists and regroup by connection pair.

    `rrdFiles` maps directory path -> {filename -> info dict}.  Directory
    basenames look like "armore-<from>__<to>" where '_' stands in for '.'
    (e.g. "armore-10_0_0_1__10_0_0_2").  Returns {connFrom: {connTo:
    [filenames...]}} while also mutating `rrdFiles` in place, adding 'ds' and
    'cf' entries per file.
    """
    # NOTE(review): this initial {} is dead -- paramList is reassigned (as a
    # string) before first use in the inner loop below.
    paramList = {}
    newDict = {}
    for d in rrdFiles:
        # "armore-10_0_0_1__10_0_0_2" -> connFrom "10.0.0.1", connTo "10.0.0.2"
        connFrom, connTo = [re.sub('_', '.', x) for x in (re.sub('armore-', '', d.split('/')[-1])).split('__')]
        if connFrom in newDict:
            if not connTo in newDict[connFrom]:
                newDict[connFrom][connTo] = []
        else:
            newDict[connFrom] = {connTo: []}
        for f in rrdFiles[d]:
            # Shell out to rrdtool for this file's metadata dump.
            paramList = cmd(["rrdtool", "info", "{0}/{1}".format(d, f)])
            ds, cf = parseInfo(paramList)
            rrdFiles[d][f]['ds'] = ds
            rrdFiles[d][f]['cf'] = cf
            newDict[connFrom][connTo].append(f)
    return newDict
| {
"content_hash": "d65c3773e6bd5901b9aea6c2d97a58a3",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 111,
"avg_line_length": 30.11904761904762,
"alnum_prop": 0.5359683794466403,
"repo_name": "GridProtectionAlliance/ARMORE",
"id": "9115223bd6a5333c73233388de73384ccbb87f2c",
"size": "3028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/webServer/domains/support/rrd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "918532"
},
{
"name": "C++",
"bytes": "199619"
},
{
"name": "CSS",
"bytes": "14998"
},
{
"name": "Elixir",
"bytes": "19273"
},
{
"name": "HTML",
"bytes": "122769"
},
{
"name": "JavaScript",
"bytes": "961544"
},
{
"name": "Makefile",
"bytes": "29364"
},
{
"name": "Python",
"bytes": "267525"
},
{
"name": "Roff",
"bytes": "6670"
},
{
"name": "Shell",
"bytes": "106609"
},
{
"name": "Zeek",
"bytes": "514415"
}
],
"symlink_target": ""
} |
import sys
import os
import argparse
import json
import importlib
import getpass
from datetime import datetime
import requests
import boto
from boto.s3 import connect_to_region
from boto.s3.connection import OrdinaryCallingFormat
from drift.utils import get_tier_name
from drift.management.gittools import get_branch, get_commit, get_repo_url, get_git_version
TIERS_CONFIG_FILENAME = "tiers-config.json"
def get_commands():
    """Return the names of all command modules in this package's commands/ dir.

    A module qualifies when its filename ends in .py and does not start with
    an underscore; the returned names have the .py suffix stripped.
    """
    commands_dir = os.path.join(__path__[0], "commands")
    names = []
    for entry in os.listdir(commands_dir):
        if entry.endswith(".py") and not entry.startswith("_"):
            names.append(entry[:-3])
    return names
def execute_cmd():
    """Console entry point: forward the CLI args (sans program name) to do_execute_cmd."""
    cli_args = sys.argv[1:]
    return do_execute_cmd(cli_args)
def do_execute_cmd(argv):
    """Parse `argv`, wire up one subparser per management command, run the chosen one.

    Each command module may define get_options(parser) to add its own flags and
    must define run_command(args).  A -t/--tier argument overrides the
    drift_TIER environment variable.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-v", "--verbose", help="I am verbose!", action="store_true")
    parser.add_argument("-t", "--tier", help="Tier to use (overrides drift_TIER from environment)")
    subparsers = parser.add_subparsers(help="sub-command help")
    for command_name in get_commands():
        command_module = importlib.import_module("drift.management.commands." + command_name)
        command_parser = subparsers.add_parser(command_name, help="Subcommands for {}".format(command_name))
        if hasattr(command_module, "get_options"):
            command_module.get_options(command_parser)
        command_parser.set_defaults(func=command_module.run_command)
    parsed = parser.parse_args(argv)
    if parsed.tier:
        os.environ["drift_TIER"] = parsed.tier
    parsed.func(parsed)
def get_config_path(file_name=None, folder=None):
    """Return the per-user configuration folder, or a file path inside it.

    `folder` defaults to ".drift" and is resolved under the user's home
    directory, being created on first use.  A ".ssh" folder is chmod'ed to
    0o700 on every call.  When `file_name` is given, the path of that file
    inside the folder is returned instead of the folder itself.
    """
    folder = folder or ".drift"
    base = os.path.join(os.path.expanduser("~"), folder)
    if not os.path.exists(base):
        os.makedirs(base)
    # Special case for .ssh folder
    if folder == ".ssh":
        os.chmod(base, 0o700)
    return os.path.join(base, file_name) if file_name else base
def get_s3_bucket(tiers_config):
    """Return the S3 bucket named '<bucket>.<domain>' in the configured region."""
    region = tiers_config["region"]
    connection = connect_to_region(region, calling_format=OrdinaryCallingFormat())
    full_name = "{}.{}".format(tiers_config["bucket"], tiers_config["domain"])
    return connection.get_bucket(full_name)
def get_tiers_config(display_title=True):
    """Load the tiers configuration from ~/.drift, plus the active tier selection.

    Exits the process if the tiers config file is missing.  If a TIER
    selection file exists, the selected tier's JSON is merged in under the
    'active_tier' key; a stale selection (its tier file deleted) is removed
    and the lookup retried recursively.  Optionally prints the active
    domain/tier.  (Python 2 print statements.)
    """
    config_file = get_config_path(TIERS_CONFIG_FILENAME)
    if not os.path.exists(config_file):
        print "No tiers configuration file found. Use the 'init' command to initialize."
        sys.exit(1)
    tiers_config = json.load(open(config_file))
    tier_selection_file = get_config_path("TIER")
    if not os.path.exists(tier_selection_file):
        if display_title:
            print "Note: No tier selected. Use the 'use' command to select a tier."
    else:
        tier_name = open(tier_selection_file).read().strip()
        tier_filename = get_config_path("{}.json".format(tier_name))
        if not os.path.exists(tier_filename):
            # Stale selection: the selected tier's config no longer exists.
            # Drop the selection file and recurse to re-resolve from scratch.
            os.remove(tier_selection_file)
            return get_tiers_config(display_title)
        tiers_config["active_tier"] = json.load(open(tier_filename))
    if display_title:
        print "Active domain: {} [{}]".format(tiers_config["title"], tiers_config["domain"])
        if "active_tier" in tiers_config:
            print "Active tier: {}".format(tiers_config["active_tier"]["tier"])
    return tiers_config
def fetch(path):
    """Read the contents of the file or url pointed to by 'path'."""
    # Try three interpretations in order -- local file path, HTTP(S) URL, then
    # "region/bucket/key" S3 coordinates -- returning on the first success.
    try:
        with open(path) as f:
            return f.read()
    except Exception as e1:
        pass
    try:
        r = requests.get(path)
        r.raise_for_status()
        return r.text
    except Exception as e2:
        pass
    try:
        region, bucket_name, key_name = path.split("/", 2)
        conn = connect_to_region(region, calling_format=OrdinaryCallingFormat())
        bucket = conn.lookup(bucket_name)
        data = bucket.get_key(key_name).get_contents_as_string()
        return data
    except Exception as e3:
        pass
    # All three attempts failed: report each failure reason and implicitly
    # return None.  (Python 2 print statements; reading e1/e2/e3 after their
    # except blocks is legal only under Python 2 scoping rules.)
    print "Can't fetch '{}'".format(path)
    print " Not a file:", e1
    print " Not an URL:", e2
    print " Not a bucket:", e3
def get_tier_config():
    """Load and return the JSON config for the currently selected tier.

    Reads ~/.drift/<tier>.json where <tier> comes from get_tier_name().
    """
    tier = get_tier_name()
    config_file = os.path.join(os.path.expanduser("~"), ".drift", "{}.json".format(tier))
    with open(config_file) as fp:
        return json.load(fp)
def get_service_info():
    """Return {'name', 'version'} for the current deployable.

    Reads the JSON config pointed to by the drift_CONFIG env var and the
    VERSION file that lives two directories above it.
    # TODO: error checking
    """
    config_filename = os.environ["drift_CONFIG"]
    with open(config_filename) as config_fp:
        config = json.load(config_fp)
    service_root = os.path.dirname(os.path.dirname(config_filename))
    with open(os.path.join(service_root, "VERSION")) as version_fp:
        service_version = version_fp.read().strip()
    return {
        "name": config["name"],
        "version": service_version,
    }
def get_ec2_instances(region, filters=None):
    """
    Returns all EC2 instances in the region.
    'filters' is passed to the 'get_all_reservations' function.
    """
    connection = boto.ec2.connect_to_region(region)
    instances = []
    for reservation in connection.get_all_reservations(filters=filters):
        instances.extend(reservation.instances)
    return instances
def create_deployment_manifest(method):
    """Returns a dict describing the current deployable.

    Captures who deployed what, when, from which branch/commit, and by which
    `method` (e.g. the management command used).
    """
    service_info = get_service_info()
    commit = get_commit()
    version = get_git_version()
    release = version['tag'] if version else 'untagged-branch'
    return {
        'deployable': service_info['name'],
        'method': method,
        'username': getpass.getuser(),
        'datetime': datetime.utcnow().isoformat(),
        'branch': get_branch(),
        'commit': commit,
        'commit_url': get_repo_url() + "/commit/" + commit,
        'release': release,
    }
| {
"content_hash": "0c9336d41ee205f2897cbf9574de2b42",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 99,
"avg_line_length": 31.18686868686869,
"alnum_prop": 0.6414574898785426,
"repo_name": "1939Games/drift",
"id": "498a6f10eea15f958a9108d6b7196d0c623aed34",
"size": "6197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drift/management/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2371"
},
{
"name": "Python",
"bytes": "274810"
},
{
"name": "Shell",
"bytes": "3540"
}
],
"symlink_target": ""
} |
"""
Package for prototype.
"""
| {
"content_hash": "d8546e4799fe8bbe41c2aec33acf9c7f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 22,
"avg_line_length": 10.333333333333334,
"alnum_prop": 0.6129032258064516,
"repo_name": "guilatrova/customercontrol-api",
"id": "77a798048134c01f2643e9edbeabb84ca104cf8f",
"size": "31",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prototype/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10862"
}
],
"symlink_target": ""
} |
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HostGroupRequest(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared swagger type (used by to_dict()).
    swagger_types = {
        'name': 'str',
        'constraint': 'Constraint',
        'recipe_ids': 'list[int]',
        'recovery_mode': 'str',
        'recipes': 'list[RecipeRequest]',
        'recipe_names': 'list[str]'
    }
    # Attribute name -> JSON key in the wire format.
    attribute_map = {
        'name': 'name',
        'constraint': 'constraint',
        'recipe_ids': 'recipeIds',
        'recovery_mode': 'recoveryMode',
        'recipes': 'recipes',
        'recipe_names': 'recipeNames'
    }
    def __init__(self, name=None, constraint=None, recipe_ids=None, recovery_mode=None, recipes=None, recipe_names=None):
        """
        HostGroupRequest - a model defined in Swagger

        `name` and `constraint` are required: their setters raise ValueError
        on None.  The remaining attributes are optional.
        """
        self._name = None
        self._constraint = None
        self._recipe_ids = None
        self._recovery_mode = None
        self._recipes = None
        self._recipe_names = None
        self.name = name
        self.constraint = constraint
        if recipe_ids is not None:
            self.recipe_ids = recipe_ids
        if recovery_mode is not None:
            self.recovery_mode = recovery_mode
        if recipes is not None:
            self.recipes = recipes
        if recipe_names is not None:
            self.recipe_names = recipe_names
    @property
    def name(self):
        """
        Gets the name of this HostGroupRequest.
        name of the resource

        :return: The name of this HostGroupRequest.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this HostGroupRequest.
        name of the resource

        :param name: The name of this HostGroupRequest.
        :type: str
        :raises ValueError: if name is None (required field)
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name
    @property
    def constraint(self):
        """
        Gets the constraint of this HostGroupRequest.
        instance group or resource constraint for a hostgroup

        :return: The constraint of this HostGroupRequest.
        :rtype: Constraint
        """
        return self._constraint
    @constraint.setter
    def constraint(self, constraint):
        """
        Sets the constraint of this HostGroupRequest.
        instance group or resource constraint for a hostgroup

        :param constraint: The constraint of this HostGroupRequest.
        :type: Constraint
        :raises ValueError: if constraint is None (required field)
        """
        if constraint is None:
            raise ValueError("Invalid value for `constraint`, must not be `None`")
        self._constraint = constraint
    @property
    def recipe_ids(self):
        """
        Gets the recipe_ids of this HostGroupRequest.
        referenced recipe ids

        :return: The recipe_ids of this HostGroupRequest.
        :rtype: list[int]
        """
        return self._recipe_ids
    @recipe_ids.setter
    def recipe_ids(self, recipe_ids):
        """
        Sets the recipe_ids of this HostGroupRequest.
        referenced recipe ids

        :param recipe_ids: The recipe_ids of this HostGroupRequest.
        :type: list[int]
        """
        self._recipe_ids = recipe_ids
    @property
    def recovery_mode(self):
        """
        Gets the recovery_mode of this HostGroupRequest.
        recovery mode of the hostgroup's nodes

        :return: The recovery_mode of this HostGroupRequest.
        :rtype: str
        """
        return self._recovery_mode
    @recovery_mode.setter
    def recovery_mode(self, recovery_mode):
        """
        Sets the recovery_mode of this HostGroupRequest.
        recovery mode of the hostgroup's nodes

        :param recovery_mode: The recovery_mode of this HostGroupRequest.
        :type: str
        :raises ValueError: if recovery_mode is not "MANUAL" or "AUTO"
        """
        # Closed enum enforced by the swagger spec.
        allowed_values = ["MANUAL", "AUTO"]
        if recovery_mode not in allowed_values:
            raise ValueError(
                "Invalid value for `recovery_mode` ({0}), must be one of {1}"
                .format(recovery_mode, allowed_values)
            )
        self._recovery_mode = recovery_mode
    @property
    def recipes(self):
        """
        Gets the recipes of this HostGroupRequest.
        referenced recipes

        :return: The recipes of this HostGroupRequest.
        :rtype: list[RecipeRequest]
        """
        return self._recipes
    @recipes.setter
    def recipes(self, recipes):
        """
        Sets the recipes of this HostGroupRequest.
        referenced recipes

        :param recipes: The recipes of this HostGroupRequest.
        :type: list[RecipeRequest]
        """
        self._recipes = recipes
    @property
    def recipe_names(self):
        """
        Gets the recipe_names of this HostGroupRequest.
        referenced recipe names

        :return: The recipe_names of this HostGroupRequest.
        :rtype: list[str]
        """
        return self._recipe_names
    @recipe_names.setter
    def recipe_names(self, recipe_names):
        """
        Sets the recipe_names of this HostGroupRequest.
        referenced recipe names

        :param recipe_names: The recipe_names of this HostGroupRequest.
        :type: list[str]
        """
        self._recipe_names = recipe_names
    def to_dict(self):
        """
        Returns the model properties as a dict

        Nested models (anything with a to_dict) are serialized recursively,
        including inside lists and dicts.
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal

        Equality is attribute-by-attribute via __dict__ comparison.
        """
        if not isinstance(other, HostGroupRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| {
"content_hash": "d25b5d3125c98afc8a191308bbc000a8",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 984,
"avg_line_length": 30.40590405904059,
"alnum_prop": 0.5889563106796116,
"repo_name": "Chaffelson/whoville",
"id": "5ac69ff308ccdf764e10e63eacf9b9995bbcbaa5",
"size": "8257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whoville/cloudbreak/models/host_group_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6961"
},
{
"name": "HTML",
"bytes": "72038"
},
{
"name": "Python",
"bytes": "3729355"
},
{
"name": "Shell",
"bytes": "95963"
},
{
"name": "TSQL",
"bytes": "345"
}
],
"symlink_target": ""
} |
# Sphinx configuration for the pyssh-ctypes documentation build.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# FIX: the path was previously inserted as the relative string '..', which only
# resolves correctly when sphinx-build is run from the docs directory; make it
# absolute so autodoc can import the package regardless of the current
# working directory.
sys.path.insert(0, os.path.abspath('..'))
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'pyssh-ctypes'
copyright = '2013, Andrey Antukh'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyssh-ctypesdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'pyssh-ctypes.tex', 'pyssh-ctypes Documentation',
     'Andrey Antukh', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyssh-ctypes', 'pyssh-ctypes Documentation',
     ['Andrey Antukh'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'pyssh-ctypes', 'pyssh-ctypes Documentation',
     'Andrey Antukh', 'pyssh-ctypes', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "bf1b70980d19e13b8078fc28ddcc0343",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 80,
"avg_line_length": 32.271186440677965,
"alnum_prop": 0.7022058823529411,
"repo_name": "niwinz/pyssh-ctypes",
"id": "58013334aae9517e3b072ba65e95f5ca83db9ece",
"size": "8062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39227"
},
{
"name": "Shell",
"bytes": "52"
}
],
"symlink_target": ""
} |
'''
'''
# Cocoa (NSOpenGL) backend for pyglet's GL configuration/context machinery.
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'

import platform

from pyglet.gl.base import Config, CanvasConfig, Context
from pyglet.gl import ContextException
from pyglet.gl import gl
from pyglet.gl import agl
from pyglet.canvas.cocoa import CocoaCanvas

# Star import brings in the NSOpenGLPFA* constants, ObjCClass, ctypes helpers
# (c_int, c_uint32, byref) and the quartz bindings used below.
from pyglet.libs.darwin.cocoapy import *

# Objective-C class proxies resolved at import time via the ObjC runtime.
NSOpenGLPixelFormat = ObjCClass('NSOpenGLPixelFormat')
NSOpenGLContext = ObjCClass('NSOpenGLContext')

# Version info, needed as OpenGL different Lion and onward
"""Version is based on Darwin kernel, not OS-X version.
OS-X / Darwin version history
http://en.wikipedia.org/wiki/Darwin_(operating_system)#Release_history
pre-release: 0.1, 0.2, 1.0, 1.1,
kodiak: 1.2.1,
cheetah: 1.3.1,
puma: 1.4.1, 5.1 -> 5.5
jaguar: 6.0.1 -> 6.8
panther: 7.0 -> 7.9
tiger: 8.0 -> 8.11
leopard: 9.0 -> 9.8
snow_leopard: 10.0 -> 10.8
lion: 11.0 -> 11.4
mountain_lion: 12.0 -> ?
mavericks: 13.0 ->
"""
# Map of OS-X release code name -> first Darwin *kernel* version of that
# release (see the version-history notes above).  Values are tuples of ints
# so they order-compare correctly against the tuple produced by
# os_x_version().
os_x_release = {
    'pre-release': (0, 1),
    'kodiak': (1, 2, 1),
    'cheetah': (1, 3, 1),
    # BUG FIX: this entry was (1, 4.1) -- a 2-tuple containing the float 4.1
    # instead of the 3-tuple (1, 4, 1) used by every sibling entry.  Mixing a
    # float into the tuple breaks element-wise comparisons against integer
    # version tuples.
    'puma': (1, 4, 1),
    'jaguar': (6, 0, 1),
    'panther': (7,),
    'tiger': (8,),
    'leopard': (9,),
    'snow_leopard': (10,),
    'lion': (11,),
    'mountain_lion': (12,),
    'mavericks': (13,),
}
def os_x_version(release=None):
    """Return the Darwin kernel version as a tuple of ints.

    Args:
        release: optional dotted version string such as ``'11.4.2'``.
            Defaults to ``platform.release()``, i.e. the running Darwin
            kernel version.  (Backward-compatible generalization: existing
            zero-argument callers are unaffected.)

    Returns:
        A non-empty tuple of ints, e.g. ``(11, 4, 2)``, suitable for ordered
        comparison against the entries of ``os_x_release``.
    """
    if release is None:
        release = platform.release()
    version = tuple(int(x) for x in release.split('.'))
    # BUG FIX: the original fallback returned ``(version,)`` -- a tuple
    # containing an empty tuple -- which does not compare sensibly against
    # integer version tuples.  Return (0,) instead; in practice
    # platform.release() never yields an empty parse, so this is a guard only.
    if version:
        return version
    return (0,)
# Darwin kernel version of the machine we are running on, computed once at
# import time and consulted for Lion / Snow Leopard feature switches below.
_os_x_version = os_x_version()

# Valid names for GL attributes and their corresponding NSOpenGL constant.
# NOTE: iteration order matters -- CocoaCanvasConfig.__init__ walks
# _gl_attributes.items() to copy queried values onto itself.
_gl_attributes = {
    'double_buffer': NSOpenGLPFADoubleBuffer,
    'stereo': NSOpenGLPFAStereo,
    'buffer_size': NSOpenGLPFAColorSize,
    'sample_buffers': NSOpenGLPFASampleBuffers,
    'samples': NSOpenGLPFASamples,
    'aux_buffers': NSOpenGLPFAAuxBuffers,
    'alpha_size': NSOpenGLPFAAlphaSize,
    'depth_size': NSOpenGLPFADepthSize,
    'stencil_size': NSOpenGLPFAStencilSize,

    # Not exposed by pyglet API (set internally)
    'all_renderers': NSOpenGLPFAAllRenderers,
    'fullscreen': NSOpenGLPFAFullScreen,
    'minimum_policy': NSOpenGLPFAMinimumPolicy,
    'maximum_policy': NSOpenGLPFAMaximumPolicy,
    'screen_mask' : NSOpenGLPFAScreenMask,

    # Not supported in current pyglet API
    'color_float': NSOpenGLPFAColorFloat,
    'offscreen': NSOpenGLPFAOffScreen,
    'sample_alpha': NSOpenGLPFASampleAlpha,
    'multisample': NSOpenGLPFAMultisample,
    'supersample': NSOpenGLPFASupersample,
}
# NSOpenGL constants which do not require a value.
# Used by CocoaConfig.match(): boolean attributes are appended to the
# pixel-format attribute array alone, all others are followed by their value.
_boolean_gl_attributes = frozenset([
    NSOpenGLPFAAllRenderers,
    NSOpenGLPFADoubleBuffer,
    NSOpenGLPFAStereo,
    NSOpenGLPFAMinimumPolicy,
    NSOpenGLPFAMaximumPolicy,
    NSOpenGLPFAOffScreen,
    NSOpenGLPFAFullScreen,
    NSOpenGLPFAColorFloat,
    NSOpenGLPFAMultisample,
    NSOpenGLPFASupersample,
    NSOpenGLPFASampleAlpha,
])

# Attributes for which no NSOpenGLPixelFormatAttribute name exists.
# We could probably compute actual values for these using
# NSOpenGLPFAColorSize / 4 and NSOpenGLFAAccumSize / 4, but I'm not that
# confident I know what I'm doing.
# These placeholder zeros are copied onto CocoaCanvasConfig instances so that
# pyglet.info can report every standard config attribute.
_fake_gl_attributes = {
    'red_size': 0,
    'green_size': 0,
    'blue_size': 0,
    'accum_red_size': 0,
    'accum_green_size': 0,
    'accum_blue_size': 0,
    'accum_alpha_size': 0
}
class CocoaConfig(Config):
    """Cocoa implementation of a GL config template.

    ``match()`` translates the requested GL attributes into an
    NSOpenGLPixelFormat attribute array and returns the matching canvas
    configs (at most one).
    """

    def match(self, canvas):
        # Construct array of attributes for NSOpenGLPixelFormat.
        # Layout is a flat sequence: boolean attrs appear alone, valued attrs
        # are followed immediately by their integer value.
        attrs = []
        for name, value in self.get_gl_attributes():
            attr = _gl_attributes.get(name)
            if not attr or not value:
                continue
            attrs.append(attr)
            if attr not in _boolean_gl_attributes:
                attrs.append(int(value))

        # Support for RAGE-II, which is not compliant.
        attrs.append(NSOpenGLPFAAllRenderers)

        # Force selection policy.
        attrs.append(NSOpenGLPFAMaximumPolicy)

        # NSOpenGLPFAFullScreen is always supplied so we can switch to and
        # from fullscreen without losing the context. Also must supply the
        # NSOpenGLPFAScreenMask attribute with appropriate display ID.
        # Note that these attributes aren't necessary to render in fullscreen
        # on Mac OS X 10.6, because there we are simply rendering into a
        # screen sized window. See:
        # http://developer.apple.com/library/mac/#documentation/GraphicsImaging/Conceptual/OpenGL-MacProgGuide/opengl_fullscreen/opengl_cgl.html%23//apple_ref/doc/uid/TP40001987-CH210-SW6
        # Otherwise, make sure we refer to the correct Profile for OpenGL (Core or
        # Legacy) on Lion and afterwards
        if _os_x_version < os_x_release['snow_leopard']:
            attrs.append(NSOpenGLPFAFullScreen)
            attrs.append(NSOpenGLPFAScreenMask)
            attrs.append(quartz.CGDisplayIDToOpenGLDisplayMask(quartz.CGMainDisplayID()))
        elif _os_x_version >= os_x_release['lion']:
            # check for opengl profile
            # This requires OS-X Lion (Darwin 11) or higher
            version = (
                getattr(self, 'major_version', None),
                getattr(self, 'minor_version', None)
            )
            # tell os-x we want to request a profile
            attrs.append(NSOpenGLPFAOpenGLProfile)
            # check if we're wanting core or legacy
            # as of OS-X (Mountain)Lion, there is only
            # Legacy and Core 3.2
            if version == (3, 2):
                attrs.append(int(NSOpenGLProfileVersion3_2Core))
            else:
                attrs.append(int(NSOpenGLProfileVersionLegacy))

        # Terminate the list (the ObjC API expects a 0-terminated array).
        attrs.append(0)

        # Create the pixel format.
        attrsArrayType = c_uint32 * len(attrs)
        attrsArray = attrsArrayType(*attrs)
        pixel_format = NSOpenGLPixelFormat.alloc().initWithAttributes_(attrsArray)

        # Return the match list: empty when no pixel format satisfies the
        # requested attributes, otherwise a single canvas config.
        if pixel_format is None:
            return []
        else:
            return [CocoaCanvasConfig(canvas, self, pixel_format)]
class CocoaCanvasConfig(CanvasConfig):
    """Canvas config backed by a concrete NSOpenGLPixelFormat.

    On construction, queries the pixel format for every known GL attribute
    and mirrors the values as instance attributes, then records the GL
    major/minor version implied by the selected OpenGL profile (Lion+).
    """

    def __init__(self, canvas, config, pixel_format):
        super(CocoaCanvasConfig, self).__init__(canvas, config)
        self._pixel_format = pixel_format

        # Query values for the attributes of the pixel format, and then set
        # the corresponding attributes of the canvas config.  The ObjC call
        # writes its result through the byref(vals) out-parameter.
        for name, attr in _gl_attributes.items():
            vals = c_int()
            self._pixel_format.getValues_forAttribute_forVirtualScreen_(byref(vals), attr, 0)
            setattr(self, name, vals.value)

        # Set these attributes so that we can run pyglet.info.
        for name, value in _fake_gl_attributes.items():
            setattr(self, name, value)

        # Update the minor/major version from profile if (Mountain)Lion.
        if _os_x_version >= os_x_release['lion']:
            vals = c_int()
            self._pixel_format.getValues_forAttribute_forVirtualScreen_(
                byref(vals),
                NSOpenGLPFAOpenGLProfile,
                0
            )
            # BUG FIX: getValues_forAttribute_forVirtualScreen_ returns the
            # profile through the byref(vals) out-parameter (exactly as in
            # the query loop above), not as its return value.  The original
            # compared the call's return value, so the 3.2-core branch could
            # never be taken.  Compare vals.value instead.
            if vals.value == NSOpenGLProfileVersion3_2Core:
                self.major_version = 3
                self.minor_version = 2
            else:
                self.major_version = 2
                self.minor_version = 1

    def create_context(self, share):
        """Create a CocoaContext for this config, optionally sharing GL
        objects with *share* (another context or None)."""
        # Determine the shared NSOpenGLContext.
        if share:
            share_context = share._nscontext
        else:
            share_context = None

        # Create a new NSOpenGLContext.
        nscontext = NSOpenGLContext.alloc().initWithFormat_shareContext_(
            self._pixel_format,
            share_context)

        return CocoaContext(self, nscontext, share)

    def compatible(self, canvas):
        """A Cocoa config can only be attached to a Cocoa canvas."""
        return isinstance(canvas, CocoaCanvas)
class CocoaContext(Context):
    """Thin pyglet Context wrapper around an NSOpenGLContext.

    Most methods delegate directly to the underlying ObjC context object.
    """

    def __init__(self, config, nscontext, share):
        super(CocoaContext, self).__init__(config, share)
        self.config = config
        # The wrapped NSOpenGLContext; set to None after destroy().
        self._nscontext = nscontext

    def attach(self, canvas):
        # See if we want OpenGL 3 in a non-Lion OS
        if _os_x_version < os_x_release['lion'] and self.config._requires_gl_3():
            raise ContextException('OpenGL 3 not supported')

        super(CocoaContext, self).attach(canvas)
        # The NSView instance should be attached to a nondeferred window before calling
        # setView, otherwise you get an "invalid drawable" message.
        self._nscontext.setView_(canvas.nsview)
        self.set_current()

    def detach(self):
        super(CocoaContext, self).detach()
        # Disassociate the context from its drawable (NSView).
        self._nscontext.clearDrawable()

    def set_current(self):
        # Make the wrapped context current *before* the base-class bookkeeping.
        self._nscontext.makeCurrentContext()
        super(CocoaContext, self).set_current()

    def update_geometry(self):
        # Need to call this method whenever the context drawable (an NSView)
        # changes size or location.
        self._nscontext.update()

    def set_full_screen(self):
        self._nscontext.makeCurrentContext()
        self._nscontext.setFullScreen()

    def destroy(self):
        super(CocoaContext, self).destroy()
        # Release the ObjC object and drop our reference so further use fails fast.
        self._nscontext.release()
        self._nscontext = None

    def set_vsync(self, vsync=True):
        # NSOpenGLCPSwapInterval: 1 = sync buffer swaps to display refresh.
        vals = c_int(vsync)
        self._nscontext.setValues_forParameter_(byref(vals), NSOpenGLCPSwapInterval)

    def get_vsync(self):
        vals = c_int()
        self._nscontext.getValues_forParameter_(byref(vals), NSOpenGLCPSwapInterval)
        return vals.value

    def flip(self):
        # Swap the back buffer to the screen.
        self._nscontext.flushBuffer()
| {
"content_hash": "916732aae0037311ca93d3730b46d231",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 187,
"avg_line_length": 34.23157894736842,
"alnum_prop": 0.6274087740877409,
"repo_name": "infowantstobeseen/pyglet-darwincore",
"id": "e0b0c4e4ac394be7995e8bada85635e064c0e8cb",
"size": "9779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyglet/gl/cocoa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "1652"
},
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "6187958"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import os.path
import shutil
import tempfile
from unittest.mock import Mock
# Django imports
from django.contrib.auth.models import Group
from django.http import Http404
from django.test import override_settings, TestCase, RequestFactory
from django.urls import reverse
from django.utils.timezone import now as django_now
# Project imports
from .base import TestJobBase, ResponseCheckerMixin
from .factories import generate_arabic_place_name
from ..constants import ROLLGEN_FLAG_FILENAME
from ..forms import NewJobForm
from ..job import Job
from ..utils import NoVotersError, handle_job_exception
from ..views import JobOverview, is_office_dir, is_rollgen_output_dir_decorator
from libya_site.tests.factories import UserFactory
from register.tests.factories import RegistrationCenterFactory
# Names of the rollgen URL patterns, split into read-only views and the
# job-creation view.  Presumably consumed by permission/access tests further
# down this module -- verify against later test cases.
ROLLGEN_READ_VIEW_NAMES = ('overview', 'browse_job_offices', 'browse_job_centers',
                           'browse_office_view', 'serve_pdf', 'serve_zip', 'polling_csv', )
ROLLGEN_CREATE_VIEW_NAMES = ('new', )
ROLLGEN_ALL_VIEW_NAMES = ROLLGEN_READ_VIEW_NAMES + ROLLGEN_CREATE_VIEW_NAMES
class ViewsFunctionsTestCase(TestCase):
    """Exercise functions in views.py"""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: the original override did not call super().setUpClass(),
        # which skips django.test.TestCase's class-level fixture setup
        # (wrapping the class in a transaction, etc.) and can break test
        # isolation.  Django requires overridden class hooks to chain to super.
        super(ViewsFunctionsTestCase, cls).setUpClass()
        # Scratch directory used as a fake rollgen output tree.
        cls.temp_dir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        # Clean up our scratch directory, then let the base class unwind its
        # class-level fixtures (the original never called super here either).
        shutil.rmtree(cls.temp_dir)
        super(ViewsFunctionsTestCase, cls).tearDownClass()

    def test_is_office_dir(self):
        """Exercise is_office_dir()"""
        # False for non-path
        self.assertFalse(is_office_dir('/this_path_does_not_exist/skjg/dfkgnl/dfgkjerh'))

        # False for path w/basename not an int
        path = os.path.join(self.temp_dir, 'aaa')
        os.mkdir(path)
        self.assertFalse(is_office_dir(path))

        # True for path w/integer basename
        path = os.path.join(self.temp_dir, '42')
        os.mkdir(path)
        self.assertTrue(is_office_dir(path))

    def test_is_rollgen_output_dir_decorator_positive_case(self):
        """ensure is_rollgen_output_dir_decorator() works"""
        job_name = 'this_is_a_rollgen_job'
        path = os.path.join(self.temp_dir, job_name)
        os.mkdir(path)
        # The flag file marks the directory as rollgen output.
        with open(os.path.join(path, ROLLGEN_FLAG_FILENAME), 'w') as f:
            f.write(' ')
        func = Mock()
        decorated = is_rollgen_output_dir_decorator(func)
        factory = RequestFactory()
        with override_settings(ROLLGEN_OUTPUT_DIR=self.temp_dir):
            request = factory.get(reverse('rollgen:browse_job_offices', args=(job_name, )))
            decorated(request, dirname=job_name)
        # The wrapped view must have been invoked.
        self.assertTrue(func.called)

    def test_is_rollgen_output_dir_decorator_not_a_directory(self):
        """ensure is_rollgen_output_dir_decorator() 404s on a non-existent directory"""
        job_name = 'this_path_does_not_exist'
        func = Mock()
        decorated = is_rollgen_output_dir_decorator(func)
        factory = RequestFactory()
        with override_settings(ROLLGEN_OUTPUT_DIR=self.temp_dir):
            request = factory.get(reverse('rollgen:browse_job_offices', args=(job_name, )))
            with self.assertRaises(Http404):
                decorated(request, dirname=job_name)
        # The wrapped view must never run when the directory is missing.
        self.assertFalse(func.called)

    def test_is_rollgen_output_dir_decorator_not_a_rollgen_directory(self):
        """ensure is_rollgen_output_dir_decorator() 404s on a non-rollgen directory"""
        job_name = 'this_is_not_a_rollgen_job'
        path = os.path.join(self.temp_dir, job_name)
        # Directory exists but lacks the rollgen flag file.
        os.mkdir(path)
        func = Mock()
        decorated = is_rollgen_output_dir_decorator(func)
        factory = RequestFactory()
        with override_settings(ROLLGEN_OUTPUT_DIR=self.temp_dir):
            request = factory.get(reverse('rollgen:browse_job_offices', args=(job_name, )))
            with self.assertRaises(Http404):
                decorated(request, dirname=job_name)
        self.assertFalse(func.called)
class ViewsFailedJobTestCase(ResponseCheckerMixin, TestJobBase):
    """Exercise views when a job has failed"""

    @property
    def faux_output_dir(self):
        # Parent of the per-job output path; used as the ROLLGEN_OUTPUT_DIR
        # setting so the views can discover the job directory.
        return os.path.normpath(os.path.join(self.output_path, '..'))

    def setUp(self):
        super(ViewsFailedJobTestCase, self).setUp()
        self.user = UserFactory(password='kittens!')
        self.user.is_superuser = True
        self.user.save()
        self.assertTrue(self.client.login(username=self.user.username, password='kittens!'))

        # Generate a center with no voters to force an error when the job runs.
        self.no_voters_center = RegistrationCenterFactory(name=generate_arabic_place_name())

        phase = 'in-person'
        self.input_arguments['phase'] = phase
        self.input_arguments['center_ids'] = [self.no_voters_center.center_id]

        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            self.job = Job(phase, [self.no_voters_center], self.input_arguments, self.user.username,
                           self.output_path)
            try:
                self.job.generate_rolls()
            except NoVotersError as exception:
                # This is expected. (In fact, it's the whole point of the test.)
                # handle_job_exception() records the failure in the job's
                # output directory so the views can render the failed state.
                handle_job_exception(exception, self.job.output_path)

        # Directory name component used in the rollgen URLs.
        self.dirname = os.path.basename(self.job.output_path)

    def test_browse_job_offices_view(self):
        """Generate a job offices view and test the context it passes to the template"""
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            response = self.client.get(reverse('rollgen:browse_job_offices', args=(self.dirname, )))
        self.assertResponseOK(response)
        # A failed job renders the dedicated failure template.
        self.assertTemplateUsed(response, 'rollgen/job_failed_view.html')
        context = response.context
        expected_keys = ('job', )
        self.assertTrue(set(expected_keys) < set(context.keys()))
        self.assertEqual(JobOverview(self.output_path).raw_metadata, context['job'].raw_metadata)

    def test_browse_job_centers_view(self):
        """Generate a job centers view and test the context it passes to the template"""
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            response = self.client.get(reverse('rollgen:browse_job_centers', args=(self.dirname, )))
        self.assertResponseOK(response)
        self.assertTemplateUsed(response, 'rollgen/job_failed_view.html')
        context = response.context
        expected_keys = ('job', )
        self.assertTrue(set(expected_keys) < set(context.keys()))
        self.assertEqual(JobOverview(self.output_path).raw_metadata, context['job'].raw_metadata)
class ViewsInProgressJobTestCase(ResponseCheckerMixin, TestJobBase):
    """Exercise views when a job is in progress"""

    @property
    def faux_output_dir(self):
        # Parent of the per-job output path; used as ROLLGEN_OUTPUT_DIR.
        return os.path.normpath(os.path.join(self.output_path, '..'))

    def setUp(self):
        super(ViewsInProgressJobTestCase, self).setUp()
        self.user = UserFactory(password='kittens!')
        self.user.is_superuser = True
        self.user.save()
        self.assertTrue(self.client.login(username=self.user.username, password='kittens!'))

        # I would like to create an in-progress job "organically", but that's hard to do under
        # test conditions. Instead I simulate the conditions of in-progress job.
        # The presence of the flag file (and absence of results) marks the
        # job directory as in progress.
        with open(os.path.join(self.output_path, ROLLGEN_FLAG_FILENAME), 'w') as f:
            f.write(' ')
        self.dirname = os.path.basename(self.output_path)

    def test_overview_view(self):
        """Generate a job view and test the context it passes to the template"""
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            response = self.client.get(reverse('rollgen:overview'))
        self.assertResponseOK(response)
        self.assertTemplateUsed(response, 'rollgen/overview.html')
        context = response.context
        expected_keys = ('jobs', )
        self.assertTrue(set(expected_keys) < set(context.keys()))
        self.assertEqual(1, len(context['jobs']))
        self.assertTrue(context['jobs'][0].in_progress)
        # There should not be a link to the job page.
        self.assertNotContains(response, reverse('rollgen:browse_job_offices',
                                                 args=(self.dirname, )))

    def test_browse_job_offices_view(self):
        """Generate a job offices view and test the context it passes to the template"""
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            response = self.client.get(reverse('rollgen:browse_job_offices', args=(self.dirname, )))
        self.assertResponseOK(response)
        # An in-progress job renders the dedicated in-progress template.
        self.assertTemplateUsed(response, 'rollgen/job_in_progress_view.html')
        context = response.context
        expected_keys = ('job', )
        self.assertTrue(set(expected_keys) < set(context.keys()))
        self.assertTrue(context['job'].in_progress)

    def test_browse_job_centers_view(self):
        """Generate a job centers view and test the context it passes to the template"""
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            response = self.client.get(reverse('rollgen:browse_job_centers', args=(self.dirname, )))
        self.assertResponseOK(response)
        self.assertTemplateUsed(response, 'rollgen/job_in_progress_view.html')
        context = response.context
        expected_keys = ('job', )
        self.assertTrue(set(expected_keys) < set(context.keys()))
        self.assertTrue(context['job'].in_progress)
class ViewsEmptyTestCase(ResponseCheckerMixin, TestJobBase):
    """Exercise views when the data directory is empty"""

    @property
    def faux_output_dir(self):
        # Parent of the per-job output path; handed to the views as the
        # ROLLGEN_OUTPUT_DIR setting.
        parent = os.path.join(self.output_path, '..')
        return os.path.normpath(parent)

    def setUp(self):
        super(ViewsEmptyTestCase, self).setUp()
        # Log in as a freshly-created superuser so all rollgen views are
        # accessible.
        superuser = UserFactory(password='kittens!')
        superuser.is_superuser = True
        superuser.save()
        logged_in = self.client.login(username=superuser.username, password='kittens!')
        self.assertTrue(logged_in)

    def test_overview_view(self):
        """Generate the overview view and test the context it passes to the template"""
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            response = self.client.get(reverse('rollgen:overview'))
        self.assertResponseOK(response)
        self.assertTemplateUsed(response, 'rollgen/overview.html')
        context = response.context
        # The context must expose a 'jobs' entry, and with an empty data
        # directory there are no jobs to list.
        self.assertTrue({'jobs'} < set(context.keys()))
        self.assertEqual(0, len(context['jobs']))
class ViewsNonEmptyTestCase(ResponseCheckerMixin, TestJobBase):
"""Exercise views when the data directory is not empty"""
@property
def faux_output_dir(self):
return os.path.normpath(os.path.join(self.output_path, '..'))
def setUp(self):
super(ViewsNonEmptyTestCase, self).setUp()
user = UserFactory(password='kittens!')
user.is_superuser = True
user.save()
self.assertTrue(self.client.login(username=user.username, password='kittens!'))
phase = 'in-person'
self.input_arguments['phase'] = phase
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
self.job = Job(phase, [self.center], self.input_arguments, self.user.username,
self.output_path)
self.job.generate_rolls()
self.dirname = os.path.basename(self.job.output_path)
def test_overview_view(self):
"""Generate the overview view and test the context it passes to the template"""
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:overview'))
self.assertResponseOK(response)
self.assertTemplateUsed(response, 'rollgen/overview.html')
context = response.context
expected_keys = ('jobs', )
self.assertTrue(set(expected_keys) < set(context.keys()))
self.assertEqual(1, len(context['jobs']))
self.assertEqual(JobOverview(self.output_path).raw_metadata,
context['jobs'][0].raw_metadata)
def test_new_view(self):
"""Generate the new job view and test the context it passes to the template"""
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:new'))
self.assertResponseOK(response)
self.assertTemplateUsed(response, 'rollgen/new.html')
context = response.context
expected_keys = ('form', )
self.assertTrue(set(expected_keys) < set(context.keys()))
self.assertIsInstance(context['form'], NewJobForm)
def test_new_view_no_centers(self):
"""Pass criteria that generate no centers to the new job form and test output"""
# This is the only validation that happens at the view level. All other validation happens
# in the form.
no_reg_center = RegistrationCenterFactory(reg_open=False)
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.post(reverse('rollgen:new'),
{'name': 'kjghdhjdhjghfkjhgdf',
'center_selection_type': 'by_center_text_list',
'center_text_list': [str(no_reg_center.center_id)],
'phase': 'polling',
'forgive_no_voters': False,
'forgive_no_office': False,
}
)
self.assertResponseOK(response)
self.assertTemplateUsed(response, 'rollgen/new.html')
context = response.context
expected_keys = ('form', )
self.assertTrue(set(expected_keys) < set(context.keys()))
self.assertIsInstance(context['form'], NewJobForm)
self.assertFormError(response, 'form', None,
"The criteria you specified didn't match any active centres.")
def test_browse_job_offices_view(self):
"""Generate a job offices view and test the context it passes to the template"""
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:browse_job_offices', args=(self.dirname, )))
self.assertResponseOK(response)
self.assertTemplateUsed(response, 'rollgen/browse_job_offices.html')
context = response.context
expected_keys = ('job', 'offices', )
self.assertTrue(set(expected_keys) < set(context.keys()))
self.assertFalse(context['job'].in_progress)
self.assertEqual(JobOverview(self.output_path).raw_metadata, context['job'].raw_metadata)
self.assertEqual([self.center.office], context['offices'])
def test_browse_job_centers_view(self):
"""Generate a job centers view and test the context it passes to the template"""
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:browse_job_centers', args=(self.dirname, )))
self.assertResponseOK(response)
self.assertTemplateUsed(response, 'rollgen/browse_job_centers.html')
context = response.context
expected_keys = ('job', 'offices', 'binned_center_ids', 'n_empty_centers', )
self.assertTrue(set(expected_keys) < set(context.keys()))
self.assertFalse(context['job'].in_progress)
self.assertEqual(JobOverview(self.output_path).raw_metadata, context['job'].raw_metadata)
self.assertEqual([self.center.office], context['offices'])
self.assertEqual(0, context['n_empty_centers'])
binned_center_ids = OrderedDict()
url = reverse('rollgen:browse_office_view', args=[self.dirname, self.center.office.id])
url += ('#c' + str(self.center.center_id))
binned_center_ids[str(self.center.center_id)[:3]] = [(self.center.center_id, url)]
self.assertEqual(binned_center_ids, context['binned_center_ids'])
def test_browse_office_view(self):
"""Generate a browse office view and test the context it passes to the template"""
office_id = self.center.office.id
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:browse_office_view',
args=(self.dirname, office_id)))
self.assertResponseOK(response)
self.assertTemplateUsed(response, 'rollgen/browse_office.html')
context = response.context
expected_keys = ('job_url', 'files', 'office', 'job', )
self.assertTrue(set(expected_keys) < set(context.keys()))
self.assertEqual(reverse('rollgen:browse_job_offices', args=[self.dirname]),
context['job_url'])
self.assertEqual(self.center.office, context['office'])
self.assertEqual(JobOverview(self.output_path).raw_metadata, context['job'].raw_metadata)
# context['files'] are a bunch of view.FileInfo objects. I test their attrs here.
actual_files = context['files']
self.assertEqual(2, len(actual_files))
expected_filenames = sorted(['{}_book_f.pdf'.format(self.center.center_id),
'{}_book_m.pdf'.format(self.center.center_id), ])
actual_filenames = sorted([file_info.name for file_info in actual_files])
self.assertEqual(expected_filenames, actual_filenames)
self.assertEqual([3, 3], [file_info.n_pages for file_info in actual_files])
# I don't know exactly how many bytes the PDF files will be, but I want to at least verify
# they're in a sane range.
for file_info in actual_files:
self.assertGreaterEqual(300000, file_info.n_bytes)
self.assertLessEqual(100000, file_info.n_bytes)
def test_browse_office_view_bad_office_id(self):
"""Generate a browse office view with an invalid office id and ensure the response is 404"""
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:browse_office_view',
args=(self.dirname, 9999)))
self.assertResponseNotFound(response)
def test_browse_office_view_when_office_has_no_files(self):
"""Generate a browse office view for an office that has no files associated
Note that the office-has-no-files state can only occur during the polling phase.
"""
center = RegistrationCenterFactory()
input_arguments = self.input_arguments.copy()
input_arguments['forgive_no_voters'] = True
input_arguments['phase'] = 'polling'
input_arguments['center_ids'] = [center.center_id]
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
job = Job(input_arguments['phase'], [center], input_arguments, self.user.username,
self.output_path)
job.generate_rolls()
dirname = os.path.basename(job.output_path)
office_id = center.office.id
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:browse_office_view',
args=(dirname, office_id)))
self.assertResponseOK(response)
self.assertTemplateUsed(response, 'rollgen/browse_office.html')
context = response.context
expected_keys = ('job_url', 'files', 'office', 'job', )
self.assertTrue(set(expected_keys) < set(context.keys()))
self.assertEqual(reverse('rollgen:browse_job_offices', args=[dirname]), context['job_url'])
self.assertEqual(center.office, context['office'])
self.assertEqual(JobOverview(self.output_path).raw_metadata, context['job'].raw_metadata)
self.assertContains(response, "There are no files for this office.")
def test_serve_pdf(self):
"""Generate an open-this-PDF view and test the response, including headers"""
office_id = self.center.office.id
pdf_filename = '{}_book_f.pdf'.format(self.center.center_id)
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:serve_pdf',
args=(self.dirname, office_id, pdf_filename)))
self.assertResponseOK(response)
self.assertEqual('application/pdf', response['Content-Type'])
self.assertEqual('attachment; filename="{}"'.format(pdf_filename),
response['Content-Disposition'])
self.assertEqual(b'%PDF', response.content[:4])
self.assertGreaterEqual(300000, len(response.content))
self.assertLessEqual(100000, len(response.content))
def test_serve_pdf_bad_office_id(self):
"""Generate an open-this-PDF view with a bad office id and ensure the response is a 404"""
pdf_filename = '{}_book_f.pdf'.format(self.center.center_id)
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:serve_pdf',
args=(self.dirname, 9999, pdf_filename)))
self.assertResponseNotFound(response)
def test_serve_pdf_bad_filename(self):
"""Generate an open-this-PDF view with a bad filename and ensure the response is a 404"""
office_id = self.center.office.id
pdf_filename = '{}_zzzz.pdf'.format(self.center.center_id)
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:serve_pdf',
args=(self.dirname, office_id, pdf_filename)))
self.assertResponseNotFound(response)
def test_serve_zip(self):
"""Generate a download-this-zip view and test the response, including headers"""
office_id = self.center.office.id
zip_filename = '{}.zip'.format(office_id)
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:serve_zip', args=(self.dirname, office_id)))
self.assertResponseOK(response)
self.assertEqual('application/zip', response['Content-Type'])
self.assertEqual('attachment; filename="{}"'.format(zip_filename),
response['Content-Disposition'])
# OK to ignore errors since this is a zipfile so we don't expect it to be in UTF-8. We only
# care about the first 4 characters
self.assertEqual('PK' + chr(0o3) + chr(0o4), response.content.decode(errors='ignore')[:4])
# I don't know exactly how many bytes the ZIP file will be, but I want to at least verify
# that it's in a sane range.
self.assertGreaterEqual(500000, len(response.content))
self.assertLessEqual(250000, len(response.content))
def test_serve_zip_bad_filename(self):
"""Generate a download-this-zip view with a bad filename and ensure the response is a 404"""
with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
response = self.client.get(reverse('rollgen:serve_zip', args=(self.dirname, 9999)))
self.assertResponseNotFound(response)
def test_serve_metadata_csv(self):
"""Generate an view for the metadata CSV and test the response, including headers"""
response = self.client.get(reverse('rollgen:polling_csv'))
content = response.content.decode()
expected_filename = 'metadata_polling_{}.csv'.format(django_now().strftime('%Y_%m_%d'))
self.assertResponseOK(response)
self.assertEqual('text/csv', response['Content-Type'])
self.assertEqual('attachment; filename="{}"'.format(expected_filename),
response['Content-Disposition'])
self.assertEqual('Centre #,', content[:9])
self.assertGreaterEqual(500, len(content))
self.assertLessEqual(100, len(content))
class LoginTestCase(ResponseCheckerMixin, TestCase):
    """Test that users not logged in get bounced to the login page for all rollgen views."""
    def test_views_require_login(self):
        """Every rollgen view should redirect an anonymous user to the login page."""
        # Views not listed here take no URL args.
        url_args = {
            'browse_job_offices': ['abcdefg'],
            'browse_job_centers': ['abcdefg'],
            'browse_office_view': ['abcdefg', '42'],
            'serve_pdf': ['abcdefg', '42', 'foo.pdf'],
            'serve_zip': ['abcdefg', '42'],
        }
        for view_name in ROLLGEN_ALL_VIEW_NAMES:
            args = url_args.get(view_name, [])
            response = self.client.get(reverse('rollgen:' + view_name, args=args))
            self.assertResponseRedirectsToLogin(response)
class GroupMembershipNegativeTestCase(ResponseCheckerMixin, TestCase):
    """Test that a user not in a rollgen-specific group can't see any pages"""
    def setUp(self):
        super(GroupMembershipNegativeTestCase, self).setUp()
        pwd = 'alligators'
        self.user = UserFactory(password=pwd)
        logged_in = self.client.login(username=self.user.username, password=pwd)
        self.assertTrue(logged_in)

    def test_views_require_minimal_group_membership(self):
        """Every rollgen view should return 403 for a user without rollgen groups."""
        # Views not listed here take no URL args.
        url_args = {
            'browse_job_offices': ['abcdefg'],
            'browse_job_centers': ['abcdefg'],
            'browse_office_view': ['abcdefg', '42'],
            'serve_pdf': ['abcdefg', '42', 'foo.pdf'],
            'serve_zip': ['abcdefg', '42'],
        }
        for view_name in ROLLGEN_ALL_VIEW_NAMES:
            args = url_args.get(view_name, [])
            response = self.client.get(reverse('rollgen:' + view_name, args=args))
            self.assertResponseForbidden(response)
class GroupMembershipPositiveTestCase(ResponseCheckerMixin, TestJobBase):
    """Test that users with appropriate permissions can see stuff"""
    @property
    def faux_output_dir(self):
        # Parent of the job's output path, so the job dir is browsable.
        return os.path.normpath(os.path.join(self.output_path, '..'))

    def login(self, login_as_superuser=False):
        """Create a user and log in."""
        self.user.is_superuser = login_as_superuser
        self.user.save()
        self.assertTrue(self.client.login(username=self.user.username, password=self.password))

    def setUp(self):
        super(GroupMembershipPositiveTestCase, self).setUp()
        self.login()
        self.input_arguments['phase'] = 'in-person'
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            self.job = Job('in-person', [self.center], self.input_arguments,
                           self.user.username, self.output_path)
            self.job.generate_rolls()
        self.dirname = os.path.basename(self.job.output_path)

    def _url_args(self, view_name):
        # Map a view name to the reverse() args it needs; unknown names take none.
        office_id = str(self.center.office.id)
        mapping = {
            'browse_job_offices': [self.dirname],
            'browse_job_centers': [self.dirname],
            'browse_office_view': [self.dirname, office_id],
            'serve_pdf': [self.dirname, office_id,
                          str(self.center.center_id) + '_book_f.pdf'],
            'serve_zip': [self.dirname, office_id],
        }
        return mapping.get(view_name, [])

    def _assert_views_ok(self, view_names):
        # GET each named view and require a 200 response.
        for view_name in view_names:
            with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
                response = self.client.get(reverse('rollgen:' + view_name,
                                                   args=self._url_args(view_name)))
            self.assertResponseOK(response)

    def test_views_allow_superuser(self):
        """test that all rollgen views are available to superusers"""
        self.login(login_as_superuser=True)
        self._assert_views_ok(ROLLGEN_ALL_VIEW_NAMES)

    def test_views_for_rollgen_view_job_group(self):
        """test that most rollgen views are available to users in rollgen_view_job"""
        self.user.groups.add(Group.objects.get(name='rollgen_view_job'))
        self._assert_views_ok(ROLLGEN_READ_VIEW_NAMES)
        # New/create rollgen should not be available
        with override_settings(ROLLGEN_OUTPUT_DIR=self.faux_output_dir):
            response = self.client.get(reverse('rollgen:new'))
        self.assertResponseForbidden(response)

    def test_views_for_rollgen_create_job_group(self):
        """test that all rollgen views are available to users in rollgen_create_job"""
        self.user.groups.add(Group.objects.get(name='rollgen_create_job'))
        self._assert_views_ok(ROLLGEN_ALL_VIEW_NAMES)
| {
"content_hash": "4887e0224af37fa0459266016657ed59",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 100,
"avg_line_length": 46.12537764350453,
"alnum_prop": 0.63248730964467,
"repo_name": "SmartElect/SmartElect",
"id": "55acf9b2406d0f817c790a9ba65728eb9090db6a",
"size": "30552",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "rollgen/tests/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43928"
},
{
"name": "HTML",
"bytes": "175822"
},
{
"name": "JavaScript",
"bytes": "475284"
},
{
"name": "Python",
"bytes": "1848271"
},
{
"name": "Shell",
"bytes": "1834"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class LegendrankValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the scattersmith trace's ``legendrank`` property."""

    def __init__(self, plotly_name="legendrank", parent_name="scattersmith", **kwargs):
        # Default the edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "style")
        super(LegendrankValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "e1cf57a6b619f2ee67d8d83020d6896a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.6359223300970874,
"repo_name": "plotly/plotly.py",
"id": "35a094d3c358d20deb6d2e083a503ba33fd4d9c2",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattersmith/_legendrank.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import numpy as np
# log10(2) written out to more digits than float64 precision; used below to
# convert the base-2 exponent reported by np.frexp into a base-10 exponent.
__logBase10of2 = 3.010299956639811952137388947244930267681898814621085413104274611e-1


def round_to_n(x, n):
    """Round ``x`` (a real scalar or array) to ``n`` significant figures.

    Raises TypeError when ``n`` is not an integer or ``x`` is not real, and
    ValueError when ``n`` is not positive.
    """
    n_is_integral = type(n) is int or np.issubdtype(n, np.integer)
    if not n_is_integral:
        raise TypeError("RoundToSigFigs: sigfigs must be an integer.")
    if not np.all(np.isreal(x)):
        raise TypeError("RoundToSigFigs: all x must be real.")
    if n <= 0:
        raise ValueError("RoundtoSigFigs: sigfigs must be positive.")

    # Split x into mantissa * 2**exponent, then express the exponent in
    # base 10 as an integer part plus a fractional remainder.
    mantissas, binary_exponents = np.frexp(x)
    decimal_exponents = __logBase10of2 * binary_exponents
    integer_parts = np.floor(decimal_exponents)

    # Fold the fractional part of the decimal exponent into the mantissa so
    # rounding to n-1 decimal places yields n significant figures overall.
    scaled = mantissas * 10.0 ** (decimal_exponents - integer_parts)
    return np.around(scaled, decimals=n - 1) * 10.0 ** integer_parts
| {
"content_hash": "eaee5002807afcd3bb69a1eac0b1cb75",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 28.958333333333332,
"alnum_prop": 0.6776978417266187,
"repo_name": "RDCEP/atlas-viewer",
"id": "4b6cd8a9c2404f82b4897c0f41f25b094f2c391c",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "atlas/utils/round_to_n.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24793"
},
{
"name": "HTML",
"bytes": "7864"
},
{
"name": "JavaScript",
"bytes": "24163"
},
{
"name": "Python",
"bytes": "23748"
}
],
"symlink_target": ""
} |
from enable.savage.svg.css.identifier import *
| {
"content_hash": "82daf15cd0369f8228a4b4e2475f365d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 46,
"avg_line_length": 47,
"alnum_prop": 0.8085106382978723,
"repo_name": "enthought/etsproxy",
"id": "c7858b2068c422a82410d26370a05d2b64b054a3",
"size": "62",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/savage/svg/css/identifier.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import uuid
import six
from boto3 import Session
from moto.core import ACCOUNT_ID
from moto.core import BaseBackend
from moto.core.exceptions import RESTError
from moto.s3 import s3_backends
from moto.ec2 import ec2_backends
from moto.elb import elb_backends
from moto.elbv2 import elbv2_backends
from moto.kinesis import kinesis_backends
from moto.kms import kms_backends
from moto.rds2 import rds2_backends
from moto.glacier import glacier_backends
from moto.redshift import redshift_backends
from moto.emr import emr_backends
# Left: EC2 ElastiCache RDS ELB CloudFront WorkSpaces Lambda EMR Glacier Kinesis Redshift Route53
# StorageGateway DynamoDB MachineLearning ACM DirectConnect DirectoryService CloudHSM
# Inspector Elasticsearch
class ResourceGroupsTaggingAPIBackend(BaseBackend):
    """Backend for the Resource Groups Tagging API.

    Aggregates tags held by the per-service moto backends (S3, EC2, ELBv2,
    KMS, RDS, ...) and serves them through paginated resource/key/value
    queries.
    """

    def __init__(self, region_name=None):
        super(ResourceGroupsTaggingAPIBackend, self).__init__()
        self.region_name = region_name

        # Pagination state, keyed by token:
        # Like 'someuuid': {'gen': <generator>, 'misc': None}
        # Misc is there for peeking from a generator and it cant
        # fit in the current request. As we only store generators
        # theres not really any point to clean up
        self._pages = {}

    def reset(self):
        """Return this backend to a pristine state, preserving its region."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    @property
    def s3_backend(self):
        """
        :rtype: moto.s3.models.S3Backend
        """
        return s3_backends["global"]

    @property
    def ec2_backend(self):
        """
        :rtype: moto.ec2.models.EC2Backend
        """
        return ec2_backends[self.region_name]

    @property
    def elb_backend(self):
        """
        :rtype: moto.elb.models.ELBBackend
        """
        return elb_backends[self.region_name]

    @property
    def elbv2_backend(self):
        """
        :rtype: moto.elbv2.models.ELBv2Backend
        """
        return elbv2_backends[self.region_name]

    @property
    def kinesis_backend(self):
        """
        :rtype: moto.kinesis.models.KinesisBackend
        """
        return kinesis_backends[self.region_name]

    @property
    def kms_backend(self):
        """
        :rtype: moto.kms.models.KmsBackend
        """
        return kms_backends[self.region_name]

    @property
    def rds_backend(self):
        """
        :rtype: moto.rds2.models.RDS2Backend
        """
        return rds2_backends[self.region_name]

    @property
    def glacier_backend(self):
        """
        :rtype: moto.glacier.models.GlacierBackend
        """
        return glacier_backends[self.region_name]

    @property
    def emr_backend(self):
        """
        :rtype: moto.emr.models.ElasticMapReduceBackend
        """
        return emr_backends[self.region_name]

    @property
    def redshift_backend(self):
        """
        :rtype: moto.redshift.models.RedshiftBackend
        """
        return redshift_backends[self.region_name]

    def _get_resources_generator(self, tag_filters=None, resource_type_filters=None):
        """Yield ``{'ResourceARN': ..., 'Tags': [...]}`` for each tagged
        resource that matches ``tag_filters`` and ``resource_type_filters``.
        """
        # Look at
        # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
        # TODO move these to their respective backends
        filters = []
        # Compile each tag filter into a predicate over a (key, value) pair.
        # ``tag_filters or []`` tolerates the default tag_filters=None, which
        # previously raised TypeError on iteration.
        for tag_filter_dict in tag_filters or []:
            values = tag_filter_dict.get("Values", [])
            if len(values) == 0:
                # Check key matches
                filters.append(lambda t, v, key=tag_filter_dict["Key"]: t == key)
            elif len(values) == 1:
                # Check its exactly the same as key, value
                filters.append(
                    lambda t, v, key=tag_filter_dict["Key"], value=values[0]: t == key
                    and v == value
                )
            else:
                # Check key matches and value is one of the provided values
                filters.append(
                    lambda t, v, key=tag_filter_dict["Key"], vl=values: t == key
                    and v in vl
                )

        def tag_filter(tag_list):
            # A resource passes when every compiled filter is satisfied by at
            # least one of its tags (AND across filters, OR across tags).
            result = []
            if tag_filters:
                for f in filters:
                    temp_result = []
                    for tag in tag_list:
                        f_result = f(tag["Key"], tag["Value"])
                        temp_result.append(f_result)
                    result.append(any(temp_result))
                return all(result)
            else:
                return True

        # Do S3, resource type s3
        if not resource_type_filters or "s3" in resource_type_filters:
            for bucket in self.s3_backend.buckets.values():
                tags = self.s3_backend.tagger.list_tags_for_resource(bucket.arn)["Tags"]
                if not tags or not tag_filter(
                    tags
                ):  # Skip if no tags, or invalid filter
                    continue
                yield {"ResourceARN": "arn:aws:s3:::" + bucket.name, "Tags": tags}

        # EC2 tags
        def get_ec2_tags(res_id):
            result = []
            for key, value in self.ec2_backend.tags.get(res_id, {}).items():
                result.append({"Key": key, "Value": value})
            return result

        # EC2 AMI, resource type ec2:image
        if (
            not resource_type_filters
            or "ec2" in resource_type_filters
            or "ec2:image" in resource_type_filters
        ):
            for ami in self.ec2_backend.amis.values():
                tags = get_ec2_tags(ami.id)
                if not tags or not tag_filter(
                    tags
                ):  # Skip if no tags, or invalid filter
                    continue
                yield {
                    "ResourceARN": "arn:aws:ec2:{0}::image/{1}".format(
                        self.region_name, ami.id
                    ),
                    "Tags": tags,
                }

        # EC2 Instance, resource type ec2:instance
        if (
            not resource_type_filters
            or "ec2" in resource_type_filters
            or "ec2:instance" in resource_type_filters
        ):
            for reservation in self.ec2_backend.reservations.values():
                for instance in reservation.instances:
                    tags = get_ec2_tags(instance.id)
                    if not tags or not tag_filter(
                        tags
                    ):  # Skip if no tags, or invalid filter
                        continue
                    yield {
                        "ResourceARN": "arn:aws:ec2:{0}::instance/{1}".format(
                            self.region_name, instance.id
                        ),
                        "Tags": tags,
                    }

        # EC2 NetworkInterface, resource type ec2:network-interface
        if (
            not resource_type_filters
            or "ec2" in resource_type_filters
            or "ec2:network-interface" in resource_type_filters
        ):
            for eni in self.ec2_backend.enis.values():
                tags = get_ec2_tags(eni.id)
                if not tags or not tag_filter(
                    tags
                ):  # Skip if no tags, or invalid filter
                    continue
                yield {
                    "ResourceARN": "arn:aws:ec2:{0}::network-interface/{1}".format(
                        self.region_name, eni.id
                    ),
                    "Tags": tags,
                }

        # TODO EC2 ReservedInstance

        # EC2 SecurityGroup, resource type ec2:security-group
        if (
            not resource_type_filters
            or "ec2" in resource_type_filters
            or "ec2:security-group" in resource_type_filters
        ):
            for vpc in self.ec2_backend.groups.values():
                for sg in vpc.values():
                    tags = get_ec2_tags(sg.id)
                    if not tags or not tag_filter(
                        tags
                    ):  # Skip if no tags, or invalid filter
                        continue
                    yield {
                        "ResourceARN": "arn:aws:ec2:{0}::security-group/{1}".format(
                            self.region_name, sg.id
                        ),
                        "Tags": tags,
                    }

        # EC2 Snapshot, resource type ec2:snapshot
        if (
            not resource_type_filters
            or "ec2" in resource_type_filters
            or "ec2:snapshot" in resource_type_filters
        ):
            for snapshot in self.ec2_backend.snapshots.values():
                tags = get_ec2_tags(snapshot.id)
                if not tags or not tag_filter(
                    tags
                ):  # Skip if no tags, or invalid filter
                    continue
                yield {
                    "ResourceARN": "arn:aws:ec2:{0}::snapshot/{1}".format(
                        self.region_name, snapshot.id
                    ),
                    "Tags": tags,
                }

        # TODO EC2 SpotInstanceRequest

        # EC2 Volume, resource type ec2:volume
        if (
            not resource_type_filters
            or "ec2" in resource_type_filters
            or "ec2:volume" in resource_type_filters
        ):
            for volume in self.ec2_backend.volumes.values():
                tags = get_ec2_tags(volume.id)
                if not tags or not tag_filter(
                    tags
                ):  # Skip if no tags, or invalid filter
                    continue
                yield {
                    "ResourceARN": "arn:aws:ec2:{0}::volume/{1}".format(
                        self.region_name, volume.id
                    ),
                    "Tags": tags,
                }

        # TODO add these to the keys and values functions / combine functions
        # ELB, resource type elasticloadbalancing:loadbalancer
        def get_elbv2_tags(arn):
            # Look the load balancer up by the arn argument.  (Previously this
            # read the loop variable ``elb`` from the enclosing scope instead
            # of its parameter.)
            result = []
            for key, value in self.elbv2_backend.load_balancers[arn].tags.items():
                result.append({"Key": key, "Value": value})
            return result

        if (
            not resource_type_filters
            or "elasticloadbalancing" in resource_type_filters
            or "elasticloadbalancing:loadbalancer" in resource_type_filters
        ):
            for elb in self.elbv2_backend.load_balancers.values():
                tags = get_elbv2_tags(elb.arn)
                if not tag_filter(tags):  # Skip if no tags, or invalid filter
                    continue

                yield {"ResourceARN": "{0}".format(elb.arn), "Tags": tags}

        # ELB Target Group, resource type elasticloadbalancing:targetgroup
        def get_target_group_tags(arn):
            # Look the target group up by the arn argument.  (Previously this
            # read the loop variable ``target_group`` from the enclosing scope
            # instead of its parameter.)
            result = []
            for key, value in self.elbv2_backend.target_groups[arn].tags.items():
                result.append({"Key": key, "Value": value})
            return result

        if (
            not resource_type_filters
            or "elasticloadbalancing" in resource_type_filters
            or "elasticloadbalancing:targetgroup" in resource_type_filters
        ):
            for target_group in self.elbv2_backend.target_groups.values():
                tags = get_target_group_tags(target_group.arn)
                if not tag_filter(tags):  # Skip if no tags, or invalid filter
                    continue

                yield {"ResourceARN": "{0}".format(target_group.arn), "Tags": tags}

        # EMR Cluster

        # Glacier Vault

        # Kinesis

        # KMS
        def get_kms_tags(kms_key_id):
            result = []
            for tag in self.kms_backend.list_resource_tags(kms_key_id).get("Tags", []):
                result.append({"Key": tag["TagKey"], "Value": tag["TagValue"]})
            return result

        if not resource_type_filters or "kms" in resource_type_filters:
            for kms_key in self.kms_backend.list_keys():
                tags = get_kms_tags(kms_key.id)
                if not tag_filter(tags):  # Skip if no tags, or invalid filter
                    continue

                yield {"ResourceARN": "{0}".format(kms_key.arn), "Tags": tags}

        # RDS Instance
        if (
            not resource_type_filters
            or "rds" in resource_type_filters
            or "rds:db" in resource_type_filters
        ):
            for database in self.rds_backend.databases.values():
                tags = database.get_tags()
                if not tags or not tag_filter(tags):
                    continue
                yield {
                    "ResourceARN": database.db_instance_arn,
                    "Tags": tags,
                }

        # RDS Reserved Database Instance
        # RDS Option Group
        # RDS Parameter Group
        # RDS Security Group

        # RDS Snapshot
        if (
            not resource_type_filters
            or "rds" in resource_type_filters
            or "rds:snapshot" in resource_type_filters
        ):
            for snapshot in self.rds_backend.snapshots.values():
                tags = snapshot.get_tags()
                if not tags or not tag_filter(tags):
                    continue
                yield {
                    "ResourceARN": snapshot.snapshot_arn,
                    "Tags": tags,
                }

        # RDS Subnet Group
        # RDS Event Subscription

        # RedShift Cluster
        # RedShift Hardware security module (HSM) client certificate
        # RedShift HSM connection
        # RedShift Parameter group
        # RedShift Snapshot
        # RedShift Subnet group

        # VPC
        if (
            not resource_type_filters
            or "ec2" in resource_type_filters
            or "ec2:vpc" in resource_type_filters
        ):
            for vpc in self.ec2_backend.vpcs.values():
                tags = get_ec2_tags(vpc.id)
                if not tags or not tag_filter(
                    tags
                ):  # Skip if no tags, or invalid filter
                    continue
                yield {
                    "ResourceARN": "arn:aws:ec2:{0}:{1}:vpc/{2}".format(
                        self.region_name, ACCOUNT_ID, vpc.id
                    ),
                    "Tags": tags,
                }
        # VPC Customer Gateway
        # VPC DHCP Option Set
        # VPC Internet Gateway
        # VPC Network ACL
        # VPC Route Table
        # VPC Subnet
        # VPC Virtual Private Gateway
        # VPC VPN Connection

    def _get_tag_keys_generator(self):
        """Yield every tag key known to the S3 and EC2 backends (duplicates
        are yielded once per tagged resource).
        """
        # Look at
        # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html

        # Do S3, resource type s3
        for bucket in self.s3_backend.buckets.values():
            tags = self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn)
            for key, _ in tags.items():
                yield key

        # EC2 tags
        def get_ec2_keys(res_id):
            result = []
            for key in self.ec2_backend.tags.get(res_id, {}):
                result.append(key)
            return result

        # EC2 AMI, resource type ec2:image
        for ami in self.ec2_backend.amis.values():
            for key in get_ec2_keys(ami.id):
                yield key

        # EC2 Instance, resource type ec2:instance
        for reservation in self.ec2_backend.reservations.values():
            for instance in reservation.instances:
                for key in get_ec2_keys(instance.id):
                    yield key

        # EC2 NetworkInterface, resource type ec2:network-interface
        for eni in self.ec2_backend.enis.values():
            for key in get_ec2_keys(eni.id):
                yield key

        # TODO EC2 ReservedInstance

        # EC2 SecurityGroup, resource type ec2:security-group
        for vpc in self.ec2_backend.groups.values():
            for sg in vpc.values():
                for key in get_ec2_keys(sg.id):
                    yield key

        # EC2 Snapshot, resource type ec2:snapshot
        for snapshot in self.ec2_backend.snapshots.values():
            for key in get_ec2_keys(snapshot.id):
                yield key

        # TODO EC2 SpotInstanceRequest

        # EC2 Volume, resource type ec2:volume
        for volume in self.ec2_backend.volumes.values():
            for key in get_ec2_keys(volume.id):
                yield key

    def _get_tag_values_generator(self, tag_key):
        """Yield every value stored under ``tag_key`` in the S3 and EC2
        backends (duplicates are yielded once per tagged resource).
        """
        # Look at
        # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html

        # Do S3, resource type s3
        for bucket in self.s3_backend.buckets.values():
            tags = self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn)
            for key, value in tags.items():
                if key == tag_key:
                    yield value

        # EC2 tags
        def get_ec2_values(res_id):
            result = []
            for key, value in self.ec2_backend.tags.get(res_id, {}).items():
                if key == tag_key:
                    result.append(value)
            return result

        # EC2 AMI, resource type ec2:image
        for ami in self.ec2_backend.amis.values():
            for value in get_ec2_values(ami.id):
                yield value

        # EC2 Instance, resource type ec2:instance
        for reservation in self.ec2_backend.reservations.values():
            for instance in reservation.instances:
                for value in get_ec2_values(instance.id):
                    yield value

        # EC2 NetworkInterface, resource type ec2:network-interface
        for eni in self.ec2_backend.enis.values():
            for value in get_ec2_values(eni.id):
                yield value

        # TODO EC2 ReservedInstance

        # EC2 SecurityGroup, resource type ec2:security-group
        for vpc in self.ec2_backend.groups.values():
            for sg in vpc.values():
                for value in get_ec2_values(sg.id):
                    yield value

        # EC2 Snapshot, resource type ec2:snapshot
        for snapshot in self.ec2_backend.snapshots.values():
            for value in get_ec2_values(snapshot.id):
                yield value

        # TODO EC2 SpotInstanceRequest

        # EC2 Volume, resource type ec2:volume
        for volume in self.ec2_backend.volumes.values():
            for value in get_ec2_values(volume.id):
                yield value

    def get_resources(
        self,
        pagination_token=None,
        resources_per_page=50,
        tags_per_page=100,
        tag_filters=None,
        resource_type_filters=None,
    ):
        """Return one page of tagged resources as (pagination_token, result).

        ``pagination_token`` is None when the listing is exhausted.  Raises
        RESTError for out-of-range page sizes or an unknown token.
        """
        # Simple range checking.  (The previous comparisons of the form
        # ``100 >= tags_per_page >= 500`` could never be true, so the limits
        # were silently ignored.)
        if not 100 <= tags_per_page <= 500:
            raise RESTError(
                "InvalidParameterException", "TagsPerPage must be between 100 and 500"
            )
        if not 1 <= resources_per_page <= 50:
            raise RESTError(
                "InvalidParameterException", "ResourcesPerPage must be between 1 and 50"
            )

        # If we have a token, go and find the respective generator, or error
        if pagination_token:
            if pagination_token not in self._pages:
                raise RESTError(
                    "PaginationTokenExpiredException", "Token does not exist"
                )

            generator = self._pages[pagination_token]["gen"]
            left_over = self._pages[pagination_token]["misc"]
        else:
            generator = self._get_resources_generator(
                tag_filters=tag_filters, resource_type_filters=resource_type_filters
            )
            left_over = None

        result = []
        current_tags = 0
        current_resources = 0
        if left_over:
            result.append(left_over)
            current_resources += 1
            current_tags += len(left_over["Tags"])

        try:
            while True:
                # Generator format: [{'ResourceARN': str, 'Tags': [{'Key': str, 'Value': str]}, ...]
                next_item = six.next(generator)
                resource_tags = len(next_item["Tags"])

                if current_resources >= resources_per_page:
                    break
                if current_tags + resource_tags >= tags_per_page:
                    break

                current_resources += 1
                current_tags += resource_tags

                result.append(next_item)

        except StopIteration:
            # Finished generator before invalidating page limiting constraints
            return None, result

        # Didn't hit StopIteration so there's stuff left in generator
        new_token = str(uuid.uuid4())
        # Stash the generator plus the item that overflowed this page.
        self._pages[new_token] = {"gen": generator, "misc": next_item}

        # Token is single-use; discard it so it cannot be replayed.
        if pagination_token:
            del self._pages[pagination_token]

        return new_token, result

    def get_tag_keys(self, pagination_token=None):
        """Return one page (up to 128 entries) of tag keys as
        (pagination_token, result)."""

        if pagination_token:
            if pagination_token not in self._pages:
                raise RESTError(
                    "PaginationTokenExpiredException", "Token does not exist"
                )

            generator = self._pages[pagination_token]["gen"]
            left_over = self._pages[pagination_token]["misc"]
        else:
            generator = self._get_tag_keys_generator()
            left_over = None

        result = []
        current_tags = 0
        if left_over:
            result.append(left_over)
            current_tags += 1

        try:
            while True:
                # Generator format: ['tag', 'tag', 'tag', ...]
                next_item = six.next(generator)

                if current_tags + 1 >= 128:
                    break

                current_tags += 1

                result.append(next_item)

        except StopIteration:
            # Finished generator before invalidating page limiting constraints
            return None, result

        # Didn't hit StopIteration so there's stuff left in generator
        new_token = str(uuid.uuid4())
        # Stash the generator plus the item that overflowed this page.
        self._pages[new_token] = {"gen": generator, "misc": next_item}

        # Token is single-use; discard it so it cannot be replayed.
        if pagination_token:
            del self._pages[pagination_token]

        return new_token, result

    def get_tag_values(self, pagination_token, key):
        """Return one page (up to 128 entries) of values stored under ``key``
        as (pagination_token, result)."""

        if pagination_token:
            if pagination_token not in self._pages:
                raise RESTError(
                    "PaginationTokenExpiredException", "Token does not exist"
                )

            generator = self._pages[pagination_token]["gen"]
            left_over = self._pages[pagination_token]["misc"]
        else:
            generator = self._get_tag_values_generator(key)
            left_over = None

        result = []
        current_tags = 0
        if left_over:
            result.append(left_over)
            current_tags += 1

        try:
            while True:
                # Generator format: ['value', 'value', 'value', ...]
                next_item = six.next(generator)

                if current_tags + 1 >= 128:
                    break

                current_tags += 1

                result.append(next_item)

        except StopIteration:
            # Finished generator before invalidating page limiting constraints
            return None, result

        # Didn't hit StopIteration so there's stuff left in generator
        new_token = str(uuid.uuid4())
        # Stash the generator plus the item that overflowed this page.
        self._pages[new_token] = {"gen": generator, "misc": next_item}

        # Token is single-use; discard it so it cannot be replayed.
        if pagination_token:
            del self._pages[pagination_token]

        return new_token, result
# These methods will be called from responses.py.
# They should call a tag function inside of the moto module
# that governs the resource, that way if the target module
# changes how tags are delt with theres less to change
# def tag_resources(self, resource_arn_list, tags):
# return failed_resources_map
#
# def untag_resources(self, resource_arn_list, tag_keys):
# return failed_resources_map
# One backend per region across the standard, GovCloud, and China partitions.
resourcegroupstaggingapi_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions(
        "resourcegroupstaggingapi", partition_name=_partition
    ):
        resourcegroupstaggingapi_backends[region] = ResourceGroupsTaggingAPIBackend(
            region
        )
| {
"content_hash": "fd9197891c4825640458061305e6f112",
"timestamp": "",
"source": "github",
"line_count": 719,
"max_line_length": 100,
"avg_line_length": 34.48539638386648,
"alnum_prop": 0.5365194595684614,
"repo_name": "william-richard/moto",
"id": "f8d61ea1944147930617bbb1ac0bd58e4cc93c96",
"size": "24795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/resourcegroupstaggingapi/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the Vote.topic field and make several date/role fields nullable."""

    dependencies = [
        ('votaciones_leyes', '0001_initial'),
    ]

    operations = [
        # Votes no longer reference a topic.
        migrations.RemoveField(
            model_name='vote',
            name='topic',
        ),
        # Allow a motion's proposal date to be unset.
        migrations.AlterField(
            model_name='motion',
            name='proposal_date',
            field=models.DateField(null=True),
        ),
        # Allow a vote's role to be unset.
        migrations.AlterField(
            model_name='vote',
            name='role',
            field=models.CharField(max_length=512, null=True),
        ),
        # Allow vote events without recorded end/start timestamps.
        migrations.AlterField(
            model_name='voteevent',
            name='end_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='voteevent',
            name='start_date',
            field=models.DateTimeField(null=True),
        ),
    ]
| {
"content_hash": "951462b57a0ac881266750ed256cf525",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 62,
"avg_line_length": 25.89189189189189,
"alnum_prop": 0.535490605427975,
"repo_name": "lfalvarez/votaciones_ley_pesca",
"id": "69432bcabb32a5988626b79a3b6f08197b12d384",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "votaciones_leyes/migrations/0002_auto_20150620_2101.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "HTML",
"bytes": "6004"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "20325"
}
],
"symlink_target": ""
} |
"""
Views for Scholarship app.
"""
import csv
import os
from venv import create
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.http import require_GET, require_POST
from django.utils.datastructures import MultiValueDict
from django_downloadview import sendfile
from django.core.exceptions import ObjectDoesNotExist
from . import notify
from .models import (
AcademicResource,
LibraryItem,
StudyHoursRecord,
TrackedUser,
Course,
CourseSection,
)
from .forms import (
AcademicResourceForm,
LibraryItemForm,
StudyHoursRecordForm,
TrackedUserForm,
CourseForm,
CourseSectionForm,
)
def request_is_from_tracked_user(request):
    """
    Returns whether the request is from a user who must log study hours
    (i.e. has a TrackedUser row with a positive hours requirement).
    Returns: bool
    """
    return (
        TrackedUser.objects.filter(number_of_hours__gt=0, user=request.user).count()
        == 1
    )
def request_is_from_scholarship_head(request):  # pylint: disable=invalid-name
    """
    Returns whether the request is from the scholarship head user
    (holder of the Scholarship.scholarship_head permission).
    Returns: bool
    """
    return request.user.has_perm("Scholarship.scholarship_head")
def get_currently_tracked_users():
    """
    Returns a queryset of TrackedUsers who must currently log study hours
    (their required number_of_hours is positive).
    Returns: Queryset
    """
    return TrackedUser.objects.filter(number_of_hours__gt=0)
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
def download_hours(_request):
    """
    Export every StudyHoursRecord from the db as a CSV attachment.
    """
    response = HttpResponse(content_type="text/csv")
    response["Content-Disposition"] = "attachment;filename=study_hours.csv"
    writer = csv.writer(response)
    header = [
        "Username",
        "Name",
        "# of Hours",
        "Reported Date",
        "Submission Timestamp",
    ]
    writer.writerow(header)
    for record in StudyHoursRecord.objects.all():
        full_name = record.user.first_name + " " + record.user.last_name
        row = [
            record.user.username,
            full_name,
            record.number_of_hours,
            record.date,
            record.time_stamp,
        ]
        writer.writerow(row)
    return response
@login_required
@require_GET
def index(_request):
    """
    Landing page for the Scholarship app; redirects to the library view.
    """
    return redirect("scholarship-library")
@login_required
@require_GET
def study_hours(request):
    """
    Render the study-hours page.

    Adds scholarship-head context (tracked users plus a requirement form)
    and/or tracked-user context (a reporting form plus this week's
    records), depending on who is asking; unused pieces stay None so the
    template can branch on them.
    """
    # Figure out if this request is from the scholarship head
    is_scholarship_head = request_is_from_scholarship_head(request)
    # Figure out if this request is from a user who currently has their
    # study hours tracked
    is_tracked_user = request_is_from_tracked_user(request)
    # Initialize objects if necessary for the scholarship head
    currently_tracked_users = None
    update_requirements_form = None
    if is_scholarship_head:
        currently_tracked_users = get_currently_tracked_users()
        update_requirements_form = TrackedUserForm()
    # Initialize objects if necessary for a tracked user
    record_hours_form = None
    tracked_user_object = None
    tracked_user_records_this_week = None
    if is_tracked_user:
        record_hours_form = StudyHoursRecordForm()
        tracked_user_object = TrackedUser.objects.get(user=request.user)
        tracked_user_records = StudyHoursRecord.objects.filter(
            user=request.user
        ).order_by("-date")
        tracked_user_records_this_week = [
            record for record in tracked_user_records if record.happened_this_week()
        ]
    context = {
        "is_scholarship_head": is_scholarship_head,
        "is_tracked_user": is_tracked_user,
        "currently_tracked_users": currently_tracked_users,
        "update_requirements_form": update_requirements_form,
        "record_hours_form": record_hours_form,
        "tracked_user_object": tracked_user_object,
        "tracked_user_records_this_week": tracked_user_records_this_week,
    }
    return render(request, "scholarship/study_hours.html", context)
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def update_requirements(request):
    """
    Update a user's required number of study hours from the posted
    TrackedUserForm, notifying the user whether they are now tracked or
    untracked; then redirect back to the study-hours page.
    """
    update_requirements_form = TrackedUserForm(request.POST)
    if update_requirements_form.is_valid():
        tracked_user = update_requirements_form.save()
        if tracked_user.number_of_hours == 0:
            notify.study_hours_untracked(tracked_user)
        else:
            notify.study_hours_tracked(tracked_user)
        message = "User's study hours requirement successfully updated."
        messages.info(request, message, extra_tags="track")
    else:
        # BUG FIX: the original had a trailing comma after the closing
        # string, which made `message` a 1-tuple instead of a string and
        # rendered as "('Both user name ...',)" in the flash message.
        message = (
            "Both user name and number of hours "
            + "is required. Number of hours cannot be < 0."
        )
        messages.error(request, message, extra_tags="track")
    return redirect("scholarship-study_hours")
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def untrack_user(request, user):
    """
    Stop tracking study hours for the given TrackedUser (by pk) by
    zeroing their requirement, notify them, and redirect back.
    """
    # NOTE(review): .get() raises TrackedUser.DoesNotExist for an unknown
    # pk — unhandled here; confirm callers only pass valid pks.
    tracked_user = TrackedUser.objects.get(pk=user)
    tracked_user.number_of_hours = 0
    tracked_user.save()
    notify.study_hours_untracked(tracked_user)
    message = "User's study hours requirement successfully updated."
    messages.info(request, message, extra_tags="track")
    return redirect("scholarship-study_hours")
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def send_probation(request, user):
    """
    Notify the given TrackedUser (by pk) that they are on social
    probation, flash a confirmation, and return to the study-hours page.
    """
    target = TrackedUser.objects.get(pk=user)
    notify.social_probation(target)
    messages.info(
        request,
        "User has successfully been notified about their social probation.",
        extra_tags="track",
    )
    return redirect("scholarship-study_hours")
@login_required
@require_POST
def record_hours(request):
    """
    Record a study-hours entry for the requesting user from the posted
    StudyHoursRecordForm, then redirect back to the study-hours page.
    """
    record_hours_form = StudyHoursRecordForm(request.POST)
    if record_hours_form.is_valid():
        study_hours_record = record_hours_form.save(commit=False)
        # Attach the submitting user before the actual save.
        study_hours_record.user = request.user
        study_hours_record.save()
        message = "Study hours successfully reported."
        messages.info(request, message, extra_tags="report")
    else:
        message = (
            "You must input a positive number "
            + "of hours studied. The date studied must "
            + "have taken place this week and not "
            + "in the future."
        )
        messages.error(request, message, extra_tags="report")
    return redirect("scholarship-study_hours")
@login_required
@require_GET
def resources(request):
    """
    Render the academic-resources page: every approved resource plus an
    upload form.
    """
    is_scholarship_head = request_is_from_scholarship_head(request)
    # (Removed a dead `upload_resource_form = None` that was immediately
    # overwritten in the original.)
    upload_resource_form = AcademicResourceForm()
    academic_resources = AcademicResource.objects.filter(approved=True)
    context = {
        "is_scholarship_head": is_scholarship_head,
        "upload_resource_form": upload_resource_form,
        "resources": academic_resources,
    }
    return render(request, "scholarship/resources.html", context)
@login_required
@require_POST
def upload_resource(request):
    """
    Handle a (possibly multi-file) academic-resource upload.

    Each posted PDF becomes its own AcademicResource: uploads by the
    scholarship head are auto-approved, others are queued for approval
    and the head is notified. Non-PDF files are rejected per-file.
    Always returns an empty JSON response; outcomes are flashed via
    the messages framework.
    """
    # Retrieve the files from the MultiValueDictionary
    files = request.FILES.getlist("resource_pdf")
    # Iterate over the list of files, uploading them separately
    for file in files:
        # Build a MultiValueDictionary containing just this file
        # This is done simply because it is the required format of the
        # Academic Resource Form
        mvd = MultiValueDict()
        file_list = [file]
        mvd.setlist("resource_pdf", file_list)
        # Save the Files
        upload_resource_form = AcademicResourceForm(request.POST, mvd)
        # Check if the resource is valid
        if upload_resource_form.is_valid():
            # Save the resource
            resource = upload_resource_form.save(commit=False)
            # Get the resource name and extension
            file_name, extension = os.path.splitext(
                os.path.basename(resource.resource_pdf.name)
            )
            # Set the resource name
            resource.resource_name = file_name
            if extension == ".pdf":
                if request_is_from_scholarship_head(request):
                    resource.approved = True
                    message = file_name + " uploaded successfully!"
                else:
                    resource.approved = False
                    message = file_name + " submitted for approval!"
                    notify.scholarship_content_submitted()
                resource.submittedBy = request.user
                resource.save()
                messages.info(request, message)
            else:
                messages.error(request, file_name + " was not a pdf file.")
        else:
            message = (
                "Failed to upload resource. Make "
                + "sure that you have provided all fields correctly."
            )
            messages.error(request, message)
    # Can add additional response information here if needed in the future
    response = {}
    # if request.META['HTTP_ACCEPT'] == 'application/json':
    #     content_type = 'application/json'
    # else:
    #     content_type = 'text/plain'
    return JsonResponse(response)
@login_required
@require_GET
def download_resource(request, resource):
    """
    View for downloading a resource.

    Approved resources are available to any logged-in user; unapproved
    ones only to the scholarship head.
    """
    resource_obj = AcademicResource.objects.get(pk=resource)
    # Guard clause: unapproved resources are head-only.
    if not (resource_obj.approved or request_is_from_scholarship_head(request)):
        return redirect("pub-permission_denied")
    _, extension = os.path.splitext(
        os.path.basename(resource_obj.resource_pdf.name)
    )
    download_name = resource_obj.resource_name + extension
    return sendfile(
        request,
        resource_obj.resource_pdf.path,
        attachment=True,
        attachment_filename=download_name,
    )
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def delete_resource(_request, resource):
    """
    View for deleting a resource: removes both the stored PDF and the
    database row, then returns to the resources page.
    """
    resource_obj = AcademicResource.objects.get(pk=resource)
    resource_obj.resource_pdf.delete()  # Delete actual file
    resource_obj.delete()
    return redirect("scholarship-resources")
@login_required
@require_GET
def library(request):
    """
    Render the library page: every approved LibraryItem plus an upload
    form.
    """
    is_scholarship_head = request_is_from_scholarship_head(request)
    # (Removed a dead `upload_item_form = None` that was immediately
    # overwritten in the original.)
    upload_item_form = LibraryItemForm()
    items = LibraryItem.objects.filter(approved=True)
    context = {
        "is_scholarship_head": is_scholarship_head,
        "upload_item_form": upload_item_form,
        "items": items,
    }
    return render(request, "scholarship/library.html", context)
@login_required
@require_POST
def upload_libraryitem(request):
    """
    Create a LibraryItem from the posted form and file.

    Uploads from the scholarship head are auto-approved; others are
    queued for approval and the head is notified. Always returns an
    empty JSON response; outcomes are flashed via the messages framework.
    """
    upload_item_form = LibraryItemForm(request.POST, request.FILES)
    if upload_item_form.is_valid():
        item = upload_item_form.save(commit=False)
        if request_is_from_scholarship_head(request):
            item.approved = True
            message = "Item uploaded successfully!"
        else:
            item.approved = False
            message = "Item submitted for approval successfully!"
            notify.scholarship_content_submitted()
        item.submittedBy = request.user
        item.save()
        messages.info(request, message)
    else:
        message = (
            "Failed to upload item. Make "
            + "sure that you have provided all fields correctly."
        )
        messages.error(request, message)
    # Can add additional response information here if needed in the future
    response = {}
    # if request.META['HTTP_ACCEPT'] == 'application/json':
    #     content_type = 'application/json'
    # else:
    #     content_type = 'text/plain'
    return JsonResponse(response)
@login_required
@require_GET
def download_libraryitem(request, item):
    """
    View for downloading a library item.

    Approved items are available to any logged-in user; unapproved ones
    only to the scholarship head.
    """
    item_obj = LibraryItem.objects.get(pk=item)
    allowed = item_obj.approved or request_is_from_scholarship_head(request)
    if allowed:
        _, extension = os.path.splitext(os.path.basename(item_obj.item_pdf.name))
        fname = item_obj.title + extension
        return sendfile(
            request, item_obj.item_pdf.path, attachment=True, attachment_filename=fname
        )
    return redirect("pub-permission_denied")
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def delete_libraryitem(_request, item):
    """
    View for deleting a library item: removes both the stored PDF and
    the database row, then returns to the library page.
    """
    item_obj = LibraryItem.objects.get(pk=item)
    item_obj.item_pdf.delete()  # Delete actual file
    item_obj.delete()
    return redirect("scholarship-library")
@permission_required("Scholarship.scholarship_head", login_url="pub-permission_denied")
@require_GET
def approve(request):
    """
    Render the approval queue: all library items and academic resources
    awaiting scholarship-head approval.
    """
    pending_items = LibraryItem.objects.filter(approved=False)
    pending_resources = AcademicResource.objects.filter(approved=False)
    context = {
        "items": pending_items,
        "resources": pending_resources,
        "is_scholarship_head": request_is_from_scholarship_head(request),
    }
    return render(request, "scholarship/approve.html", context)
@permission_required("Scholarship.scholarship_head", login_url="pub-permission_denied")
@require_POST
def approve_resource(request, resource):
    """
    Mark the given AcademicResource (by pk) as approved, making it
    visible to all users; flash an error if it no longer exists.
    """
    try:
        resource_obj = AcademicResource.objects.get(pk=resource)
    except AcademicResource.DoesNotExist:
        messages.error(
            request,
            "The resource you tried to approve no longer exists.",
        )
    else:
        resource_obj.approved = True
        resource_obj.save()
        messages.info(
            request,
            "Resource approved successfully. It is now visible to all users.",
        )
    return redirect("scholarship-approve")
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def decline_resource(request, resource):
    """
    Decline (delete) a pending AcademicResource, including its file on
    disk; flash an error if it was already declined.
    """
    try:
        resource_obj = AcademicResource.objects.get(pk=resource)
    except AcademicResource.DoesNotExist:
        messages.error(
            request,
            "The resource you tried to decline has already been declined.",
        )
    else:
        resource_obj.resource_pdf.delete()  # Delete actual file
        resource_obj.delete()
        messages.info(request, "Resource declined successfully.")
    return redirect("scholarship-approve")
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def approve_libraryitem(request, item):
    """
    Mark the given LibraryItem (by pk) as approved, making it visible
    to all users; flash an error if it no longer exists.
    """
    try:
        item_obj = LibraryItem.objects.get(pk=item)
    except LibraryItem.DoesNotExist:
        messages.error(
            request,
            "The item you tried to approve no longer exists.",
        )
    else:
        item_obj.approved = True
        item_obj.save()
        messages.info(
            request,
            "Item approved successfully. It is now visible to all users.",
        )
    return redirect("scholarship-approve")
@permission_required(
    "Scholarship.scholarship_head",
    login_url="pub-permission_denied",
)
@require_POST
def decline_libraryitem(request, item):
    """
    Decline (delete) a pending LibraryItem, including its file on disk;
    flash an error if it was already declined.
    """
    try:
        item_obj = LibraryItem.objects.get(pk=item)
    except LibraryItem.DoesNotExist:
        messages.error(
            request,
            "The Item you tried to decline has already been declined.",
        )
    else:
        item_obj.item_pdf.delete()  # Delete actual file
        item_obj.delete()
        messages.info(request, "Item declined successfully.")
    return redirect("scholarship-approve")
@login_required
@require_POST
def add_course(request):
    """
    Create a Course from the posted CourseForm and redirect to the
    courses page, flashing the outcome either way.
    """
    form = CourseForm(request.POST)
    if not form.is_valid():
        messages.error(
            request,
            "Please enter a valid course code (ie CS3733) "
            + "and ensure an entry does not already exist",
            extra_tags="report",
        )
        return redirect("scholarship-courses")
    course = form.save(commit=False)
    course.save()
    messages.info(request, "Course successfully recorded.", extra_tags="report")
    return redirect("scholarship-courses")
@login_required
@require_POST
def add_course_section(request):
    """
    Record a CourseSection from the posted form.

    Newly created sections enroll the requester as a participant and
    redirect to that section's page; duplicates and invalid forms flash
    an error and fall through to the courses page.
    """
    add_course_section_form = CourseSectionForm(request.POST)
    if add_course_section_form.is_valid():
        course = add_course_section_form.save(commit=False)
        # Get the section that exists or create a new one
        addedSection, created = CourseSection.objects.get_or_create(
            term=course.term,
            catalog_course=course.catalog_course,
            year=course.year,
            professor=course.professor,
        )
        if not created:
            message = "Course Section already exists"
            messages.error(request, message, extra_tags="report")
        else:
            addedSection.participants.add(request.user)
            message = "Course Section successfully recorded."
            messages.info(request, message, extra_tags="report")
            return redirect("scholarship-section", catalog_code=course.catalog_course)
    else:
        message = "Required fields were not filled out or some field was malformed"
        messages.error(request, message, extra_tags="report")
    return redirect("scholarship-courses")
@login_required
@require_POST
def record_review(request):
    """
    Record a course review from the posted ReviewForm, attributed to the
    requesting user.
    """
    # NOTE(review): ReviewForm is not imported in this module (it is
    # absent from the .forms import list at the top of the file), so
    # calling this view raises NameError. Presumably it should be added
    # to the .forms import — confirm and fix.
    record_review_form = ReviewForm(request.POST)
    if record_review_form.is_valid():
        review_record = record_review_form.save(commit=False)
        review_record.reviewer = request.user
        review_record.save()
        message = "Review successfully reported."
        messages.info(request, message, extra_tags="report")
    else:
        message = "The Review was invalid? what did you do"
        messages.error(request, message, extra_tags="report")
    return redirect("scholarship-record_review")
@login_required
@require_GET
def courses(request):
    """
    View for seeing all courses, with forms to add a course or a course
    section. One-shot flash values left in the session by other views
    are consumed and passed to the template.
    """
    is_scholarship_head = request_is_from_scholarship_head(request)
    all_courses = Course.objects.order_by("-catalog_code")
    # (Removed dead `= None` pre-assignments that were immediately
    # overwritten in the original.)
    add_course_form = CourseForm()
    add_course_section_form = CourseSectionForm()
    # pop() replaces the original try/del/except KeyError dance: read
    # and remove the one-shot session value in a single call.
    error = request.session.pop("scholarship_course_error", None)
    msg = request.session.pop("scholarship_course_msg", None)
    context = {
        "is_scholarship_head": is_scholarship_head,
        "all_courses": all_courses,
        "error": error,
        "msg": msg,
        "add_course_form": add_course_form,
        "add_course_section_form": add_course_section_form,
    }
    return render(request, "scholarship/courses.html", context)
@login_required
@require_GET
def sections(request, catalog_code=None):
    """
    View for seeing the sections of the selected course (or of every
    course when no catalog_code is given), one table row per
    participant of each section.
    """
    # Figure out if this request is from the scholarship head
    is_scholarship_head = request_is_from_scholarship_head(request)
    if catalog_code:
        all_sections = CourseSection.objects.filter(
            catalog_course__catalog_code=catalog_code
        ).order_by("-year", "term")
    else:
        all_sections = CourseSection.objects.order_by("-year", "term")
    add_course_section_form = CourseSectionForm(
        initial={"catalog_course": catalog_code}
    )
    # Flatten (section, participant) pairs into template-friendly rows.
    rows = []
    for section in all_sections:
        for user in section.participants.all():
            rows.append(
                {
                    "brother": user,
                    "term": section.term,
                    "year": section.year,
                    "professor": section.professor,
                }
            )
    context = {
        "is_scholarship_head": is_scholarship_head,
        "course": catalog_code,
        "add_course_section_form": add_course_section_form,
        "rows": rows,
    }
    return render(request, "scholarship/sections.html", context)
| {
"content_hash": "27197875f5280285977e4715a8b5a850",
"timestamp": "",
"source": "github",
"line_count": 732,
"max_line_length": 87,
"avg_line_length": 28.961748633879782,
"alnum_prop": 0.6384433962264151,
"repo_name": "sigmapi-gammaiota/sigmapi-web",
"id": "1901479e840c3bfe3609e5daa8ecf83d0b17b465",
"size": "21200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sigmapiweb/apps/Scholarship/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47173"
},
{
"name": "HTML",
"bytes": "265883"
},
{
"name": "JavaScript",
"bytes": "1338629"
},
{
"name": "Python",
"bytes": "335952"
},
{
"name": "SCSS",
"bytes": "44203"
},
{
"name": "Shell",
"bytes": "3928"
}
],
"symlink_target": ""
} |
from web2kindle.libs.utils import load_config
import os
from web2kindle.libs.log import Log
LOG = Log("Web2Kindle")
# Sentinel meaning "no limit" for crawl/page counts.
INF = 99999999999999999999
# When cwd is the filesystem root (len <= 1) fall back to a fixed path;
# the path matches Android shared storage — presumably a mobile runtime
# fallback. TODO confirm.
CURRENT_PATH = os.getcwd() if len(os.getcwd()) > 1 else '/storage/emulated/0/w2k'
# Directory layout under the project root.
CONFIG_PATH = os.path.join(CURRENT_PATH, 'web2kindle/config')
BIN_PATH = os.path.join(CURRENT_PATH, 'web2kindle/bin')
TEMPLATES_PATH = os.path.join(CURRENT_PATH, 'web2kindle/templates')
MAIN_CONFIG_PATH = os.path.join(CONFIG_PATH, 'config.yml')
# Per-platform kindlegen executables.
KINDLE_GEN_PATH_LINUX = os.path.join(BIN_PATH, 'kindlegen_linux')
KINDLE_GEN_PATH_WINDOWS = os.path.join(BIN_PATH, 'kindlegen.exe')
KINDLE_GEN_PATH_MAC = os.path.join(BIN_PATH, 'kindlegen_mac')
# Templates used to assemble the book structure (content, OPF, TOC, NCX).
TEMPLATES_KINDLE_CONTENT = os.path.join(TEMPLATES_PATH, 'kindle_content.html')
TEMPLATES_KINDLE_OPF = os.path.join(TEMPLATES_PATH, 'kindle_opf.html')
TEMPLATES_KINDLE_TABLE = os.path.join(TEMPLATES_PATH, 'kindle_table.html')
TEMPLATES_KINDLE_NCX = os.path.join(TEMPLATES_PATH, 'kindle_ncx.ncx')
# Per-source crawler configuration files.
CONFIG_GUOKE_SCIENTIFIC = os.path.join(CONFIG_PATH, 'guoke_scientific.yml')
CONFIG_JIANSHU_USER = os.path.join(CONFIG_PATH, 'jianshu_user.yml')
CONFIG_JIANSHU_WENJI = os.path.join(CONFIG_PATH, 'jianshu_wenji.yml')
CONFIG_JIANSHU_ZHUANTI = os.path.join(CONFIG_PATH, 'jianshu_zhuanti.yml')
CONFIG_QDAILY = os.path.join(CONFIG_PATH, 'qdaily.yml')
CONFIG_ZHIHU_ANSWERS = os.path.join(CONFIG_PATH, 'zhihu_answers.yml')
CONFIG_ZHIHU_COLLECTION = os.path.join(CONFIG_PATH, 'zhihu_collection.yml')
CONFIG_ZHIHU_ZHUANLAN = os.path.join(CONFIG_PATH, 'zhihu_zhuanlan.yml')
CONFIG_ZHIHU_DAILY = os.path.join(CONFIG_PATH, 'zhihu_daily.yml')
# Loaded eagerly at import time; values are logged for debugging.
MAIN_CONFIG = load_config(MAIN_CONFIG_PATH)
LOG.log_it("CURRENT_PATH:{}".format(CURRENT_PATH))
LOG.log_it("MAIN_CONFIG:{}".format(MAIN_CONFIG))
| {
"content_hash": "52d56b32aa1fcf84baeb63674cbb3a59",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 48.27777777777778,
"alnum_prop": 0.7520138089758343,
"repo_name": "wax8280/web2kindle",
"id": "434d8f81b165d7a27040205eee19b3a75def46ee",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web2kindle/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16334"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "170680"
}
],
"symlink_target": ""
} |
"""
Generate the site
Usage:
roxy [--config=INI] generate <site> [--file=FILE]
roxy [--config=INI] initialize <site> [--file=FILE]
roxy [--config=INI] shell <site> [--file=FILE]
roxy (-h | --help)
Options:
-h, --help Show this text
-c INI, --config=INI Config path [default: site.ini]
-f FILE, --file=FILE Content store, defaults to <site>.content
"""
import os
import sys
import logging
import mimetypes
import importlib
import code
from datetime import datetime
from dateutil.tz import tzutc
from docopt import docopt
from jinja2 import TemplateNotFound
from markdown import Markdown
from BeautifulSoup import BeautifulSoup
import roxy.model as model
import roxy.configure as configure
import roxy.util as util
import roxy.generators
from roxy.model import Model, Site, Content, Asset, Tag, Property
from roxy.events import BeforeRender, BeforeIngest, BeforeRoute, BeforeRender,\
BeforeGenerate, BeforeWrite, AfterIngest, AfterGenerate, AfterRender,\
AfterRoute, AfterWrite, Render
# Module-level logger; None until main() configures logging.
logger = None
def main(argv=sys.argv):
    """
    CLI entry point: parse docopt arguments from the module docstring,
    load configuration, and dispatch to generate or an interactive shell.
    """
    # NOTE(review): argv is accepted but unused — docopt() reads
    # sys.argv itself.
    global logger
    arguments = docopt(__doc__)
    config = configure.configure(arguments)
    logger = logging.getLogger('roxy')
    try:
        if arguments['generate']:
            generate(arguments, config)
        if arguments['shell']:
            # Convenience namespace for the interactive shell.
            l = {
                'session': model.get_session(),
                'site': Site.get(slug=config['site']),
                'Site': Site,
                'Content': Content,
                'Asset': Asset,
                'Tag': Tag,
                'Property': Property
            }
            code.interact(local=l)
    except Exception as e:
        # Developer convenience: print the traceback and drop into pdb
        # post-mortem on any failure.
        import traceback
        import sys
        import pdb
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[2])
def generate(arguments, config):
    """
    Full site build: ingest content and assets, import the generator
    module, install the route/fetch/render template filters, render every
    queued page, and write the results under config['build_path'].
    Lifecycle events (Before*/After*) fire around each phase so plugins
    can populate route_mappings and render_list.
    """
    session = model.get_session()
    # find site or create if doesn't exist
    site = Site.get(slug=config['site'])
    if not site:
        site = Site(slug=config['site'], name=config['name'], url=config['url'])
        session.add(site)
    # for all content encountered
    BeforeIngest.fire(site, config)
    content = ingest_content(site, config)
    assets = ingest_assets(site, config)
    AfterIngest.fire(site, config, content=content, assets=assets)
    session.add_all(content)
    session.add_all(assets)
    session.commit()
    # import module which generates site
    BeforeGenerate.fire(site, config)
    generator = importlib.import_module(config['generator'])
    AfterGenerate.fire(site, config, generator)
    # iterate over routes
    route_mappings = {}
    BeforeRoute.fire(site, config, route_mappings)
    # route_mappings.update({path: context for path, _, _, context in write_list if isinstance(context, Model)})
    route_mappings.update({a.path: a for a in assets})
    config['renderer'].filters['route'] = make_router(config, route_mappings)
    config['renderer'].filters['fetch'] = make_fetcher(config, route_mappings)
    config['renderer'].filters['render'] = make_renderer(config)
    AfterRoute.fire(site, config, route_mappings)
    # render the documents
    write_list = []
    render_list = []
    BeforeRender.fire(site, config, render_list)
    for path, template, fallback, context in render_list:
        values = {}
        if isinstance(context, Model):
            # Expose the model instance under its lowercased class name
            # (e.g. {{ content }}, {{ asset }}).
            params = dict(site=site)
            params.update(values)
            keyname = context.__class__.__name__.lower()
            params[keyname] = context
            context = make_context(config, **params)
        else:
            values.update(context)
            context = make_context(config, site=site, **values)
        Render.fire(site, path, template, fallback, context)
        logger.info("rendering {} via {}".format(path, template))
        s = render(config['renderer'], template, fallback, context)
        # NOTE(review): this passes the ingested `content` list, not the
        # page context — confirm AfterRender subscribers expect that.
        AfterRender.fire(site, values, path, template, fallback, content, s)
        write_list.append((path, s))
    # process the write list
    BeforeWrite.fire(site, config, write_list)
    for path, s in write_list:
        if path.startswith('/'):
            path = path[1:]
        path = os.path.join(config['build_path'], path)
        logger.info("writing {}".format(path))
        util.write(path, s)
    AfterWrite.fire(site, config, write_list)
def make_context(config, **kwargs):
    """Build a template context: the given values plus a tz-aware 'now'
    localized to config['timezone']."""
    context = dict(kwargs)
    utc_now = datetime.utcnow().replace(tzinfo=tzutc())
    context['now'] = utc_now.astimezone(config['timezone'])
    return context
def make_fetcher(config, mappings):
    """
    Build a jinja 'fetch' filter resolving a key or slug to its object.

    Without cls_, lookup goes through the objects collected from
    *mappings*; with cls_ ('content' | 'asset' | 'tag', case-insensitive),
    lookup queries the model class by key then by slug. Raises KeyError
    when nothing matches.
    """
    objects_by_key = {}
    objects_by_slug = {}
    for context in mappings.values():
        if hasattr(context, 'key'):
            objects_by_key[context.key] = context
        if hasattr(context, 'slug'):
            objects_by_slug[context.slug] = context
    def fetcher(key, cls_=None):
        if cls_ is None:
            if key in objects_by_key:
                return objects_by_key[key]
            if key in objects_by_slug:
                return objects_by_slug[key]
        else:
            classes = {
                'content': Content,
                'asset': Asset,
                'tag': Tag
            }
            # BUG FIX: the original tested `cls_.lower() in classes` but
            # then indexed with the raw `cls_`, raising KeyError for any
            # mixed-case class name that passed the test.
            if cls_.lower() in classes:
                cls = classes[cls_.lower()]
                by_key = cls.get(key)
                by_slug = cls.get(slug=key)
                if by_key is not None or by_slug is not None:
                    return by_key or by_slug
        raise KeyError(key)
    return fetcher
def make_router(config, mappings):
    """
    Build a jinja 'route' filter mapping a Model instance (or a plain
    key/slug value) to its routed path, optionally absolutized against
    config['url_base']. Raises KeyError when the target has no route.
    """
    routes_by_key = {}
    routes_by_slug = {}
    for path, context in mappings.items():
        if hasattr(context, 'key'):
            routes_by_key[context.key] = path
        if hasattr(context, 'slug'):
            routes_by_slug[context.slug] = path
    def router(target, absolute=False):
        p = None
        if isinstance(target, Model):
            # BUG FIX: the original indexed routes_by_key[target.key]
            # unconditionally, raising KeyError before the slug fallback
            # below could ever run.
            if hasattr(target, 'key') and target.key in routes_by_key:
                p = routes_by_key[target.key]
            elif hasattr(target, 'slug') and target.slug in routes_by_slug:
                p = routes_by_slug[target.slug]
        elif target in routes_by_key:
            p = routes_by_key[target]
        elif target in routes_by_slug:
            p = routes_by_slug[target]
        if p:
            if absolute:
                base = config['url_base']
                return util.url_join(base, p)
            else:
                return p
        raise KeyError(target)
    return router
# Memoized Markdown instance shared by all render filters.
_md_renderer = None
def make_renderer(config):
    """
    Build a jinja 'render' filter: convert markdown to HTML, then
    dereference "id:ref"-style attribute values (a@href/title,
    img@src/alt) through the previously installed 'route' and 'fetch'
    filters.
    """
    global _md_renderer
    if not _md_renderer:
        _md_renderer = Markdown()
    route = config['renderer'].filters['route']
    fetch = config['renderer'].filters['fetch']
    def render_filter(s):
        md = _md_renderer.convert(s)
        soup = BeautifulSoup(md)
        def is_ref(attr):
            # An attribute value containing ':' marks a reference.
            return attr is not None and ':' in attr
        def dereference(attr):
            id, ref = attr.split(':', 1)
            if '.' in ref:
                # "id:cls.field" -> fetch the object, read the field.
                ref, field = ref.split('.', 1)
                obj = fetch(id, cls_=ref)
                return getattr(obj, field)
            else:
                # "id:ref" -> resolve to the routed path.
                return route(id)
        def replace_attrs(el, attr, callback):
            for e in soup.findAll(el, attrs={attr: is_ref}):
                a = e.get(attr)
                value = dereference(a)
                callback(e, attr, value)
        replace_attrs('a', 'href', _set_attr)
        replace_attrs('a', 'title', _set_attr)
        replace_attrs('img', 'src', _set_absolute_url)
        replace_attrs('img', 'alt', _set_attr)
        # NOTE(review): unicode() makes this Python-2-only.
        return unicode(soup)
    def _set_absolute_url(el, key, path):
        url = util.url_join(config['url_base'], path)
        _set_attr(el, key, url)
    def _set_attr(el, key, value):
        # Case-insensitive in-place attribute update on a BeautifulSoup
        # element (attrs is a list of (name, value) pairs in BS3).
        for i, (k, v) in enumerate(el.attrs):
            if k.lower() == key.lower():
                el.attrs[i] = (k, value)
    return render_filter
def render(renderer, template, fallback, context):
    """
    Render *template* with *context*; if the template is missing and a
    *fallback* name was given, render that instead. Re-raises
    TemplateNotFound when there is no fallback.
    """
    try:
        tmpl = renderer.get_template(template)
    except TemplateNotFound:
        if not fallback:
            raise
        tmpl = renderer.get_template(fallback)
    return tmpl.render(context)
def ingest_assets(site, config):
    """
    Crawl the asset source directory and upsert an Asset row per file.

    An optional "<name>.metadata" sidecar supplies metadata attributes
    and an optional body. Returns the list of Asset instances (not yet
    committed).
    """
    asset_files = discover_assets(config['asset_source_path'])
    assets = []
    for path in asset_files:
        relative_path = os.path.relpath(path, config['asset_source_path'])
        # BUG FIX: guess_type() returns a 2-tuple (always truthy); the
        # original tested the tuple and then crashed on None.split() for
        # unknown types. Reject unknown types explicitly instead.
        mime, _encoding = mimetypes.guess_type(path)
        if mime is None:
            raise NotImplementedError(path)
        # search for metadata sidecar next to the asset
        mdpath = '{}.metadata'.format(*os.path.splitext(path))
        # BUG FIX: `body` was only bound inside the metadata branch, so
        # any asset without a sidecar raised NameError (or silently
        # reused the previous iteration's body) at the `if body:` below.
        body = None
        if os.path.exists(mdpath):
            with open(mdpath, 'rb') as md:
                document = md.read()
                header, body = _parse_content_header(document)
                metadata = _parse_metadata(header, config)
        else:
            metadata = {}
        # compute the file's checksum
        with open(path,'rb') as f:
            checksum = util.checksum(f)
        a = Asset.get(site=site, path=relative_path)
        if a is None:
            a = Asset(site=site, path=relative_path)
        for k, v in metadata.items():
            setattr(a, k, v)
        a.checksum = checksum
        a.site = site
        if body:
            a.body = body
        assets.append(a)
    return assets
def discover_assets(path):
    """
    Walk *path* and return every file that is not a ".metadata" sidecar.
    """
    collected = []
    logger.info("crawling {}".format(path))
    for root, _dirs, filenames in os.walk(path):
        for name in filenames:
            ext = os.path.splitext(name)[-1][1:].lower()
            if ext == 'metadata':
                continue
            path = os.path.join(root, name)
            collected.append(path)
            logger.debug("collected {}".format(path))
    return collected
def ingest_content(site, config):
    """
    Crawl the content source tree and upsert a Content row per document,
    applying parsed metadata as attributes and the remainder as body.
    Returns the list of Content instances (not yet committed).
    """
    # compute allowed extensions
    extensions = []
    for f in config['document_formats']:
        if f == 'markdown':
            extensions.extend(['md', 'markdown'])
    content_files = discover_content(config['content_source_path'], extensions)
    content = []
    for f in content_files:
        relative_path = os.path.relpath(f, config['content_source_path'])
        # NOTE: `f` (the path) is shadowed by the file object below.
        with open(f, 'rb') as f:
            document = f.read()
        metadata, body = parse_document(document, config)
        c = Content.get(site=site, path=relative_path)
        if c is None:
            c = Content(site=site, path=relative_path)
        for k, v in metadata.items():
            setattr(c, k, v)
        c.body = body
        content.append(c)
    return content
def discover_content(path, extensions):
    """
    Recursively collect content files under *path* whose (lowercased)
    extension is in *extensions*.
    """
    content_files = []
    logger.info("crawling {}".format(path))
    # os.walk() already descends into subdirectories. BUG FIX: the
    # original ALSO recursed manually over `dirs`, so every nested file
    # was collected multiple times (duplication grew with depth).
    for root, dirs, files in os.walk(path):
        for f in files:
            full_path = os.path.join(root, f)
            extension = os.path.splitext(f)[-1][1:].lower()
            if extension in extensions:
                content_files.append(full_path)
                logger.debug("collected {}".format(full_path))
            else:
                # BUG FIX: the original logged a stale variable here,
                # reporting the previously collected path as "ignored".
                logger.debug("ignoring {}".format(full_path))
    return content_files
def _parse_content_header(document):
    """
    Split *document* at the first blank line into (metadata, body), both
    decoded from UTF-8.
    """
    metadata = []
    lines = (line.strip() for line in document.split('\n'))
    for line in lines:
        line = line.strip()
        # first blank line indicates end of metadata
        if len(line.strip()) == 0:
            break
        metadata.append(line)
    # The generator is partially consumed: whatever remains is the body.
    body = '\n'.join(list(lines))
    metadata = '\n'.join(metadata)
    # NOTE(review): str.decode makes this Python-2-only; *document* is
    # expected to be a UTF-8 byte string.
    return metadata.decode('utf8'), body.decode('utf8')
def _parse_metadata(document, config):
    """
    Parse a metadata header into a dict, coercing *_time/*_date values
    to timezone-aware datetimes and 'tags' to Tag rows (creating missing
    tags in the session). 'time'/'date' keys are renamed publish_time.
    """
    # The keywords are case-insensitive and may consist of letters, numbers,
    # underscores and dashes and must end with a colon. The values consist of
    # anything following the colon on the line and may even be blank.
    #
    # If a line is indented by 4 or more spaces, that line is assumed to be an
    # additional line of the value for the previous keyword. A keyword may have
    # as many lines as desired.
    session = model.get_session()
    meta = {}
    current_key = None
    metadata, _ = _parse_content_header(document)
    for line in metadata.split('\n'):
        line = line.strip()
        # first blank line indicates end of metadata
        if len(line.strip()) == 0:
            break
        # two cases
        # 1) line has a colon in it, split key from value
        # 2) line doesn't, which indicates it continues previous k/v pair
        parts = line.split(':', 1)
        if len(parts) > 1:
            current_key = parts[0].strip().lower()
            # if value is empty string, assume beginning of a list
            value = parts[1].strip()
            if len(value) == 0:
                meta[current_key] = []
            else:
                meta[current_key] = value
        if len(parts) == 1:
            # NOTE(review): a continuation line before any keyword would
            # hit meta[None] and raise KeyError — confirm headers always
            # start with a keyword line.
            if not isinstance(meta[current_key], list):
                meta[current_key] = [meta[current_key]]
            value = parts[0].strip()
            meta[current_key].append(value)
    for k, v in meta.items():
        if k.endswith('_time') or k == 'time':
            # NOTE(review): `dateutil` is not imported in this module
            # (only dateutil.tz.tzutc is), and the parser lives at
            # dateutil.parser — this line raises NameError as written.
            v = dateutil.parse.parse(v)
            try:
                # NOTE(review): datetime.astimezone() takes the tz as a
                # positional argument; the tzinfo keyword raises
                # TypeError, not ValueError — confirm intent.
                v = v.astimezone(tzinfo=config['timezone'])
            except ValueError:
                v = v.replace(tzinfo=config['timezone'])
            meta[k] = v
        elif k.endswith('_date') or k == 'date':
            v = datetime.strptime(v, '%Y-%m-%d').replace(tzinfo=config['timezone'])
            meta[k] = v
        elif k == 'tags':
            if not isinstance(v, list):
                v = map(lambda s: s.strip(), v.split(u','))
            tags = []
            for t in v:
                # Presumably Tag derives its slug from the name — confirm,
                # since lookup is by slug but creation only sets name.
                tag = Tag.get(slug=t)
                if not tag:
                    tag = Tag(name=t)
                    session.add(tag)
                tags.append(tag)
            meta[k] = tags
    if 'time' in meta:
        meta[u'publish_time'] = meta['time']
        del meta['time']
    elif 'date' in meta:
        meta[u'publish_time'] = meta['date']
        del meta['date']
    return meta
def parse_document(document, config):
    """Split *document* into its parsed metadata dict and raw body text."""
    header, body = _parse_content_header(document)
    meta = _parse_metadata(header, config)
    return meta, body
# Script entry point: delegate to main() (defined elsewhere in this file).
if __name__ == '__main__':
    main()
| {
"content_hash": "3a2c15f4d86ee0782240d157935fd036",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 112,
"avg_line_length": 29.312,
"alnum_prop": 0.566457423580786,
"repo_name": "jessedhillon/roxy",
"id": "aa2d8b8fd82729e3830268c8f27a7844cab9a945",
"size": "14656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roxy/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56201"
}
],
"symlink_target": ""
} |
import os
from flask import Flask, request, render_template
from flask_restful import Resource, Api, reqparse
from simple_salesforce import Salesforce
app = Flask(__name__)
api = Api(app)
# Initialize a Salesforce connection with credentials taken from the
# environment (SALESFORCE_USERNAME / SALESFORCE_PASSWORD / SALESFORCE_TOKEN).
sf = Salesforce(username=os.environ.get('SALESFORCE_USERNAME'), password=os.environ.get('SALESFORCE_PASSWORD'), security_token=os.environ.get('SALESFORCE_TOKEN'))
@app.route('/')
def home():
    """Render the index template for the root URL."""
    return render_template('index.html')
class Account(Resource):
    """REST resource exposing Salesforce Account lookup and creation."""

    # Declare required fields so clients get a meaningful error on insert.
    parser = reqparse.RequestParser()
    parser.add_argument('Ownerid', required=True,
                        help="Ownerid is a required Field")

    @staticmethod
    def _soql_quote(value):
        # SECURITY FIX: escape backslashes and single quotes so user input
        # cannot break out of the SOQL string literal (SOQL injection).
        return value.replace('\\', '\\\\').replace("'", "\\'")

    def get(self, name):
        """Return Accounts whose Name contains *name* (case per SOQL LIKE)."""
        safe_name = self._soql_quote(name)
        results = sf.query(
            "SELECT Id, Name FROM Account WHERE Name LIKE '%" +
            safe_name + "%'")
        return results, 201

    def post(self, name):
        """Create an Account named *name*, owned by the posted Ownerid."""
        data = Account.parser.parse_args()
        response = sf.Account.create({'Ownerid': data['Ownerid'],
                                      'Name': name})
        return response, 201
# Example URL: http://<app>.herokuapp.com/account/Burlington_texttilles
# Route /account/<name> to the Account resource.
api.add_resource(Account, '/account/<string:name>')
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug = True, port=5000)
| {
"content_hash": "3b9a259e5da4ab481a4ca4120b497c42",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 162,
"avg_line_length": 34.794117647058826,
"alnum_prop": 0.7016060862214708,
"repo_name": "sumugapadman/Heroku_Python_Sf",
"id": "0903bbca5615eae91c0ad576396b2a51f2378fe8",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2236"
},
{
"name": "Python",
"bytes": "1183"
}
],
"symlink_target": ""
} |
import sys
import time
import argparse
import signal
import storjnode
from btctxstore import BtcTxStore
from crochet import setup
# Start the twisted reactor in the background via crochet, then restore
# Python's default SIGINT handler so Ctrl-C raises KeyboardInterrupt
# (twisted replaces the handler when it starts).
setup()
signal.signal(signal.SIGINT, signal.default_int_handler)
def _parse_args(args):
parser = argparse.ArgumentParser(description="Start storjnode swarm.")
# debug
parser.add_argument('--debug', action='store_true',
help="Show debug information.")
# quiet
parser.add_argument('--quiet', action='store_true',
help="Don't show logging information.")
# isolate
parser.add_argument('--isolate', action='store_true',
help="Isolate swarm form main network.")
# ports
default = 5000
msg = "Where swarm ports start from. Default: {0}"
parser.add_argument("--ports", default=default, type=int,
help=msg.format(default))
# size
default = 20
msg = "Number of nodes in the swarm. Default: {0}"
parser.add_argument("--size", default=default, type=int,
help=msg.format(default))
return vars(parser.parse_args(args=args))
if __name__ == "__main__":
    arguments = _parse_args(sys.argv[1:])
    # isolate swarm if requested: bootstrap only from the first local node
    bootstrap_nodes = None
    if arguments["isolate"]:
        bootstrap_nodes = [("127.0.0.1", arguments["ports"])]
    swarm = []
    try:
        btctxstore = BtcTxStore()
        # One node per consecutive port, each with a freshly generated key.
        for i in range(arguments["size"]):
            port = arguments["ports"] + i
            node_key = btctxstore.create_key()
            peer = storjnode.network.Node(node_key, port=port,
                                          bootstrap_nodes=bootstrap_nodes)
            swarm.append(peer)
            print("Started peer {0} on port {1}.".format(i, port))
            time.sleep(0.1)  # stagger node start-up
        # serve forever
        print("Running swarm with {0} ...".format(len(swarm)))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always stop the started nodes, even on unexpected errors.
        print("Stopping nodes")
        for node in swarm:
            node.stop()
| {
"content_hash": "f4784f790f6e3729026cd8d7406572ae",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 28.18421052631579,
"alnum_prop": 0.5830999066293184,
"repo_name": "Storj/storjnode",
"id": "37166dd14282c48240d4c71a7b88b8e5ecff4e2a",
"size": "2160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/network/swarm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "314006"
}
],
"symlink_target": ""
} |
"""
Module for running server
"""
import os
from app import APP
from healthcheck import HEALTHCHECK
# Mount the healthcheck endpoints on the main application.
APP.register_blueprint(HEALTHCHECK)
ENV = os.environ.get('ENV', None)
# Only run the built-in dev server for local development; other environments
# are expected to serve APP through a WSGI server.
# NOTE(review): port 1000 is in the privileged (<1024) range — confirm intended.
if ENV == 'local':
    APP.run(host='0.0.0.0', port=1000)
| {
"content_hash": "245f7a167dcd5af786dcfedd2de65d67",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 38,
"avg_line_length": 16.571428571428573,
"alnum_prop": 0.7025862068965517,
"repo_name": "miljkovicivan/MicroComments",
"id": "98d2a27b4703bfc05a6b386c6a86a7cab220b093",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/run.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Nginx",
"bytes": "582"
},
{
"name": "Python",
"bytes": "16678"
},
{
"name": "Shell",
"bytes": "171"
}
],
"symlink_target": ""
} |
import click
@click.command()
@click.option('--count', default=1, help='Number of greetings.')
@click.option('--name', prompt='Your name',
              help='The person to greet.')
def hello(count, name):
    """Simple program that greets NAME for a total of COUNT times."""
    # Docstring doubles as the command's --help text, so it stays unchanged.
    for _ in range(count):
        click.echo(f"Hello {name}!")
if __name__ == '__main__':
hello() | {
"content_hash": "b4541c06acfdfc93090684b15bc8be21",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 18.476190476190474,
"alnum_prop": 0.5979381443298969,
"repo_name": "jdurbin/sandbox",
"id": "8e46123694c3d7e4a290389f08a1b4828c187b80",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/cli/clicktest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "61127"
},
{
"name": "Java",
"bytes": "15322"
},
{
"name": "Julia",
"bytes": "26131"
},
{
"name": "Jupyter Notebook",
"bytes": "1712400"
},
{
"name": "Python",
"bytes": "96680"
},
{
"name": "R",
"bytes": "4770"
}
],
"symlink_target": ""
} |
from keras.models import Sequential, Graph
from keras.objectives import mse
from keras.layers.core import Dropout, Dense
import numpy as np
import theano
import seaborn
def create_models(
        n_dims=25,
        dropout_probability=0.1,
        adversarial_weight=1,
        reconstruction_weight=1,
        generator_hidden_size_multipliers=[4, 0.5],
        decoder_hidden_size_multiplier=[4, 2, 1, 0.5],
        activation="relu"):
    """Build the (generator, decoder) model pair for adversarial imputation.

    The decoder is a keras Graph trained to predict which entries of an
    (imputed) sample were originally missing. The generator maps the
    concatenation [sample | missing_mask] (2*n_dims inputs) to an imputed
    sample; its loss combines reconstruction error on observed entries with
    an adversarial term rewarding it for fooling the decoder.

    NOTE(review): the mutable default lists are only read, never mutated,
    so the shared-default pitfall does not bite here.
    """
    decoder = Graph()
    decoder.add_input(name="input", input_shape=(n_dims,))
    last_layer = "input"
    for i, size in enumerate(decoder_hidden_size_multiplier):
        hidden_name = "dense%d" % (i + 1)
        # Every hidden layer also sees the raw input (skip connections).
        # NOTE(review): size * n_dims is a float for the 0.5 multiplier;
        # the generator casts with int() below but this path does not —
        # confirm the pinned keras version tolerates float layer sizes.
        decoder.add_node(
            layer=Dense(size * n_dims, activation=activation),
            name=hidden_name,
            inputs=[last_layer, "input"])
        dropout_name = hidden_name + "_dropout"
        decoder.add_node(
            layer=Dropout(dropout_probability),
            name=dropout_name,
            input=hidden_name)
        last_layer = dropout_name
    decoder.add_node(
        layer=Dense(n_dims, activation="sigmoid"),
        name="output",
        inputs=[last_layer, "input"],
        create_output=True)
    decoder.compile(optimizer="rmsprop", loss={"output": "binary_crossentropy"})
    def generator_loss(combined, imputed_vector):
        """Generator loss: *combined* is the packed [original | mask] target
        passed in the y slot; *imputed_vector* is the generator output.
        """
        original_vector = combined[:, :n_dims]
        missing_mask = combined[:, n_dims:]
        input_variable = decoder.get_input()
        decoder_compute_graph = decoder.get_output()
        # Re-wire the decoder's symbolic graph to consume the generator's
        # output in place of the decoder's own input placeholder.
        mask_prediction = theano.clone(
            decoder_compute_graph,
            {input_variable: imputed_vector},
            share_inputs=True)
        # Reconstruction error only on observed (non-missing) entries.
        reconstruction_loss = mse(
            y_true=original_vector * (1 - missing_mask),
            y_pred=imputed_vector * (1 - missing_mask))
        decoder_mask_loss = mse(missing_mask, missing_mask * mask_prediction)
        # Subtracting the decoder loss rewards fooling the decoder.
        return (
            reconstruction_weight * reconstruction_loss
            - adversarial_weight * decoder_mask_loss)
    generator = Sequential()
    generator.add(Dense(
        (generator_hidden_size_multipliers[0] * n_dims),
        input_dim=2 * n_dims,
        activation=activation))
    generator.add(Dropout(dropout_probability))
    for layer_size_multiplier in generator_hidden_size_multipliers[1:] + [1]:
        generator.add(Dense(
            int(layer_size_multiplier * n_dims),
            activation=activation))
        generator.add(Dropout(dropout_probability))
    generator.add(Dense(n_dims, activation='linear'))
    generator.compile(optimizer="rmsprop", loss=generator_loss)
    return generator, decoder
def create_data(
        n_samples=1000,
        n_dims=25,
        offset=0,
        fraction_missing=0.5):
    """Synthesize random sinusoid samples and a corrupted copy.

    Each sample is ``offset + sin(phase + t * frequency)`` on a fixed grid
    of n_dims points in [-1, 1), with a random phase and frequency per
    sample. A random boolean mask then zeroes roughly *fraction_missing*
    of all entries.

    Returns:
        (X_full, X_incomplete, missing_mask): X_incomplete equals X_full
        with masked entries replaced by 0.0.
    """
    grid = np.linspace(-1, 1, n_dims, endpoint=False)
    assert grid.shape == (n_dims,)
    rows = []
    for _ in range(n_samples):
        phase = np.random.randn() * np.pi / 2
        frequency = 10 * np.random.rand()
        rows.append(offset + np.sin(phase + grid * frequency))
    complete = np.vstack(rows) if rows else np.zeros((0, n_dims))
    hidden = np.random.random(complete.shape) < fraction_missing
    corrupted = np.where(hidden, 0.0, complete)
    return complete, corrupted, hidden
def pretrain_decoder(
        X_incomplete,
        missing_mask,
        decoder,
        batch_size=128,
        training_epochs=5):
    """Pre-train the mask decoder to recognize noise-filled entries.

    Missing entries are replaced with Gaussian noise matched to each
    feature's observed mean/std, and the decoder is trained to predict the
    missing-entry mask from the noised samples.

    Args:
        X_incomplete: 2D array with zeros at missing positions.
        missing_mask: boolean array, True where entries are missing.
        decoder: keras Graph model with named "input"/"output" endpoints.
        batch_size: mini-batch size.
        training_epochs: number of passes over the data.
    """
    # Work on a copy; the caller's array is left untouched.
    X_incomplete = X_incomplete.copy()
    n_samples, n_dims = X_incomplete.shape
    # Temporarily mark missing entries as NaN so nanmean/nanstd skip them.
    X_incomplete[missing_mask] = np.nan
    feature_means = np.nanmean(X_incomplete, axis=0)
    assert len(feature_means) == n_dims
    feature_stds = np.nanstd(X_incomplete, axis=0)
    assert len(feature_stds) == n_dims
    X_incomplete[missing_mask] = 0.0
    indices = np.arange(n_samples)
    for epoch in range(training_epochs):
        np.random.shuffle(indices)
        n_batches = n_samples // batch_size
        for batch_idx in range(n_batches):
            batch_indices = indices[
                batch_idx * batch_size:(batch_idx + 1) * batch_size]
            X_batch = X_incomplete[batch_indices]
            missing_mask_batch = missing_mask[batch_indices]
            X_batch_noisy = X_batch.copy()
            # Fill each missing entry with noise drawn from that feature's
            # observed distribution, so "missing" is not trivially zero.
            for feature_idx in range(n_dims):
                noise = np.random.randn(batch_size) * feature_stds[feature_idx] + feature_means[feature_idx]
                missing_rows = missing_mask_batch[:, feature_idx]
                X_batch_noisy[missing_rows, feature_idx] = noise[missing_rows]
            # Report accuracy before the update (0.5 threshold on sigmoid).
            predicted_mask = decoder.predict({"input": X_batch_noisy})["output"]
            print("Pre-training epoch %d, mini-batch %d, accuracy=%0.4f" % (
                epoch + 1,
                batch_idx + 1,
                ((predicted_mask > 0.5) == missing_mask_batch).mean(),))
            decoder.train_on_batch({
                "input": X_batch_noisy,
                "output": missing_mask_batch
            })
def train(
        X_full,
        X_incomplete,
        mask,
        generator,
        decoder,
        batch_size=128,
        training_epochs=10,
        alternating_updates=False,
        plot_each_epoch=False,
        decoder_pretrain_epochs=5):
    """Jointly train the imputation generator and the adversarial decoder.

    Args:
        X_full: complete ground-truth samples, shape (n_samples, n_dims).
        X_incomplete: X_full with missing entries zeroed out.
        mask: boolean array, True where entries are missing.
        generator: model mapping [X_incomplete | mask] -> imputed X.
        decoder: Graph model predicting the mask from an imputed X.
        batch_size: mini-batch size.
        training_epochs: number of passes over the data.
        alternating_updates: if True, update decoder and generator on
            alternating mini-batches instead of both every batch.
        plot_each_epoch: if True plot after every epoch; otherwise only
            after the final epoch.
        decoder_pretrain_epochs: unused; kept for interface compatibility.

    Raises:
        ValueError: if either model's error becomes NaN (divergence).
    """
    # Generator input: each sample concatenated with its missing-entry mask.
    combined = np.hstack([X_incomplete, mask])
    n_samples = len(X_full)
    indices = np.arange(n_samples)
    for epoch in range(training_epochs):
        np.random.shuffle(indices)
        n_batches = n_samples // batch_size
        for batch_idx in range(n_batches):
            batch_indices = indices[
                batch_idx * batch_size:(batch_idx + 1) * batch_size]
            combined_batch = combined[batch_indices]
            mask_batch = mask[batch_indices]
            X_imputed = generator.predict(combined_batch)
            X_full_batch = X_full[batch_indices]
            reconstruction_mse = ((X_imputed - X_full_batch) ** 2).mean()
            predicted_mask = decoder.predict({"input": X_imputed})["output"]
            masking_mse = ((mask_batch - predicted_mask) ** 2).mean()
            if np.isnan(reconstruction_mse):
                raise ValueError("Generator Diverged!")
            if np.isnan(masking_mse):
                raise ValueError("Decoder Diverged!")
            print((
                "-- Epoch %d, batch %d, "
                "Reconstruction MSE = %0.4f, "
                "Decoder MSE = %0.4f, "
                "Decoder accuracy = %0.4f "
                "(mean mask prediction = %0.4f)") % (
                epoch + 1,
                batch_idx + 1,
                reconstruction_mse,
                masking_mse,
                ((predicted_mask > 0.5) == mask_batch).mean(),
                predicted_mask.mean()))
            print("Decoder mask predictions: %s" % (
                list(zip(predicted_mask[0], mask_batch[0])),))
            if not alternating_updates or batch_idx % 2 == 0:
                # Train the decoder on the imputed batch, with ground truth
                # restored at observed (non-missing) positions.
                decoder_input = X_imputed.copy()
                decoder_input[~mask_batch] = X_full_batch[~mask_batch]
                decoder.train_on_batch({
                    "input": decoder_input,
                    "output": mask_batch
                })
            if not alternating_updates or batch_idx % 2 == 1:
                generator.train_on_batch(
                    X=combined_batch,
                    y=combined_batch)
        if plot_each_epoch or epoch == training_epochs - 1:
            # Plot the last mini-batch of the epoch.
            seaborn.plt.plot(X_imputed[0, :], label="X_imputed")
            # BUG FIX: was X_full_batch[0, :n_dims], but n_dims is not
            # defined in this function (NameError unless a module-level
            # n_dims happens to exist). The row already spans exactly the
            # feature width, so plot the whole row.
            seaborn.plt.plot(X_full_batch[0, :], label="X_full")
            seaborn.plt.plot(mask_batch[0], label="mask")
            seaborn.plt.legend()
            # NOTE(review): seaborn.plt was removed in modern seaborn —
            # confirm the pinned seaborn version still exposes it.
            seaborn.plt.show()
if __name__ == "__main__":
    # Experiment configuration.
    n_dims = 50
    n_samples = 10 ** 5
    training_epochs = 10
    batch_size = 128
    pretrain = False
    # 90% of entries are masked out — a hard imputation setting.
    X_full, X_incomplete, missing_mask = create_data(
        n_dims=n_dims,
        n_samples=n_samples,
        fraction_missing=0.9)
    generator, decoder = create_models(
        n_dims=n_dims,
        activation="relu",
        adversarial_weight=0.1)
    if pretrain:
        # Optionally warm up the decoder on noise-filled samples first.
        pretrain_decoder(
            X_incomplete=X_incomplete,
            missing_mask=missing_mask,
            decoder=decoder,
            training_epochs=training_epochs,
            batch_size=batch_size)
    train(
        X_full,
        X_incomplete,
        missing_mask,
        generator=generator,
        decoder=decoder,
        training_epochs=10,
        alternating_updates=False,
        plot_each_epoch=False,
        batch_size=batch_size)
| {
"content_hash": "e9595585d13d594c7b56b73e970f1006",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 108,
"avg_line_length": 35.87763713080169,
"alnum_prop": 0.5660355168763965,
"repo_name": "iskandr/adversarial-impute",
"id": "4e58177bc06604bda5f9da373a79f1c11ab95e38",
"size": "8504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adversarial-impute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8504"
}
],
"symlink_target": ""
} |
"""
"""
from . import core, geometry, string, misc # noqa
execute = core.OdhQLFunction.execute
create = core.OdhQLFunction.create
| {
"content_hash": "45eb9653bbd312e574733f169d8bebfa",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 19,
"alnum_prop": 0.7218045112781954,
"repo_name": "hsr-ba-fs15-dat/opendatahub",
"id": "6c49c0be25fcaa793982bb78189e2b0ba8a9fe43",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/hub/odhql/functions/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25670"
},
{
"name": "HTML",
"bytes": "75529"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Makefile",
"bytes": "1995"
},
{
"name": "PLSQL",
"bytes": "9414"
},
{
"name": "Python",
"bytes": "334952"
},
{
"name": "Shell",
"bytes": "280"
},
{
"name": "TypeScript",
"bytes": "111755"
}
],
"symlink_target": ""
} |
import sys
import glob
import os
from setuptools import setup, find_packages
# Collect every file under hoggle/project_templates so it ships as package
# data; paths are stored relative to the "hoggle" package directory.
package_data_list = list()
for root, dirs, files in os.walk('hoggle/project_templates'):
    dir_path = root
    # Drop the leading "hoggle" path component to make it package-relative.
    # NOTE(review): splitting on '/' assumes POSIX separators from os.walk —
    # confirm this setup script never runs on Windows.
    dir_list = dir_path.split('/')
    del dir_list[0]
    dir_path = "/".join(dir_list)
    for f in files:
        package_data_list.append(os.path.join(dir_path, f))
package_data_list.append("app_templates/*.txt")
setup(
    name = "hoggle",
    version = "0.1",
    packages = find_packages(),
    package_data = {
        "hoggle": package_data_list,
    },
    # Metadata for PyPI
    description = "A static website generator from Markdown templates",
    url = "https://github.com/sirvaliance/hoggle",
    author = "Sir Valiance",
    author_email = "sir@sirvaliance.com",
    license = "BSDv3",
    # Installs a "hoggle" console command pointing at hoggle.__main__.main.
    entry_points = {
        'console_scripts': [
            "hoggle = hoggle.__main__:main",
        ]
    },
    install_requires = ['tornado', 'misaka', 'argparse'],
)
| {
"content_hash": "4afc1777747a71db57bfa4c13613fabf",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 22.6046511627907,
"alnum_prop": 0.6090534979423868,
"repo_name": "sirvaliance/hoggle",
"id": "f43dd34d6696cf8b45d3a3fd794f22a539202c99",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8032"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class SiteSettingsConfig(AppConfig):
    """Django app configuration for texaslan.site_settings."""
    name = 'texaslan.site_settings'
    verbose_name = "Site Settings"
    def ready(self):
        """Hook run once Django has loaded all apps; currently a no-op.

        Override/extend to register signal handlers or other start-up code.
        """
        pass
| {
"content_hash": "a9ac98ebfba0db2c848c70c82044bdda",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 36,
"avg_line_length": 20.636363636363637,
"alnum_prop": 0.6299559471365639,
"repo_name": "TexasLAN/texaslan.org",
"id": "d54b7794cf5f93d77a4201d93a2f351f6d136fd5",
"size": "227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "texaslan/site_settings/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "242691"
},
{
"name": "Dockerfile",
"bytes": "576"
},
{
"name": "HTML",
"bytes": "116042"
},
{
"name": "JavaScript",
"bytes": "50779"
},
{
"name": "Python",
"bytes": "165928"
},
{
"name": "Shell",
"bytes": "4612"
}
],
"symlink_target": ""
} |
import fixtures
import mock
import webob
from nova.api.openstack.compute import multinic as multinic_v21
from nova import compute
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
# Instance UUID shared by all tests in this module.
UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
# Module-level spies: each records the arguments of the most recent call to
# the corresponding stubbed compute API method; tests reset them to
# (None, None) before exercising the controller.
last_add_fixed_ip = (None, None)
last_remove_fixed_ip = (None, None)
def compute_api_add_fixed_ip(self, context, instance, network_id):
    """Stub for compute.api.API.add_fixed_ip: records (uuid, network_id)."""
    global last_add_fixed_ip
    last_add_fixed_ip = (instance['uuid'], network_id)
def compute_api_remove_fixed_ip(self, context, instance, address):
    """Stub for compute.api.API.remove_fixed_ip: records (uuid, address)."""
    global last_remove_fixed_ip
    last_remove_fixed_ip = (instance['uuid'], address)
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    cell_down_support=False):
    """Stub for compute.api.API.get returning a minimal Instance object."""
    inst = objects.Instance()
    for field, value in (('uuid', instance_id),
                         ('id', 1),
                         ('vm_state', 'fake'),
                         ('task_state', 'fake')):
        setattr(inst, field, value)
    inst.obj_reset_changes()
    return inst
class FixedIpTestV21(test.NoDBTestCase):
    """Tests for the addFixedIp/removeFixedIp server actions (v2.1 API)."""
    # Controller module and expected validation error; subclasses may override.
    controller_class = multinic_v21
    validation_error = exception.ValidationError
    def setUp(self):
        super(FixedIpTestV21, self).setUp()
        fakes.stub_out_networking(self)
        # Replace the real compute API calls with the recording stubs above.
        self.stub_out('nova.compute.api.API.add_fixed_ip',
                      compute_api_add_fixed_ip)
        self.stub_out('nova.compute.api.API.remove_fixed_ip',
                      compute_api_remove_fixed_ip)
        self.stub_out('nova.compute.api.API.get', compute_api_get)
        self.controller = self.controller_class.MultinicController()
        self.fake_req = fakes.HTTPRequest.blank('')
        # common.get_instance is mocked so no instance lookup happens.
        self.mock_get = self.useFixture(
            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
        self.mock_get.return_value = fake_instance.fake_instance_obj(
            self.fake_req.environ['nova.context'], uuid=UUID,
            project_id=self.fake_req.environ['nova.context'].project_id)
    def test_add_fixed_ip(self):
        global last_add_fixed_ip
        last_add_fixed_ip = (None, None)
        body = dict(addFixedIp=dict(networkId='test_net'))
        self.controller._add_fixed_ip(self.fake_req, UUID, body=body)
        # The stub recorded the (uuid, network_id) forwarded to compute.
        self.assertEqual(last_add_fixed_ip, (UUID, 'test_net'))
    def _test_add_fixed_ip_bad_request(self, body):
        # Helper: the given body must fail request-schema validation.
        self.assertRaises(self.validation_error,
                          self.controller._add_fixed_ip,
                          self.fake_req,
                          UUID, body=body)
    def test_add_fixed_ip_empty_network_id(self):
        body = {'addFixedIp': {'network_id': ''}}
        self._test_add_fixed_ip_bad_request(body)
    def test_add_fixed_ip_network_id_bigger_than_36(self):
        body = {'addFixedIp': {'network_id': 'a' * 37}}
        self._test_add_fixed_ip_bad_request(body)
    def test_add_fixed_ip_no_network(self):
        global last_add_fixed_ip
        last_add_fixed_ip = (None, None)
        body = dict(addFixedIp=dict())
        self._test_add_fixed_ip_bad_request(body)
        # Validation failure means the compute API was never called.
        self.assertEqual(last_add_fixed_ip, (None, None))
    @mock.patch.object(compute.api.API, 'add_fixed_ip')
    def test_add_fixed_ip_no_more_ips_available(self, mock_add_fixed_ip):
        # NoMoreFixedIps from the compute layer maps to HTTP 400.
        mock_add_fixed_ip.side_effect = exception.NoMoreFixedIps(net='netid')
        body = dict(addFixedIp=dict(networkId='test_net'))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._add_fixed_ip,
                          self.fake_req,
                          UUID, body=body)
    def test_remove_fixed_ip(self):
        global last_remove_fixed_ip
        last_remove_fixed_ip = (None, None)
        body = dict(removeFixedIp=dict(address='10.10.10.1'))
        self.controller._remove_fixed_ip(self.fake_req, UUID, body=body)
        self.assertEqual(last_remove_fixed_ip, (UUID, '10.10.10.1'))
    def test_remove_fixed_ip_no_address(self):
        global last_remove_fixed_ip
        last_remove_fixed_ip = (None, None)
        body = dict(removeFixedIp=dict())
        self.assertRaises(self.validation_error,
                          self.controller._remove_fixed_ip,
                          self.fake_req,
                          UUID, body=body)
        self.assertEqual(last_remove_fixed_ip, (None, None))
    def test_remove_fixed_ip_invalid_address(self):
        body = {'removeFixedIp': {'address': ''}}
        self.assertRaises(self.validation_error,
                          self.controller._remove_fixed_ip,
                          self.fake_req,
                          UUID, body=body)
    @mock.patch.object(compute.api.API, 'remove_fixed_ip',
                       side_effect=exception.FixedIpNotFoundForInstance(
                           instance_uuid=UUID, ip='10.10.10.1'))
    def test_remove_fixed_ip_not_found(self, _remove_fixed_ip):
        # FixedIpNotFoundForInstance maps to HTTP 400.
        body = {'removeFixedIp': {'address': '10.10.10.1'}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._remove_fixed_ip,
                          self.fake_req,
                          UUID, body=body)
class MultinicAPIDeprecationTest(test.NoDBTestCase):
    """Verify both multinic actions are rejected at microversion 2.44+."""

    def setUp(self):
        super(MultinicAPIDeprecationTest, self).setUp()
        self.controller = multinic_v21.MultinicController()
        self.req = fakes.HTTPRequest.blank('', version='2.44')

    def _assert_deprecated(self, method, body):
        # Deprecated actions raise VersionNotFoundForAPIMethod at >= 2.44.
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          method, self.req, UUID, body=body)

    def test_add_fixed_ip_not_found(self):
        body = {'addFixedIp': {'networkId': 'test_net'}}
        self._assert_deprecated(self.controller._add_fixed_ip, body)

    def test_remove_fixed_ip__not_found(self):
        body = {'removeFixedIp': {'address': '10.10.10.1'}}
        self._assert_deprecated(self.controller._remove_fixed_ip, body)
| {
"content_hash": "b1472d894ad63dbb3ace05c56742c3c9",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 78,
"avg_line_length": 37.903225806451616,
"alnum_prop": 0.6257021276595744,
"repo_name": "klmitch/nova",
"id": "ceaaebf37336eef08b7062528b7e9a1f89cf805e",
"size": "6511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_multinic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "851"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "44222"
},
{
"name": "Python",
"bytes": "22328409"
},
{
"name": "Shell",
"bytes": "29138"
},
{
"name": "Smarty",
"bytes": "405441"
}
],
"symlink_target": ""
} |
import comtypes.client, ctypes
################################################################
# Interfaces
class IClassFactory(comtypes.IUnknown):
    """COM IClassFactory interface: creates instances of a COM coclass."""
    _iid_ = comtypes.GUID("{00000001-0000-0000-C000-000000000046}")
    _methods_ = [
        comtypes.STDMETHOD(comtypes.HRESULT, "CreateInstance",
                           [ctypes.POINTER(comtypes.IUnknown),
                            ctypes.POINTER(comtypes.GUID),
                            ctypes.POINTER(ctypes.c_void_p)]),
        comtypes.STDMETHOD(comtypes.HRESULT, "LockServer",
                           [ctypes.c_int])]
    def CreateInstance(self, punkouter=None, interface=None, dynamic=False):
        """Create a COM instance via this class factory.

        punkouter: outer unknown for aggregation, or None.
        interface: the comtypes interface to request; mutually exclusive
            with dynamic. When omitted, the best available interface is
            returned.
        dynamic: request IDispatch and return a late-bound Dispatch wrapper.
        """
        if dynamic:
            if interface is not None:
                raise ValueError("interface and dynamic are mutually exclusive")
            realInterface = comtypes.automation.IDispatch
        elif interface is None:
            realInterface = comtypes.IUnknown
        else:
            realInterface = interface
        obj = ctypes.POINTER(realInterface)()
        # __com_CreateInstance is name-mangled to this class; presumably it
        # is the raw vtable call comtypes generates from _methods_ above —
        # TODO(review): confirm against the comtypes metaclass machinery.
        self.__com_CreateInstance(punkouter, realInterface._iid_, ctypes.byref(obj))
        if dynamic:
            return comtypes.client.dynamic.Dispatch(obj)
        elif interface is None:
            # An interface was not specified, so return the best.
            return comtypes.client.GetBestInterface(obj)
        # An interface was specified and obj is already that interface.
        return obj
##class IExternalConnection(IUnknown):
## _iid_ = GUID("{00000019-0000-0000-C000-000000000046}")
## _methods_ = [
## STDMETHOD(HRESULT, "AddConnection", [c_ulong, c_ulong]),
## STDMETHOD(HRESULT, "ReleaseConnection", [c_ulong, c_ulong, c_ulong])]
# The following code is untested:
# RegisterActiveObject flag values: a weak registration does not keep the
# object alive; a strong registration holds a reference until revoked.
ACTIVEOBJECT_STRONG = 0x0
ACTIVEOBJECT_WEAK = 0x1
oleaut32 = ctypes.oledll.oleaut32
def RegisterActiveObject(comobj, weak=True):
    """
    Register *comobj* as an active object (oleaut32.RegisterActiveObject)
    and return the handle needed to revoke the registration later.
    """
    punk = comobj._com_pointers_[comtypes.IUnknown._iid_]
    clsid = comobj._reg_clsid_
    flags = ACTIVEOBJECT_WEAK if weak else ACTIVEOBJECT_STRONG
    token = ctypes.c_ulong()
    oleaut32.RegisterActiveObject(
        punk, ctypes.byref(clsid), flags, ctypes.byref(token))
    return token.value
def RevokeActiveObject(handle):
    """Undo a RegisterActiveObject registration, given its returned handle."""
    oleaut32.RevokeActiveObject(handle, None)
| {
"content_hash": "c9fc1a4d83272a4c7bf768082e27c1a0",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 84,
"avg_line_length": 39.54838709677419,
"alnum_prop": 0.5848287112561175,
"repo_name": "ezarko/cfn-init",
"id": "01393b6a4bbe7c6ca9019f0464387807b964fd2f",
"size": "2452",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "comtypes/server/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "539671"
}
],
"symlink_target": ""
} |
"""
WSGI config for inventory project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "inventory.settings")
# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| {
"content_hash": "dbab686db3c29a164965fc3946f79a1d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.75,
"alnum_prop": 0.7727272727272727,
"repo_name": "NdagiStanley/inventory",
"id": "7242ab434435ba916b0017e024f8348cf4dd2175",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "883"
},
{
"name": "Python",
"bytes": "10591"
}
],
"symlink_target": ""
} |
class SearchClient(object):
    """
    Low-level interface to Camlistore indexer search operations.

    The indexer component visits all blobs in the store and infers
    connections between them based on its knowledge of certain schema
    formats. In particular, it tracks modification claims for permanodes
    and can provide a permanode's flattened attribute map for any point
    in time, and it knows the filesystem storage conventions.

    Callers should not instantiate this class directly. Instead, call
    :py:func:`camlistore.connect` to obtain a
    :py:class:`camlistore.Connection`
    object and access :py:attr:`camlistore.Connection.searcher`.
    """

    def __init__(self, http_session, base_url):
        self.http_session = http_session
        self.base_url = base_url

    def _make_url(self, path):
        # base_url is None when the server advertises no search support.
        if self.base_url is not None:
            from urlparse import urljoin
            return urljoin(self.base_url, path)
        else:
            from camlistore.exceptions import ServerFeatureUnavailableError
            raise ServerFeatureUnavailableError(
                "Server does not support search interface"
            )

    def _check_response(self, resp, action):
        """
        Raise ServerError for any non-200 response, with a message of the
        form "Failed to <action>: server returned <code> <reason>".
        Shared by all request methods so error reporting stays consistent.
        """
        if resp.status_code != 200:
            from camlistore.exceptions import ServerError
            raise ServerError(
                "Failed to %s: server returned %i %s" % (
                    action,
                    resp.status_code,
                    resp.reason,
                )
            )

    def query(self, expression):
        """
        Run a query against the index, returning an iterable of
        :py:class:`SearchResult`.

        The given expression is just passed on verbatim to the underlying
        query interface.

        Query constraints are not yet supported.
        """
        import json
        req_url = self._make_url("camli/search/query")
        data = {
            # TODO: Understand how constraints work and implement them
            # https://github.com/bradfitz/camlistore/blob/
            # ca58231336e5711abacb059763beb06e8b2b1788/pkg/search/query.go#L255
            #"constraint": "",
            "expression": expression,
        }
        resp = self.http_session.post(
            req_url,
            data=json.dumps(data),
        )
        self._check_response(resp, "search for %r" % (expression,))
        raw_data = json.loads(resp.content)
        return [
            SearchResult(x["blob"]) for x in raw_data["blobs"]
        ]

    def describe_blob(self, blobref):
        """
        Request a description of a particular blob, returning a
        :py:class:`BlobDescription` object.

        The "description" of a blob is the indexer's record of the blob,
        so it contains only the subset of information retained by the
        indexer. The level of detail in the returned object will thus
        depend on what the indexer knows about the given object.
        """
        import json
        req_url = self._make_url("camli/search/describe")
        resp = self.http_session.get(
            req_url,
            params={
                "blobref": blobref,
            },
        )
        self._check_response(resp, "describe %s" % (blobref,))
        raw = json.loads(resp.content)
        # The response describes the requested blob plus related blobs,
        # all keyed by blobref under "meta".
        my_raw = raw["meta"][blobref]
        other_raw = raw["meta"]
        return BlobDescription(
            self,
            my_raw,
            other_raw_dicts=other_raw,
        )

    def get_claims_for_permanode(self, blobref):
        """
        Get the claims for a particular permanode, as an iterable of
        :py:class:`ClaimMeta`.

        Claims are what make a permanode appear mutable on top of immutable
        storage; the indexer aggregates the valid claims to produce the
        permanode's attributes for a point in time.

        Most callers should prefer :py:meth:`describe_blob`, which returns
        the flattened result of processing all claims.
        """
        import json
        req_url = self._make_url("camli/search/claims")
        resp = self.http_session.get(
            req_url,
            params={"permanode": blobref},
        )
        self._check_response(resp, "get claims for %s" % (blobref,))
        raw = json.loads(resp.content)
        return [
            ClaimMeta(x) for x in raw["claims"]
        ]
class SearchResult(object):
    """
    A single hit returned by :py:meth:`SearchClient.query`.
    """

    #: The blobref of the blob represented by this search result.
    blobref = None

    def __init__(self, blobref):
        self.blobref = blobref

    def __repr__(self):
        return (
            "<camlistore.searchclient.SearchResult %s>" % (self.blobref,)
        )
class BlobDescription(object):
    """
    The indexer's view of one blob, as returned by
    :py:meth:`SearchClient.describe_blob`. Detail depends on what the
    indexer retained about the blob.
    """

    def __init__(self, searcher, raw_dict, other_raw_dicts={}):
        self.searcher = searcher
        self.raw_dict = raw_dict
        self.other_raw_dicts = other_raw_dicts

    @property
    def blobref(self):
        """Blobref of the described blob, if known."""
        return self.raw_dict.get("blobRef")

    @property
    def type(self):
        """The indexer's idea of the blob's camliType."""
        return self.raw_dict.get("camliType")

    @property
    def size(self):
        """The indexer's idea of the blob's size."""
        return self.raw_dict.get("size")

    # plus some other stuff that varies depending on type
    # https://github.com/bradfitz/camlistore/blob/
    # ca58231336e5711abacb059763beb06e8b2b1788/pkg/search/handler.go#L722

    def describe_another(self, blobref):
        """
        Describe a related blob, reusing the data the indexer already sent
        along with this description when possible and falling back to a
        fresh :py:meth:`SearchClient.describe_blob` call otherwise.

        Because cached data may be stale, callers that absolutely need
        fresh data should call the searcher directly.
        """
        if blobref not in self.other_raw_dicts:
            return self.searcher.describe_blob(blobref)
        return BlobDescription(
            self.searcher,
            self.other_raw_dicts[blobref],
            self.other_raw_dicts,
        )

    def __repr__(self):
        shown_type = "(unknown)" if self.type is None else self.type
        shown_ref = "(unknown)" if self.blobref is None else self.blobref
        return "<camlistore.searchclient.BlobDescription %s %s>" % (
            shown_type,
            shown_ref,
        )
class ClaimMeta(object):
    """
    Metadata for one claim: a signed mutation against a permanode.

    The indexer replays claims in time order to compute a permanode's
    attribute map; :py:attr:`type` determines which of the remaining
    attributes are populated for a given claim.
    """

    def __init__(self, raw_dict):
        self.raw_dict = raw_dict

    @property
    def type(self):
        """Kind of mutation performed by this claim."""
        return self.raw_dict.get("type")

    @property
    def signer_blobref(self):
        """Blobref of the public key the claim's signature was verified against."""
        return self.raw_dict.get("signer")

    @property
    def attr(self):
        """Name of the mutated attribute, for attribute-mutating claims."""
        return str(self.raw_dict.get("attr"))

    @property
    def value(self):
        """Value applied by an attribute-mutating claim."""
        return self.raw_dict.get("value")

    @property
    def blobref(self):
        """Blobref of the underlying claim blob itself."""
        return self.raw_dict.get("blobref")

    @property
    def target_blobref(self):
        """Blobref of the claim's target, for claim types that have one."""
        return self.raw_dict.get("target")

    @property
    def time(self):
        """
        When the claim was made, as a :py:class:`datetime.datetime`, or
        None when no date is recorded. Timestamps order claims and let the
        indexer compute a permanode's state at any point in time.
        """
        stamp = self.raw_dict.get("date")
        if stamp is None:
            return None
        from dateutil.parser import parse
        return parse(stamp)

    @property
    def permanode_blobref(self):
        """Blobref of the permanode this claim applies to."""
        return self.raw_dict.get("permanode")

    def __repr__(self):
        pieces = ["camlistore.searchclient.ClaimMeta", self.type]
        # NOTE: attr is str(...)-wrapped above and therefore never None,
        # so the first branch is always taken (kept for parity).
        if self.attr is not None:
            pieces.append(self.attr + ":")
        if self.value is not None:
            pieces.append(repr(self.value))
        if self.target_blobref is not None:
            pieces.append(self.target_blobref)
        return "<%s>" % " ".join(pieces)
| {
"content_hash": "efd89662d9df3ae6010309e7788aac81",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 79,
"avg_line_length": 31.47462686567164,
"alnum_prop": 0.5919954476479514,
"repo_name": "apparentlymart/python-camlistore",
"id": "88a22cdcf07d5bc0869c01c1a90ec33df259868b",
"size": "10546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camlistore/searchclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58947"
}
],
"symlink_target": ""
} |
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan

# The Kriging model starts by defining a sampling plan, we use an optimal Latin Hypercube here
sp = samplingplan(2)  # 2-dimensional design space
X = sp.optimallhc(15)  # 15 initial sample locations

# Next, we define the problem we would like to solve
testfun = pyKriging.testfunctions().branin

# We generate our observed values based on our sampling plan and the test function
y = testfun(X)

print 'Setting up the Kriging Model'

# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
k.train(optimizer='ga')  # fit hyperparameters with a genetic algorithm
k.snapshot()  # record the model state for later plotting

# Infill loop: repeatedly add the two points the model is least certain
# about, then retrain, refining the surrogate model.
for i in range(5):
    newpoints = k.infill(2)
    for point in newpoints:
        print 'Adding point {}'.format(point)
        k.addPoint(point, testfun(point)[0])
    k.train()
    k.snapshot()

# #And plot the model
print 'Now plotting final results...'
k.plot()
| {
"content_hash": "a2472aabef39e1b163458ab8eddf251d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 94,
"avg_line_length": 28.176470588235293,
"alnum_prop": 0.7254697286012526,
"repo_name": "DailyActie/Surrogate-Model",
"id": "dfad144a290594a08fff08bffaedc4a12e43b65e",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/pyKriging-master/examples/2D_simple_train.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import permissions
from rest_framework import renderers
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from commonrepo.users.models import User as User
from .models import Snippet
from .permissions import IsOwnerOrReadOnly
from .serializers import SnippetSerializer
class SnippetViewSet(viewsets.ModelViewSet):
    """
    This endpoint presents code snippets.

    The `highlight` field presents a hyperlink to the highlighted HTML
    representation of the code snippet.

    The **owner** of the code snippet may update or delete instances
    of the code snippet.

    Try it yourself by logging in as one of these four users: **amy**, **max**,
    **jose** or **aziz**. The passwords are the same as the usernames.
    """
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer
    # Anonymous users get read-only access; writes additionally require
    # snippet ownership (enforced by IsOwnerOrReadOnly).
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly,)

    @detail_route(renderer_classes=(renderers.StaticHTMLRenderer,))
    def highlight(self, request, *args, **kwargs):
        # Serve the pre-rendered highlighted HTML stored on the snippet.
        snippet = self.get_object()
        return Response(snippet.highlighted)

    def perform_create(self, serializer):
        # Record the authenticated user as the snippet's owner on creation.
        serializer.save(owner=self.request.user)
| {
"content_hash": "dfc99ebd4c5a8f7c1686c366c5d38eb4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 34.8,
"alnum_prop": 0.7503192848020435,
"repo_name": "yrchen/CommonRepo",
"id": "a33462ec239a686d6b2461c8b0f41835a48a4b8d",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "commonrepo/snippets_api/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "209557"
},
{
"name": "JavaScript",
"bytes": "3462"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "336120"
},
{
"name": "Shell",
"bytes": "4523"
}
],
"symlink_target": ""
} |
"""
test__offline
"""
from __future__ import absolute_import
import re
import json as _json
from unittest import TestCase
import pytest
import plotly
from plotly import optional_imports
matplotlylib = optional_imports.get_module("plotly.matplotlylib")
if matplotlylib:
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
import matplotlib.pyplot as plt
PLOTLYJS = plotly.offline.offline.get_plotlyjs()
class PlotlyOfflineTestCase(TestCase):
    """Smoke tests for plotly.offline.iplot outside a notebook."""

    def setUp(self):
        pass

    def test_iplot_works_without_init_notebook_mode(self):
        # iplot must not require a prior init_notebook_mode() call.
        plotly.offline.iplot([{}])

    def test_iplot_works_after_you_call_init_notebook_mode(self):
        plotly.offline.init_notebook_mode()
        plotly.offline.iplot([{}])

    # Only defined when the optional matplotlib integration is available.
    if matplotlylib:

        @pytest.mark.matplotlib
        def test_iplot_mpl_works(self):
            # Generate matplotlib plot for tests
            fig = plt.figure()
            x = [10, 20, 30]
            y = [100, 200, 300]
            plt.plot(x, y)
            plotly.offline.iplot_mpl(fig)
class PlotlyOfflineMPLTestCase(TestCase):
    """Tests for offline HTML generation from matplotlib figures."""

    def setUp(self):
        pass

    def _read_html(self, file_url):
        """ Read and return the HTML contents from a file_url in the
        form e.g. file:///Users/chriddyp/Repos/plotly.py/plotly-temp.html
        """
        # NOTE(review): stripping *all* spaces from the path assumes the
        # file lives at a space-free location -- confirm before reuse.
        with open(file_url.replace("file://", "").replace(" ", "")) as f:
            return f.read()

    if matplotlylib:

        @pytest.mark.matplotlib
        def test_default_mpl_plot_generates_expected_html(self):
            # Generate matplotlib plot for tests
            fig = plt.figure()
            x = [10, 20, 30]
            y = [100, 200, 300]
            plt.plot(x, y)

            figure = plotly.tools.mpl_to_plotly(fig).to_dict()
            data = figure["data"]
            layout = figure["layout"]
            # Serialize with sorted keys so the JSON strings can be
            # searched for verbatim inside the generated HTML.
            data_json = _json.dumps(
                data, cls=plotly.utils.PlotlyJSONEncoder, sort_keys=True
            )
            layout_json = _json.dumps(
                layout, cls=plotly.utils.PlotlyJSONEncoder, sort_keys=True
            )
            html = self._read_html(plotly.offline.plot_mpl(fig))

            # blank out uid before comparisons
            data_json = re.sub('"uid": "[^"]+"', '"uid": ""', data_json)
            html = re.sub('"uid": "[^"]+"', '"uid": ""', html)

            # just make sure a few of the parts are in here
            # like PlotlyOfflineTestCase(TestCase) in test_core
            self.assertTrue(data_json in html)  # data is in there
            self.assertTrue(layout_json in html)  # layout is in there too
            self.assertTrue(PLOTLYJS in html)  # and the source code
            # and it's an <html> doc
            self.assertTrue(html.startswith("<html>") and html.endswith("</html>"))
| {
"content_hash": "68cf027fdab514103a82449cab3bd7b9",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 83,
"avg_line_length": 29.329896907216494,
"alnum_prop": 0.5859402460456942,
"repo_name": "plotly/python-api",
"id": "d36934d1eae317b3b12c5b080a78b38be2cd250e",
"size": "2845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/tests/test_optional/test_offline/test_offline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os
from contextlib import contextmanager
from collections import defaultdict
import importlib.util
from veros import logger, runtime_settings, runtime_state, timer
from veros.state import get_default_state, resize_dimension
from veros.variables import get_shape
# all variables that are re-named or unique to Veros
# all variables that are re-named or unique to Veros
# (a value of None marks a variable with no PyOM2 counterpart; it is
# skipped when copying state between the two libraries)
VEROS_TO_PYOM_VAR = dict(
    # do not exist in pyom
    time=None,
    prho=None,
    land_map=None,
    isle=None,
    boundary_mask=None,
    line_dir_south_mask=None,
    line_dir_east_mask=None,
    line_dir_north_mask=None,
    line_dir_west_mask=None,
)

# all setting that are re-named or unique to Veros
# (None entries are skipped when copying settings to PyOM2)
VEROS_TO_PYOM_SETTING = dict(
    # do not exist in pyom
    identifier=None,
    enable_noslip_lateral=None,
    restart_input_filename=None,
    restart_output_filename=None,
    restart_frequency=None,
    kappaH_min=None,
    enable_kappaH_profile=None,
    enable_Prandtl_tke=None,
    Prandtl_tke0=None,
    biharmonic_friction_cosPower=None,
    # constants
    pi=None,
    radius=None,
    degtom=None,
    mtodeg=None,
    omega=None,
    rho_0=None,
    grav=None,
)

# variables only available after PyOM2's streamfunction_init has run;
# they are copied separately (see pyom_from_state)
STREAMFUNCTION_VARS = ("psin", "dpsin", "line_psin")
def _load_fortran_module(module, path):
spec = importlib.util.spec_from_file_location(module, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def load_pyom(pyom_lib):
    """Load the PyOM2 fortran extension, preferring the MPI-enabled build.

    Raises RuntimeError when running on multiple processes with a
    non-MPI build of the library.
    """
    has_mpi = True
    try:
        pyom_obj = _load_fortran_module("pyOM_code_MPI", pyom_lib)
    except ImportError:
        # Fall back to the serial build.
        pyom_obj = _load_fortran_module("pyOM_code", pyom_lib)
        has_mpi = False

    if runtime_state.proc_num > 1 and not has_mpi:
        raise RuntimeError("Given PyOM2 library was not built with MPI support")

    return pyom_obj
@contextmanager
def suppress_stdout(stdout_fd=1):
    """Temporarily redirect file descriptor *stdout_fd* to /dev/null.

    The original descriptor is restored when the context exits, even on
    exceptions.
    """
    saved_fd = os.dup(stdout_fd)
    with open(os.devnull, "wb") as devnull:
        os.dup2(devnull.fileno(), stdout_fd)
    try:
        yield
    finally:
        # Restore the saved descriptor and release the duplicate.
        with os.fdopen(saved_fd, "wb") as saved:
            os.dup2(saved.fileno(), stdout_fd)
def pyom_from_state(state, pyom_obj, ignore_attrs=None, init_streamfunction=True):
    """Force-updates internal PyOM library state to match given Veros state.

    :param state: Veros state whose settings and variables are copied over.
    :param pyom_obj: loaded PyOM2 fortran module, mutated in place.
    :param ignore_attrs: optional iterable of attribute names to skip.
    :param init_streamfunction: when True, run PyOM2's streamfunction_init
        and copy the streamfunction variables afterwards.
    :returns: the mutated ``pyom_obj``.
    """
    if ignore_attrs is None:
        ignore_attrs = []

    pyom_modules = (
        pyom_obj.main_module,
        pyom_obj.isoneutral_module,
        pyom_obj.idemix_module,
        pyom_obj.tke_module,
        pyom_obj.eke_module,
    )

    def set_fortran_attr(attr, val):
        # fortran interface is all lower-case
        attr = attr.lower()
        for module in pyom_modules:
            if hasattr(module, attr):
                setattr(module, attr, val)
                break
        else:
            raise RuntimeError(f"Could not set attribute {attr} on Fortran library")

    # settings (None mappings have no PyOM2 counterpart and are skipped)
    for setting, val in state.settings.items():
        setting = VEROS_TO_PYOM_SETTING.get(setting, setting)
        if setting is None or setting in ignore_attrs:
            continue
        set_fortran_attr(setting, val)

    _override_settings(pyom_obj)

    # allocate variables
    if runtime_state.proc_num > 1:
        pyom_obj.my_mpi_init(runtime_settings.mpi_comm.py2f())
    else:
        pyom_obj.my_mpi_init(0)
    pyom_obj.pe_decomposition()
    pyom_obj.allocate_main_module()
    pyom_obj.allocate_isoneutral_module()
    pyom_obj.allocate_tke_module()
    pyom_obj.allocate_eke_module()
    pyom_obj.allocate_idemix_module()

    # set variables (streamfunction variables are handled separately below)
    for var, val in state.variables.items():
        var = VEROS_TO_PYOM_VAR.get(var, var)
        if var is None or var in ignore_attrs:
            continue
        if var in STREAMFUNCTION_VARS:
            continue
        set_fortran_attr(var, val)

    if init_streamfunction:
        with suppress_stdout():
            pyom_obj.streamfunction_init()
        for var in STREAMFUNCTION_VARS:
            set_fortran_attr(var, state.variables.get(var))

    # correct for 1-based indexing
    pyom_obj.main_module.tau += 1
    pyom_obj.main_module.taup1 += 1
    pyom_obj.main_module.taum1 += 1

    # diagnostics: map Veros diagnostic parameters onto PyOM2 settings
    diag_settings = (
        ("cfl_monitor", "output_frequency", "ts_monint"),
        ("tracer_monitor", "output_frequency", "trac_cont_int"),
        ("snapshot", "output_frequency", "snapint"),
        ("averages", "output_frequency", "aveint"),
        ("averages", "sampling_frequency", "avefreq"),
        ("overturning", "output_frequency", "overint"),
        ("overturning", "sampling_frequency", "overfreq"),
        ("energy", "output_frequency", "energint"),
        ("energy", "sampling_frequency", "energfreq"),
    )

    for diag, param, attr in diag_settings:
        if diag in state.diagnostics:
            # Bug fix: `diag` is a string key, so `getattr(diag, param)`
            # raised AttributeError; look the diagnostic object up first.
            set_fortran_attr(attr, getattr(state.diagnostics[diag], param))

    return pyom_obj
def _override_settings(pyom_obj):
    """Manually force some settings to ensure compatibility."""
    main = pyom_obj.main_module

    main.n_pes_i, main.n_pes_j = runtime_settings.num_proc

    # define processor boundary idx (1-based)
    proc_i, proc_j = runtime_state.proc_idx
    chunk_i = main.nx // main.n_pes_i
    chunk_j = main.ny // main.n_pes_j
    main.is_pe = chunk_i * proc_i + 1
    main.ie_pe = chunk_i * (proc_i + 1)
    main.js_pe = chunk_j * proc_j + 1
    main.je_pe = chunk_j * (proc_j + 1)

    # force settings that are not supported by Veros
    idemix = pyom_obj.idemix_module
    eke = pyom_obj.eke_module

    main.enable_streamfunction = True
    main.enable_hydrostatic = True
    main.congr_epsilon = 1e-8
    main.congr_max_iterations = 10_000
    main.enable_congrad_verbose = False
    main.enable_free_surface = False
    eke.enable_eke_leewave_dissipation = False
    idemix.enable_idemix_m2 = False
    idemix.enable_idemix_niw = False

    return pyom_obj
def state_from_pyom(pyom_obj):
    """Build a fresh Veros state populated from the given PyOM2 library state."""
    from veros.core.operators import numpy as npx

    state = get_default_state()

    pyom_modules = (
        pyom_obj.main_module,
        pyom_obj.isoneutral_module,
        pyom_obj.idemix_module,
        pyom_obj.tke_module,
        pyom_obj.eke_module,
    )

    def get_fortran_attr(attr):
        # fortran interface is all lower-case
        attr = attr.lower()
        for module in pyom_modules:
            if hasattr(module, attr):
                return getattr(module, attr)
        else:
            # for/else: only reached when no module carried the attribute
            raise RuntimeError(f"Could not get attribute {attr} from Fortran library")

    with state.settings.unlock():
        for setting in state.settings.fields():
            # skip settings that have no PyOM2 counterpart
            setting = VEROS_TO_PYOM_SETTING.get(setting, setting)
            if setting is None:
                continue
            state.settings.update({setting: get_fortran_attr(setting)})

    state.initialize_variables()

    # the "isle" dimension is only known after PyOM has counted islands
    resize_dimension(state, "isle", int(pyom_obj.main_module.nisle))

    with state.variables.unlock():
        state.variables.isle = npx.arange(state.dimensions["isle"])

        for var, val in state.variables.items():
            var = VEROS_TO_PYOM_VAR.get(var, var)
            if var is None:
                continue
            try:
                new_val = get_fortran_attr(var)
            except RuntimeError:
                # variable does not exist on the fortran side; keep default
                continue
            if new_val is None:
                continue
            try:
                # PyOM arrays may lack the leading/trailing axes Veros uses
                new_val = npx.broadcast_to(new_val, val.shape)
            except ValueError:
                raise ValueError(f"variable {var} has incompatible shapes: {val.shape}, {new_val.shape}")
            state.variables.update({var: new_val})

    return state
def setup_pyom(pyom_obj, set_parameter, set_grid, set_coriolis, set_topography, set_initial_conditions, set_forcing):
    """Run PyOM2's standard setup sequence using the given setup callbacks.

    Mirrors the initialization order of PyOM2's own driver: parameters,
    domain decomposition, allocation, grid, Coriolis, topography, initial
    conditions, streamfunction, then forcing. Each callback receives the
    fortran module object.
    """
    if runtime_state.proc_num > 1:
        pyom_obj.my_mpi_init(runtime_settings.mpi_comm.py2f())
    else:
        pyom_obj.my_mpi_init(0)

    set_parameter(pyom_obj)

    pyom_obj.pe_decomposition()
    pyom_obj.allocate_main_module()
    pyom_obj.allocate_isoneutral_module()
    pyom_obj.allocate_tke_module()
    pyom_obj.allocate_eke_module()
    pyom_obj.allocate_idemix_module()

    set_grid(pyom_obj)
    pyom_obj.calc_grid()

    set_coriolis(pyom_obj)
    pyom_obj.calc_beta()

    set_topography(pyom_obj)
    pyom_obj.calc_topo()
    pyom_obj.calc_spectral_topo()

    set_initial_conditions(pyom_obj)
    pyom_obj.calc_initial_conditions()

    pyom_obj.streamfunction_init()

    set_forcing(pyom_obj)
    pyom_obj.check_isoneutral_slope_crit()
def run_pyom(pyom_obj, set_forcing, after_timestep=None):
    """Run PyOM2's main time-stepping loop until ``runlen`` is reached.

    ``set_forcing(pyom_obj)`` is called at the start of every step;
    ``after_timestep(pyom_obj)``, if callable, is called after each step.
    Per-section wall times are collected in ``timers`` and logged at the end.
    """
    timers = defaultdict(timer.Timer)

    # short aliases for the fortran library and its sub-modules
    f = pyom_obj
    m = pyom_obj.main_module
    idm = pyom_obj.idemix_module
    ekm = pyom_obj.eke_module
    tkm = pyom_obj.tke_module

    logger.info(f"Starting integration for {float(m.runlen):.2e}s")

    m.time = 0.0

    while m.time < m.runlen:
        logger.info(f"Current iteration: {m.itt}")

        with timers["main"]:
            set_forcing(pyom_obj)

            if idm.enable_idemix:
                f.set_idemix_parameter()

            f.set_eke_diffusivities()
            f.set_tke_diffusivities()

            with timers["momentum"]:
                f.momentum()

            with timers["temperature"]:
                f.thermodynamics()

            if ekm.enable_eke or tkm.enable_tke or idm.enable_idemix:
                f.calculate_velocity_on_wgrid()

            with timers["eke"]:
                if ekm.enable_eke:
                    f.integrate_eke()

            with timers["idemix"]:
                if idm.enable_idemix:
                    f.integrate_idemix()

            with timers["tke"]:
                if tkm.enable_tke:
                    f.integrate_tke()

            """
            Main boundary exchange
            for density, temp and salt this is done in integrate_tempsalt.f90
            """
            f.border_exchg_xyz(
                m.is_pe - m.onx, m.ie_pe + m.onx, m.js_pe - m.onx, m.je_pe + m.onx, m.u[:, :, :, m.taup1 - 1], m.nz
            )
            f.setcyclic_xyz(
                m.is_pe - m.onx, m.ie_pe + m.onx, m.js_pe - m.onx, m.je_pe + m.onx, m.u[:, :, :, m.taup1 - 1], m.nz
            )
            f.border_exchg_xyz(
                m.is_pe - m.onx, m.ie_pe + m.onx, m.js_pe - m.onx, m.je_pe + m.onx, m.v[:, :, :, m.taup1 - 1], m.nz
            )
            f.setcyclic_xyz(
                m.is_pe - m.onx, m.ie_pe + m.onx, m.js_pe - m.onx, m.je_pe + m.onx, m.v[:, :, :, m.taup1 - 1], m.nz
            )

            if tkm.enable_tke:
                f.border_exchg_xyz(
                    m.is_pe - m.onx,
                    m.ie_pe + m.onx,
                    m.js_pe - m.onx,
                    m.je_pe + m.onx,
                    tkm.tke[:, :, :, m.taup1 - 1],
                    m.nz,
                )
                f.setcyclic_xyz(
                    m.is_pe - m.onx,
                    m.ie_pe + m.onx,
                    m.js_pe - m.onx,
                    m.je_pe + m.onx,
                    tkm.tke[:, :, :, m.taup1 - 1],
                    m.nz,
                )

            if ekm.enable_eke:
                f.border_exchg_xyz(
                    m.is_pe - m.onx,
                    m.ie_pe + m.onx,
                    m.js_pe - m.onx,
                    m.je_pe + m.onx,
                    ekm.eke[:, :, :, m.taup1 - 1],
                    m.nz,
                )
                f.setcyclic_xyz(
                    m.is_pe - m.onx,
                    m.ie_pe + m.onx,
                    m.js_pe - m.onx,
                    m.je_pe + m.onx,
                    ekm.eke[:, :, :, m.taup1 - 1],
                    m.nz,
                )

            if idm.enable_idemix:
                f.border_exchg_xyz(
                    m.is_pe - m.onx,
                    m.ie_pe + m.onx,
                    m.js_pe - m.onx,
                    m.je_pe + m.onx,
                    idm.e_iw[:, :, :, m.taup1 - 1],
                    m.nz,
                )
                f.setcyclic_xyz(
                    m.is_pe - m.onx,
                    m.ie_pe + m.onx,
                    m.js_pe - m.onx,
                    m.je_pe + m.onx,
                    idm.e_iw[:, :, :, m.taup1 - 1],
                    m.nz,
                )

            # diagnose vertical velocity at taup1
            f.vertical_velocity()

            # diagnose isoneutral streamfunction regardless of output settings
            f.isoneutral_diag_streamfunction()

        # shift time
        m.itt += 1
        m.time += m.dt_tracer

        if callable(after_timestep):
            after_timestep(pyom_obj)

        # rotate the three time levels (1-based fortran indices)
        orig_taum1 = int(m.taum1)
        m.taum1 = m.tau
        m.tau = m.taup1
        m.taup1 = orig_taum1

        # NOTE: benchmarks parse this, do not change / remove
        logger.debug("Time step took {}s", timers["main"].last_time)

    logger.debug("Timing summary:")
    logger.debug(" setup time summary = {}s", timers["setup"].total_time)
    logger.debug(" main loop time summary = {}s", timers["main"].total_time)
    logger.debug(" momentum = {}s", timers["momentum"].total_time)
    logger.debug(" thermodynamics = {}s", timers["temperature"].total_time)
    logger.debug(" EKE = {}s", timers["eke"].total_time)
    logger.debug(" IDEMIX = {}s", timers["idemix"].total_time)
    logger.debug(" TKE = {}s", timers["tke"].total_time)
def _generate_random_var(state, var):
    """Generate a random value of the right shape for variable *var* of *state*.

    Uses the global (unseeded) numpy RNG; intended for testing only
    (see get_random_state).
    """
    import numpy as onp

    meta = state.var_meta[var]
    shape = get_shape(state.dimensions, meta.dims)
    global_shape = get_shape(state.dimensions, meta.dims, local=False)

    if var == "kbot":
        # random ocean depth indices, plus 10 randomly placed land cells
        val = onp.zeros(shape)
        val[2:-2, 2:-2] = onp.random.randint(1, state.dimensions["zt"], size=(shape[0] - 4, shape[1] - 4))
        island_mask = onp.random.choice(val[3:-3, 3:-3].size, size=10)
        val[3:-3, 3:-3].flat[island_mask] = 0
        return val

    if var in ("dxt", "dxu", "dyt", "dyu"):
        # horizontal grid spacings: uniform cells covering the domain
        # (80 degrees or 10,000 km) with 1% multiplicative jitter
        if state.settings.coord_degree:
            val = 80 / global_shape[0] * (1 + 1e-2 * onp.random.randn(*shape))
        else:
            val = 10_000e3 / global_shape[0] * (1 + 1e-2 * onp.random.randn(*shape))
        return val

    if var in ("dzt", "dzw"):
        # vertical grid spacings covering 6000 m with 1% jitter
        val = 6000 / global_shape[0] * (1 + 1e-2 * onp.random.randn(*shape))
        return val

    if onp.issubdtype(onp.dtype(meta.dtype), onp.floating):
        val = onp.random.randn(*shape)
        if var in ("salt",):
            # keep salinity near a realistic mean of 35
            val = 35 + val
        return val

    if onp.issubdtype(onp.dtype(meta.dtype), onp.integer):
        val = onp.random.randint(0, 100, size=shape)
        return val

    if onp.issubdtype(onp.dtype(meta.dtype), onp.bool_):
        # NOTE(review): randint's upper bound is exclusive, so (0, 1) can
        # only produce 0 -- boolean variables are always all-False here.
        # Confirm whether randint(0, 2) was intended.
        return onp.random.randint(0, 1, size=shape, dtype="bool")

    raise TypeError(f"got unrecognized dtype: {meta.dtype}")
def get_random_state(pyom2_lib=None, extra_settings=None):
    """Generates random Veros and PyOM states (for testing).

    Returns just the Veros state when *pyom2_lib* is None; otherwise a
    ``(state, pyom_obj)`` tuple with the PyOM2 library mirroring the
    random Veros state.
    """
    from veros.core import numerics, streamfunction

    if extra_settings is None:
        extra_settings = {}

    state = get_default_state()
    settings = state.settings

    with settings.unlock():
        settings.update(extra_settings)

    state.initialize_variables()
    state.variables.__locked__ = False  # leave variables unlocked

    for var, meta in state.var_meta.items():
        if not meta.active:
            continue
        # keep the default time level indices
        if var in ("tau", "taup1", "taum1"):
            continue
        val = _generate_random_var(state, var)
        setattr(state.variables, var, val)

    # ensure that masks and geometries are consistent with grid spacings
    numerics.calc_grid(state)
    numerics.calc_topo(state)
    streamfunction.streamfunction_init(state)

    if pyom2_lib is None:
        return state

    # mirror the random Veros state into the PyOM2 library
    pyom_obj = load_pyom(pyom2_lib)
    pyom_obj = pyom_from_state(state, pyom_obj)
    return state, pyom_obj
| {
"content_hash": "e1bf7ba9bcc7aee65f5caab03495132d",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 117,
"avg_line_length": 30.017110266159698,
"alnum_prop": 0.5556400025334093,
"repo_name": "dionhaefner/veros",
"id": "f3ce3cc3e73a3ecedf9f039f4682bbbfd8086ca6",
"size": "15789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "veros/pyom_compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "310"
},
{
"name": "Cuda",
"bytes": "3112"
},
{
"name": "Cython",
"bytes": "4658"
},
{
"name": "Python",
"bytes": "732116"
}
],
"symlink_target": ""
} |
""" Service unit testing best practice, with an alternative dependency.
"""
import pytest
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from nameko.rpc import rpc
from nameko.testing.services import worker_factory
# using community extension from http://pypi.python.org/pypi/nameko-sqlalchemy
from nameko_sqlalchemy import Session
Base = declarative_base()
class Result(Base):
    """ORM model persisted by the service under test; one row per saved value."""
    __tablename__ = 'model'
    id = Column(Integer, primary_key=True)
    value = Column(String(64))
class Service:
    """ Service under test
    """
    name = "service"

    # Declarative SQLAlchemy session dependency (nameko-sqlalchemy);
    # replaced with a plain test session in the unit test below.
    db = Session(Base)

    @rpc
    def save(self, value):
        # Persist a single Result row holding the given value.
        result = Result(value=value)
        self.db.add(result)
        self.db.commit()
@pytest.fixture
def session():
    """ Create a test database and session
    """
    # In-memory SQLite keeps the test hermetic and fast.
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    session_cls = sessionmaker(bind=engine)
    return session_cls()
def test_service(session):
    # create instance, providing the test database session
    service = worker_factory(Service, db=session)

    # verify ``save`` logic by querying the test database
    service.save("helloworld")
    assert session.query(Result.value).all() == [("helloworld",)]
| {
"content_hash": "bc78a28e34b58fc3e17f2776822d8f5e",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 24.214285714285715,
"alnum_prop": 0.6969026548672567,
"repo_name": "Alecto3-D/testable-greeter",
"id": "da8d951427c3fa54384e1b42bcffa566b74d056f",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nameko/docs/examples/testing/alternative_dependency_unit_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1340"
},
{
"name": "JavaScript",
"bytes": "6003191"
},
{
"name": "Makefile",
"bytes": "7521"
},
{
"name": "Python",
"bytes": "4833445"
},
{
"name": "RAML",
"bytes": "62192"
},
{
"name": "Shell",
"bytes": "3682"
}
],
"symlink_target": ""
} |
import string
from datetime import timedelta, datetime
import csv
import os
import shutil
import pickle
# math
import numpy as np
from scipy.sparse import *
# mabed
import mabed.utils as utils
__authors__ = "Adrien Guille, Nicolas Dugué"
__email__ = "adrien.guille@univ-lyon2.fr"
class Corpus:
    """A tweet corpus loaded from a delimited CSV file.

    The input file must contain a header row with 'date' and 'text' columns.
    On construction the file is scanned once to compute the corpus time span
    and a frequency-filtered vocabulary; call :meth:`discretize` afterwards
    to build the per-time-slice frequency matrices and slice files.
    """

    def __init__(self, source_file_path, stopwords_file_path, min_absolute_freq=10, max_relative_freq=0.4, separator='\t', save_voc=False):
        """Scan the corpus, computing its time span and vocabulary.

        :param source_file_path: path to the delimited input file
        :param stopwords_file_path: path to a stop-word list
        :param min_absolute_freq: a word must occur strictly more often than
            this to enter the vocabulary
        :param max_relative_freq: words occurring in a larger fraction of
            tweets than this are discarded
        :param separator: column delimiter of the input file
        :param save_voc: if True, pickle the raw (word, frequency) list to
            'vocabulary.pickle'
        """
        self.source_file_path = source_file_path
        self.size = 0
        # sentinel dates: any real timestamp is earlier/later respectively
        self.start_date = '3000-01-01 00:00:00'
        self.end_date = '1000-01-01 00:00:00'
        self.separator = separator

        # load stop-words
        self.stopwords = utils.load_stopwords(stopwords_file_path)

        # identify features
        with open(source_file_path, 'r') as input_file:
            csv_reader = csv.reader(input_file, delimiter=self.separator)
            header = next(csv_reader)
            text_column_index = header.index('text')
            date_column_index = header.index('date')
            word_frequency = {}
            for line in csv_reader:
                self.size += 1
                words = self.tokenize(line[text_column_index])
                date = line[date_column_index]
                # Bug fix: these two checks used to be `if`/`elif`, so with
                # chronologically ordered input the start date was never
                # updated and kept its year-3000 sentinel.
                if date > self.end_date:
                    self.end_date = date
                if date < self.start_date:
                    self.start_date = date
                # update word frequency
                for word in words:
                    if len(word) > 1:
                        frequency = word_frequency.get(word)
                        if frequency is None:
                            frequency = 0
                        word_frequency[word] = frequency + 1

        # sort words w.r.t frequency
        vocabulary = list(word_frequency.items())
        vocabulary.sort(key=lambda x: x[1], reverse=True)
        if save_voc:
            with open('vocabulary.pickle', 'wb') as output_file:
                pickle.dump(vocabulary, output_file)

        # construct the vocabulary map, keeping only words that pass the
        # absolute/relative frequency filters and are not stop-words
        self.vocabulary = {}
        vocabulary_size = 0
        for word, frequency in vocabulary:
            if frequency > min_absolute_freq and float(frequency / self.size) < max_relative_freq and word not in self.stopwords:
                self.vocabulary[word] = vocabulary_size
                vocabulary_size += 1

        self.start_date = datetime.strptime(self.start_date, "%Y-%m-%d %H:%M:%S")
        self.end_date = datetime.strptime(self.end_date, "%Y-%m-%d %H:%M:%S")
        print(' Corpus: %i tweets, spanning from %s to %s' % (self.size,
                                                              self.start_date,
                                                              self.end_date))
        print(' Vocabulary: %d distinct words' % vocabulary_size)

        # populated by discretize()
        self.time_slice_count = None
        self.tweet_count = None
        self.global_freq = None
        self.mention_freq = None
        self.time_slice_length = None

    def discretize(self, time_slice_length):
        """Partition the corpus into time slices of *time_slice_length* minutes.

        Writes one text file per slice into a fresh 'corpus' directory and
        fills the word/mention frequency matrices.
        """
        self.time_slice_length = time_slice_length

        # clean the data directory
        if os.path.exists('corpus'):
            shutil.rmtree('corpus')
        os.makedirs('corpus')

        # compute the total number of time-slices
        time_delta = (self.end_date - self.start_date)
        time_delta = time_delta.total_seconds() / 60
        self.time_slice_count = int(time_delta // self.time_slice_length) + 1
        self.tweet_count = np.zeros(self.time_slice_count)
        print(' Number of time-slices: %d' % self.time_slice_count)

        # create empty files
        # (fix: use a context manager; handles used to be left open until
        # garbage collection)
        for time_slice in range(self.time_slice_count):
            with open('corpus/' + str(time_slice), 'w') as dummy_file:
                dummy_file.write('')

        # compute word frequency
        self.global_freq = dok_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        self.mention_freq = dok_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        with open(self.source_file_path, 'r') as input_file:
            csv_reader = csv.reader(input_file, delimiter=self.separator)
            header = next(csv_reader)
            text_column_index = header.index('text')
            date_column_index = header.index('date')
            for line in csv_reader:
                tweet_date = datetime.strptime(line[date_column_index], "%Y-%m-%d %H:%M:%S")
                time_delta = (tweet_date - self.start_date)
                time_delta = time_delta.total_seconds() / 60
                time_slice = int(time_delta / self.time_slice_length)
                self.tweet_count[time_slice] += 1
                # tokenize the tweet and update word frequency
                tweet_text = line[text_column_index]
                words = self.tokenize(tweet_text)
                mention = '@' in tweet_text
                for word in set(words):
                    word_id = self.vocabulary.get(word)
                    if word_id is not None:
                        self.global_freq[word_id, time_slice] += 1
                        if mention:
                            self.mention_freq[word_id, time_slice] += 1
                with open('corpus/' + str(time_slice), 'a') as time_slice_file:
                    time_slice_file.write(tweet_text + '\n')
        # convert to CSR for fast row slicing later on
        self.global_freq = self.global_freq.tocsr()
        self.mention_freq = self.mention_freq.tocsr()

    def to_date(self, time_slice):
        """Return the datetime at which the given time slice starts."""
        a_date = self.start_date + timedelta(minutes=time_slice * self.time_slice_length)
        return a_date

    def tokenize(self, text):
        """Split *text* on whitespace, trim punctuation and lower-case tokens."""
        # split the documents into tokens based on whitespaces
        raw_tokens = text.split()
        # trim punctuation and convert to lower case
        return [token.strip(string.punctuation).lower() for token in raw_tokens if len(token) > 1 and 'http' not in token]

    def cooccurring_words(self, event, p):
        """Return up to *p* words that co-occur most often with the event's main word."""
        main_word = event[2]
        word_frequency = {}
        for i in range(event[1][0], event[1][1] + 1):
            with open('corpus/' + str(i), 'r') as input_file:
                for tweet_text in input_file.readlines():
                    words = self.tokenize(tweet_text)
                    if event[2] in words:
                        for word in words:
                            if word != main_word:
                                if len(word) > 1 and self.vocabulary.get(word) is not None:
                                    frequency = word_frequency.get(word)
                                    if frequency is None:
                                        frequency = 0
                                    word_frequency[word] = frequency + 1
        # sort words w.r.t frequency
        vocabulary = list(word_frequency.items())
        vocabulary.sort(key=lambda x: x[1], reverse=True)
        top_cooccurring_words = []
        for word, frequency in vocabulary:
            top_cooccurring_words.append(word)
            if len(top_cooccurring_words) == p:
                break
        # Bug fix: the method used to fall off the end and return None when
        # fewer than p co-occurring words were found; return the (possibly
        # shorter) list instead.
        return top_cooccurring_words
| {
"content_hash": "83b9d46ae7c052698a690adae50363a8",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 139,
"avg_line_length": 45.3875,
"alnum_prop": 0.5414486367391903,
"repo_name": "AdrienGuille/pyMABED",
"id": "f9638c67e71f6f3fe552ecdc65c662487626ab10",
"size": "7286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mabed/corpus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14527"
},
{
"name": "HTML",
"bytes": "3558"
},
{
"name": "Python",
"bytes": "23494"
}
],
"symlink_target": ""
} |
import os

from twilio.rest import Client

# To set up environmental variables, see http://twil.io/secure
ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']

client = Client(ACCOUNT_SID, AUTH_TOKEN)

# Send one SMS through a Notify service instance; replace the ISXXXX...
# placeholder with your own Notify service SID.
notification = client.notify.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .notifications.create(
        # We recommend using a GUID or other anonymized identifier for Identity
        identity='00000001',
        body='Knok-Knok! This is your first Notify SMS')

print(notification.sid)
| {
"content_hash": "66883553b03de5eea2269e5ec680b718",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.7420560747663552,
"repo_name": "TwilioDevEd/api-snippets",
"id": "39f91f6c214e363313648c6ada649149dd4184ce",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifications/sms-quickstart/send-notification/send-notification.7.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
} |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import GCC_ARM_PATH, GCC_CR_PATH, GCC_CS_PATH, CW_EWL_PATH, CW_GCC_PATH
from workspace_tools.settings import GOANNA_PATH
from workspace_tools.hooks import hook_tool
class GCC(mbedToolchain):
    """Base class for the GCC family of cross toolchains (arm-none-eabi).

    Builds the command lines for the assembler, C/C++ compilers, linker,
    archiver and objcopy from the target core and the build options.
    Subclasses (GCC_ARM, GCC_CR, ...) mostly differ in the installation
    path passed as ``tool_path`` and in extra flags.
    """
    LINKER_EXT = '.ld'
    LIBRARY_EXT = '.a'

    STD_LIB_NAME = "lib%s.a"
    # When True, link() lists the libraries twice on the command line to
    # resolve the circular dependency between the mbed library and the clib.
    CIRCULAR_DEPENDENCIES = True
    # Matches the "<line>:[<col>:] <severity>: <message>" tail of a GCC
    # diagnostic (the leading file path is stripped off before matching).
    # Raw string so "\d" is a regex escape, not a string escape.
    DIAGNOSTIC_PATTERN = re.compile(r'((?P<line>\d+):)(\d+:)? (?P<severity>warning|error): (?P<message>.+)')

    def __init__(self, target, options=None, notify=None, macros=None, silent=False, tool_path=""):
        """Configure all tool command lines for *target*.

        target    -- target description object (provides .core and .name)
        tool_path -- directory containing the arm-none-eabi-* binaries
        """
        mbedToolchain.__init__(self, target, options, notify, macros, silent)

        # Map the target core name onto GCC's -mcpu spelling.
        if target.core == "Cortex-M0+":
            cpu = "cortex-m0plus"
        elif target.core == "Cortex-M4F":
            cpu = "cortex-m4"
        else:
            cpu = target.core.lower()

        self.cpu = ["-mcpu=%s" % cpu]
        if target.core.startswith("Cortex"):
            self.cpu.append("-mthumb")

        if target.core == "Cortex-M4F":
            self.cpu.append("-mfpu=fpv4-sp-d16")
            self.cpu.append("-mfloat-abi=softfp")

        if target.core == "Cortex-A9":
            self.cpu.append("-mthumb-interwork")
            self.cpu.append("-marm")
            self.cpu.append("-march=armv7-a")
            self.cpu.append("-mfpu=vfpv3")
            self.cpu.append("-mfloat-abi=hard")
            self.cpu.append("-mno-unaligned-access")

        # Note: We are using "-O2" instead of "-Os" to avoid this known GCC bug:
        # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762
        common_flags = ["-c", "-Wall", "-Wextra",
            "-Wno-unused-parameter", "-Wno-missing-field-initializers",
            "-fmessage-length=0", "-fno-exceptions", "-fno-builtin",
            "-ffunction-sections", "-fdata-sections",
            "-MMD", "-fno-delete-null-pointer-checks", "-fomit-frame-pointer"
            ] + self.cpu

        if "save-asm" in self.options:
            common_flags.append("-save-temps")

        if "debug-info" in self.options:
            common_flags.append("-g")
            common_flags.append("-O0")
        else:
            common_flags.append("-O2")

        main_cc = join(tool_path, "arm-none-eabi-gcc")
        main_cppc = join(tool_path, "arm-none-eabi-g++")
        self.asm = [main_cc, "-x", "assembler-with-cpp"] + common_flags
        # With the "analyze" option, the compilers are wrapped by Goanna.
        if "analyze" not in self.options:
            self.cc  = [main_cc, "-std=gnu99"] + common_flags
            self.cppc =[main_cppc, "-std=gnu++98", "-fno-rtti"] + common_flags
        else:
            self.cc  = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "-std=gnu99", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
            self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cppc.replace('\\', '/'), "-std=gnu++98", "-fno-rtti", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags

        self.ld = [join(tool_path, "arm-none-eabi-gcc"), "-Wl,--gc-sections", "-Wl,--wrap,main"] + self.cpu
        self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc"]

        self.ar = join(tool_path, "arm-none-eabi-ar")
        self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")

    def assemble(self, source, object, includes):
        """Return the assembler command line(s) for one source file."""
        return [self.hook.get_cmdline_assembler(self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-o", object, source])]

    def parse_dependencies(self, dep_path):
        """Parse a gcc -MMD ``.d`` file and return the list of dependency paths.

        The first line (the target) is skipped; continuation backslashes are
        stripped and backslash-escaped spaces inside paths are preserved.
        """
        dependencies = []
        # Close the file deterministically instead of leaking the handle.
        with open(dep_path) as dep_file:
            dep_lines = dep_file.readlines()[1:]
        for line in dep_lines:
            entry = line.replace('\\\n', '').strip()
            if entry:
                # GCC might list more than one dependency on a single line, in this case
                # the dependencies are separated by a space. However, a space might also
                # indicate an actual space character in a dependency path, but in this case
                # the space character is prefixed by a backslash.
                # Temporary replace all '\ ' with a special char that is not used (\a in this
                # case) to keep them from being interpreted by 'split' (they will be converted
                # back later to a space char)
                entry = entry.replace('\\ ', '\a')
                if entry.find(" ") == -1:
                    dependencies.append(entry.replace('\a', ' '))
                else:
                    dependencies = dependencies + [f.replace('\a', ' ') for f in entry.split(" ")]
        return dependencies

    def parse_output(self, output):
        """Scan compiler output and forward diagnostics via cc_info().

        GCC splits a diagnostic over two lines; a small state machine first
        captures the file/context line (WHERE), then the severity/message
        line (WHAT).
        """
        # The warning/error notification is multiline
        WHERE, WHAT = 0, 1
        state, file, message = WHERE, None, None
        for line in output.splitlines():
            match = self.goanna_parse_line(line)
            if match is not None:
                self.cc_info(
                    match.group('severity').lower(),
                    match.group('file'),
                    match.group('line'),
                    match.group('message'),
                    target_name=self.target.name,
                    toolchain_name=self.name
                )
                continue

            # Each line should start with the file information: "filepath: ..."
            # i should point past the file path                               ^
            # avoid the first column in Windows (C:\)
            i = line.find(':', 2)
            if i == -1: continue

            if state == WHERE:
                file = line[:i]
                message = line[i+1:].strip() + ' '
                state = WHAT

            elif state == WHAT:
                match = GCC.DIAGNOSTIC_PATTERN.match(line[i+1:])
                if match is None:
                    state = WHERE
                    continue

                self.cc_info(
                    match.group('severity'),
                    file, match.group('line'),
                    message + match.group('message')
                )

    def archive(self, objects, lib_path):
        """Create (or replace) the static library *lib_path* from *objects*."""
        self.default_cmd([self.ar, "rcs", lib_path] + objects)

    def link(self, output, objects, libraries, lib_dirs, mem_map):
        """Link *objects* and *libraries* into *output* using script *mem_map*."""
        libs = []
        for l in libraries:
            name, _ = splitext(basename(l))
            libs.append("-l%s" % name[3:])
        libs.extend(["-l%s" % l for l in self.sys_libs])

        # NOTE: There is a circular dependency between the mbed library and the clib
        # We could define a set of weak symbols to satisfy the clib dependencies in "sys.o",
        # but if an application uses only clib symbols and not mbed symbols, then the final
        # image is not correctly retargeted
        if self.CIRCULAR_DEPENDENCIES:
            libs.extend(libs)

        self.default_cmd(self.hook.get_cmdline_linker(self.ld + ["-T%s" % mem_map, "-o", output] +
            objects + ["-L%s" % L for L in lib_dirs] + libs))

    @hook_tool
    def binary(self, resources, elf, bin):
        """Convert the linked ELF into a raw binary image with objcopy."""
        self.default_cmd(self.hook.get_cmdline_binary([self.elf2bin, "-O", "binary", elf, bin]))
class GCC_ARM(GCC):
    """GCC toolchain configured for the GNU Tools for ARM Embedded build."""

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_ARM_PATH)

        # Link against the newlib-nano C library.
        self.ld.append("--specs=nano.specs")

        # These targets need float support forced into nano printf/scanf;
        # note the two groups spell the -u option differently.
        spaced_float_targets = ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]
        joined_float_targets = ["RZ_A1H", "ARCH_MAX", "DISCO_F407VG", "DISCO_F429ZI", "DISCO_F469NI", "NUCLEO_F401RE", "NUCLEO_F411RE", "NUCLEO_F446RE", "ELMO_F411RE", "MTS_MDOT_F411RE", "MTS_DRAGONFLY_F411RE"]
        if target.name in spaced_float_targets:
            self.ld.extend(["-u _printf_float", "-u _scanf_float"])
        elif target.name in joined_float_targets:
            self.ld.extend(["-u_printf_float", "-u_scanf_float"])

        self.sys_libs.append("nosys")
class GCC_CR(GCC):
    """GCC toolchain as shipped with Code Red / LPCXpresso."""

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_CR_PATH)

        # Code Red specific predefines, applied to both C and C++ compiles.
        for flag in ["-D__NEWLIB__", "-D__CODE_RED", "-D__USE_CMSIS", "-DCPP_USE_HEAP"]:
            self.cc.append(flag)
            self.cppc.append(flag)

        # Use latest gcc nanolib
        self.ld.append("--specs=nano.specs")
        if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
            self.ld.extend(["-u _printf_float", "-u _scanf_float"])
        self.ld += ["-nostdlib"]
class GCC_CS(GCC):
    """GCC toolchain from the Sourcery CodeBench (CodeSourcery) distribution.

    Identical to the base GCC toolchain except for the installation path.
    """

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_CS_PATH)
class GCC_CW(GCC):
    """Common base for the CodeWarrior-bundled GCC toolchains."""

    # Maps a target core onto the EWL library architecture directory name.
    ARCH_LIB = {
        "Cortex-M0+": "armv6-m",
    }

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC.__init__(self, target, options, notify, macros, silent, CW_GCC_PATH)
class GCC_CW_EWL(GCC_CW):
    """CodeWarrior GCC using the EWL (Embedded Warrior Library) runtime."""

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC_CW.__init__(self, target, options, notify, macros, silent)

        # Compiler: swap the standard headers for the EWL ones.
        ewl_c_include = join(CW_EWL_PATH, "EWL_C", "include")
        ewl_cpp_include = join(CW_EWL_PATH, "EWL_C++", "include")
        common = [
            '-mfloat-abi=soft',
            '-nostdinc', '-I%s' % ewl_c_include,
        ]
        self.cc += common + [
            '-include', join(ewl_c_include, 'lib_c99.prefix')
        ]
        self.cppc += common + [
            '-nostdinc++', '-I%s' % ewl_cpp_include,
            '-include', join(ewl_cpp_include, 'lib_ewl_c++.prefix')
        ]

        # Linker: EWL supplies its own runtime, so drop the default system
        # libraries and the circular-dependency workaround, and relink via g++
        # with the ewl_c++ specs.
        self.sys_libs = []
        self.CIRCULAR_DEPENDENCIES = False
        self.ld = [join(CW_GCC_PATH, "arm-none-eabi-g++"),
                   "-Xlinker --gc-sections",
                   "-L%s" % join(CW_EWL_PATH, "lib", GCC_CW.ARCH_LIB[target.core]),
                   "-n", "-specs=ewl_c++.specs", "-mfloat-abi=soft",
                   "-Xlinker --undefined=__pformatter_", "-Xlinker --defsym=__pformatter=__pformatter_",
                   "-Xlinker --undefined=__sformatter", "-Xlinker --defsym=__sformatter=__sformatter",
                   ] + self.cpu
class GCC_CW_NEWLIB(GCC_CW):
    """CodeWarrior GCC using the default newlib runtime (no EWL overrides)."""

    def __init__(self, target, options=None, notify=None, macros=None, silent=False):
        GCC_CW.__init__(self, target, options, notify, macros, silent)
| {
"content_hash": "c1d89d2e6d333e69018f9d95311953c2",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 208,
"avg_line_length": 42.70038910505836,
"alnum_prop": 0.5644250045562238,
"repo_name": "K4zuki/mbed",
"id": "d6746ccc4b981a4bae16bd2155ea541d1fb1fa3c",
"size": "10974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workspace_tools/toolchains/gcc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4129310"
},
{
"name": "C",
"bytes": "108988172"
},
{
"name": "C++",
"bytes": "6363678"
},
{
"name": "CMake",
"bytes": "4724"
},
{
"name": "HTML",
"bytes": "819320"
},
{
"name": "JavaScript",
"bytes": "1494"
},
{
"name": "Makefile",
"bytes": "181"
},
{
"name": "Objective-C",
"bytes": "59134"
},
{
"name": "Python",
"bytes": "638934"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
import threading
import weakref
from django.utils.inspect import func_accepts_kwargs
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
# Identity key for "no sender": receivers registered with sender=None carry
# this key and therefore match any sender during dispatch.
NONE_ID = _make_id(None)

# Sentinel cached per-sender to mark "no receivers for this sender".
NO_RECEIVERS = object()
class Signal:
    """
    Base class for all signals

    Internal attributes:

        receivers
            { receiverkey (id) : weakref(receiver) }
    """
    def __init__(self, providing_args=None, use_caching=False):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send() call.
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        self.providing_args = set(providing_args)
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        # Set lazily by _remove_receiver() when a weakly-referenced receiver
        # dies; the actual list cleanup happens later under self.lock.
        self._dead_receivers = False

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak referenceable.

                Receivers must be able to accept keyword arguments.

                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                a Python object, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            assert callable(receiver), "Signal receivers must be callable."

            # Check for **kwargs
            if not func_accepts_kwargs(receiver):
                raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")

        # The lookup key makes (receiver, sender) pairs unique; an explicit
        # dispatch_uid takes the place of the receiver's identity.
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods: a plain weakref to a bound method dies
            # immediately, so use WeakMethod and track the instance instead.
            if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
                ref = weakref.WeakMethod
                receiver_object = receiver.__self__
            receiver = ref(receiver)
            # Flag dead-receiver cleanup when the referent is collected.
            weakref.finalize(receiver_object, self._remove_receiver)

        with self.lock:
            self._clear_dead_receivers()
            # Avoid duplicate registrations for the same (receiver, sender).
            if not any(r_key == lookup_key for r_key, _ in self.receivers):
                self.receivers.append((lookup_key, receiver))
            self.sender_receivers_cache.clear()

    def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect
        """
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        disconnected = False
        with self.lock:
            self._clear_dead_receivers()
            # Linear scan; at most one entry can match thanks to the
            # duplicate check in connect().
            for index in range(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    disconnected = True
                    del self.receivers[index]
                    break
            self.sender_receivers_cache.clear()
        return disconnected

    def has_listeners(self, sender=None):
        # True if at least one live receiver would fire for this sender.
        return bool(self._live_receivers(sender))

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop. So it's possible that all receivers
        won't be called if an error is raised.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Return a list of tuple pairs [(receiver, response), ... ].
        """
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return []

        return [
            (receiver, receiver(signal=self, sender=sender, **named))
            for receiver in self._live_receivers(sender)
        ]

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ].

        If any receiver raises an error (specifically any subclass of
        Exception), return the error instance as the result for that receiver.
        """
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return []

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        responses = []
        for receiver in self._live_receivers(sender):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            # Keep strong references and still-alive weakrefs; drop dead ones.
            new_receivers = []
            for r in self.receivers:
                if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
                    continue
                new_receivers.append(r)
            self.receivers = new_receivers

    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this case in
            # .send() prior to calling _live_receivers() due to concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return []
        if receivers is None:
            # Cache miss (or caching disabled): compute under the lock.
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                for (receiverkey, r_senderkey), receiver in self.receivers:
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append(receiver)
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_receivers = []
        for receiver in receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference.
                receiver = receiver()
                if receiver is not None:
                    non_weak_receivers.append(receiver)
            else:
                non_weak_receivers.append(receiver)
        return non_weak_receivers

    def _remove_receiver(self, receiver=None):
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        # Normalize to a sequence so single signals and lists share one path.
        signals = signal if isinstance(signal, (list, tuple)) else [signal]
        for sig in signals:
            sig.connect(func, **kwargs)
        return func
    return _decorator
| {
"content_hash": "51d4012dc82b9476777dd95c9be40d43",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 94,
"avg_line_length": 36.72108843537415,
"alnum_prop": 0.5808632826972953,
"repo_name": "reinout/django",
"id": "6488c7fbd4516323b1faea40a0f115c06a17f25a",
"size": "10796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/dispatch/dispatcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53138"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12147106"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM4_if_ConnectedLHS(HimesisPreConditionPatternLHS):
    # NOTE: generated code (AToM3/Himesis); the string attributes below are
    # part of the model data — do not reformat them.
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HMM4_if_ConnectedLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        # Empty pattern graph: zero nodes, no edges.
        super(HMM4_if_ConnectedLHS, self).__init__(name='HMM4_if_ConnectedLHS', num_nodes=0, edges=[])

        # Set the graph attributes
        # Meta-model memberships and the textual pre-condition constraint
        # evaluated by the transformation engine.
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        # Deterministic GUID derived from the rule name.
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM4_if')

        # Set the node attributes

        # Nodes that represent the edges of the property.

        # Add the edges
        self.add_edges([
        ])
        # Add the attribute equations
        self["equations"] = []

    def constraint(self, PreNode, graph):
        """
        Executable constraint code.

        @param PreNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True enables the rule to be applied,
        # returning False forbids the rule from being applied.
        #===============================================================================
        # No additional conditions: the empty pattern always matches.
        return True
| {
"content_hash": "bb791d639bf254534ab04ae1f08aaff5",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 125,
"avg_line_length": 43.19672131147541,
"alnum_prop": 0.4713472485768501,
"repo_name": "levilucio/SyVOLT",
"id": "6f8b1077c1d224d0987fd24bee26e9416c8271d6",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/Properties/from_thesis/HMM4_if_ConnectedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import logging
from datetime import timedelta, datetime
from core.errors import ObservableValidationError
from core.feed import Feed
from core.observables import File, Hash, Hostname
class HybridAnalysis(Feed):
    """Feed importer for the Hybrid Analysis public JSON feed.

    Each feed entry describes an analyzed sample. For every entry we create
    a File observable plus its sha256/sha1/md5 hashes, and link contacted
    domains (as C2 hostnames) and dropped files to it.
    """

    default_values = {
        "frequency": timedelta(minutes=5),
        "name": "HybridAnalysis",
        "source": "https://www.hybrid-analysis.com/feed?json",
        "description": "Hybrid Analysis Public Feeds",
    }

    def update(self):
        """Fetch the JSON feed and analyze every new entry."""
        for index, item in self.update_json(
            headers={"User-agent": "VxApi Connector"},
            key="data",
            filter_row="analysis_start_time",
        ):
            self.analyze(item)

    # pylint: disable=arguments-differ
    def analyze(self, item):
        """Create observables, context and links for a single feed entry.

        item -- one decoded entry from the feed's "data" array.
        """
        first_seen = item["analysis_start_time"]
        f_hyb = File.get_or_create(value="FILE:{}".format(item["sha256"]))
        sha256 = Hash.get_or_create(value=item["sha256"])

        f_hyb.active_link_to(sha256, "sha256", self.name)

        tags = []
        context = {
            "source": self.name,
            "date": first_seen,
            "date_added": datetime.utcnow(),
        }

        # Copy the optional fields the feed may or may not provide.
        if "vxfamily" in item:
            context["vxfamily"] = item["vxfamily"]

        if "tags" in item:
            tags.extend(item["tags"])

        if "threatlevel_human" in item:
            context["threatlevel_human"] = item["threatlevel_human"]

        if "threatlevel" in item:
            context["threatlevel"] = item["threatlevel"]

        if "type" in item:
            context["type"] = item["type"]

        if "size" in item:
            context["size"] = item["size"]

        if "vt_detect" in item:
            context["virustotal_score"] = item["vt_detect"]

        if "et_alerts_total" in item:
            context["et_alerts_total"] = item["et_alerts_total"]

        if "process_list" in item:
            context["count_process_spawn"] = len(item["process_list"])

        context["url"] = "https://www.hybrid-analysis.com" + item["reporturl"]

        f_hyb.add_context(context, dedup_list=["date_added"])
        f_hyb.tag(tags)
        f_hyb.add_source("feed")

        sha256.add_context(context, dedup_list=["date_added"])

        # The md5 and sha1 hashes share the sample's context.
        md5 = Hash.get_or_create(value=item["md5"])
        md5.add_source("feed")
        md5.add_context(context, dedup_list=["date_added"])
        f_hyb.active_link_to(md5, "md5", self.name)

        sha1 = Hash.get_or_create(value=item["sha1"])
        sha1.add_source("feed")
        sha1.add_context(context, dedup_list=["date_added"])
        f_hyb.active_link_to(sha1, "sha1", self.name)

        if "domains" in item:
            self._link_domains(f_hyb, item["domains"])

        if "extracted_files" in item:
            self._link_extracted_files(f_hyb, item["extracted_files"])

    def _link_domains(self, f_hyb, domains):
        """Link each contacted domain to the sample as a C2 hostname."""
        for domain in domains:
            try:
                new_host = Hostname.get_or_create(value=domain)
                f_hyb.active_link_to(new_host, "C2", self.name)
                logging.debug(domain)
                new_host.add_context({"source": self.name, "contacted_by": f_hyb})
                new_host.add_source("feed")
            except ObservableValidationError as e:
                # Skip invalid hostnames but keep processing the rest.
                logging.error(e)

    def _link_extracted_files(self, f_hyb, extracted_files):
        """Create File/Hash observables for files dropped by the sample."""
        for extracted_file in extracted_files:
            context_file_dropped = {"source": self.name}

            # Entries without a sha256 cannot be identified; skip them.
            if "sha256" not in extracted_file:
                logging.error(extracted_file)
                continue

            new_file = File.get_or_create(
                value="FILE:{}".format(extracted_file["sha256"])
            )
            sha256_new_file = Hash.get_or_create(value=extracted_file["sha256"])
            sha256_new_file.add_source("feed")

            new_file.active_link_to(sha256_new_file, "sha256", self.name)

            context_file_dropped["virustotal_score"] = 0
            context_file_dropped["size"] = extracted_file["file_size"]

            if "av_matched" in extracted_file:
                context_file_dropped["virustotal_score"] = extracted_file[
                    "av_matched"
                ]

            if "threatlevel_readable" in extracted_file:
                context_file_dropped["threatlevel"] = extracted_file[
                    "threatlevel_readable"
                ]

            if "av_label" in extracted_file:
                context_file_dropped["av_label"] = extracted_file["av_label"]

            if "type_tags" in extracted_file:
                new_file.tag(extracted_file["type_tags"])

            new_file.add_context(context_file_dropped)
            sha256_new_file.add_context(context_file_dropped)
            new_file.add_source(self.name)

            f_hyb.active_link_to(new_file, "drop", self.name)
| {
"content_hash": "302c52a8257143c13597d847bdf78674",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 86,
"avg_line_length": 34.5,
"alnum_prop": 0.5421130014702793,
"repo_name": "yeti-platform/yeti",
"id": "7351ba911d6421dd3e7eaeb617f6a6a21db85388",
"size": "4761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/feeds/public/hybrid_analysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18580"
},
{
"name": "Dockerfile",
"bytes": "1379"
},
{
"name": "HTML",
"bytes": "182623"
},
{
"name": "JavaScript",
"bytes": "79774"
},
{
"name": "Python",
"bytes": "586533"
},
{
"name": "Shell",
"bytes": "8495"
}
],
"symlink_target": ""
} |
class JSONResponse(HttpResponse):
    """An HttpResponse whose body is the JSON serialization of ``data``."""

    def __init__(self, request, data, status = 200):
        # Serialize first so any encoding error surfaces before the
        # response object is constructed.
        payload = json.dumps(data)
        super(JSONResponse, self).__init__(
            content = payload,
            content_type = 'application/json',
            status = status
        )
class JSONViewMixin(object):
    """
    Add this mixin to a Django CBV subclass to easily return JSON data.
    """
    def json_response(self, data, status = 200):
        """Return *data* serialized as JSON with the given HTTP status.

        Bug fix: the ``status`` argument was previously ignored — the call
        hard-coded ``status = 200`` — so error statuses were never sent.
        """
        return JSONResponse(self.request, data, status = status)
def generate_profile_sidebar(request):
    """Build the common profile-page context: static blob, CSRF token and
    the lists owned by the current user."""
    context = {'static_blob': static_blob}
    context.update(csrf(request))
    context['my_lists'] = List.objects.filter(owner__id = request.user.id)
    return context
def list_edit(request, primkey = None):
    '''
    This view gives a form to the template and saves it
    It can also give a blank form in primkey = None
    Args : primkey - key of list to edit. if None : create new List
    '''
    # Default: blank form for creating a new list.
    list_instance = None
    list_edit_form = ListEditForm()
    if primkey:
        # Editing an existing list: pre-populate the form from the instance.
        list_instance = List.objects.get(pk = primkey)
        list_edit_form = ListEditForm(instance = list_instance)
    if request.method == "POST":
        # Rebind the form to the submitted data (update vs. create).
        if list_instance:
            list_edit_form = ListEditForm(request.POST, instance = list_instance)
        else:
            list_edit_form = ListEditForm(request.POST)
        if list_edit_form.is_valid():
            my_list = list_edit_form.save()
            messages.success(request, 'The list : <b>%s</b> was saved successfully !' % my_list.name)
            return HttpResponseRedirect("/me")
        else:
            #Render the form along with all its errors.
            # NOTE(review): template path 'catalog/list/edit.html' differs from
            # 'catalog/lists/edit.html' used below — confirm which one exists.
            return render_to_response ('catalog/list/edit.html', locals(), context_instance = RequestContext(request))
    # GET (or initial render): show the form with the sidebar context.
    context = generate_profile_sidebar(request)
    context.update( {
        'list_instance' : list_instance,
        'list_edit_form' : list_edit_form,
    } )
    return render_to_response('catalog/lists/edit.html', context, context_instance = RequestContext(request))
def list_new(request):
    """Create a new list: delegate to list_edit with no primary key so it
    renders a blank form."""
    return list_edit(request, primkey = None)
def list_view(request, primkey = None):
    """
    This view gives a list to the template to show the products in that list
    Args : primkey of list to show
    """
    context = generate_profile_sidebar(request)
    if primkey:
        list_instance = List.objects.get(pk = primkey)
        context['list_instance'] = list_instance
        context['list_items'] = list_instance.items.all()
    return render_to_response('catalog/lists/view.html', context, context_instance = RequestContext(request))
def ajax(request):
    """AJAX dispatcher for like/unlike actions and user-submitted links.

    Expects POST data with either a 'like' key (plus 'type' one of
    image/tutorial/makey/product/shop) or an 'adddata' key
    ('tutorial'/'makey' URL submissions). Returns a plain-text status
    string ("okay", "data redundant", "login required", ...).
    """
    response = "initial"
    if request.user.is_authenticated():
        # response = "initial2"
        # ---- like/unlike handling: each branch fetches the target object,
        # ---- checks for an existing Like row, then creates or deletes it.
        if 'like' in request.POST:
            if request.POST['type'] == 'image':
                i = ProductImage.objects.get(pk = request.POST['image_id'])
                p = Product.objects.get(pk = request.POST['product_id'])
                check = LikeProductImage.objects.filter(image = i, user = request.user)
                if request.POST['like'] == "like":
                    if not check:
                        l = LikeProductImage(image = i, user = request.user, product = p, time = datetime.now())
                        l.save()
                        response = "okay"
                    else:
                        response = "data redundant"
                if request.POST['like'] == "unlike":
                    if check:
                        l = LikeProductImage.objects.get(image = i, user = request.user)
                        l.delete()
                        response = "okay"
                    else:
                        response = "data redundant"
            elif request.POST['type'] == 'tutorial':
                t = Tutorial.objects.get(pk = request.POST['tutorial_id'])
                p = Product.objects.get(pk = request.POST['product_id'])
                check = LikeTutorial.objects.filter(tutorial = t, user = request.user)
                if request.POST['like'] == "like":
                    if not check:
                        l = LikeTutorial(tutorial = t, user = request.user, product = p, time = datetime.now())
                        l.save()
                        response = "okay"
                    else:
                        response = "data redundant"
                if request.POST['like'] == "unlike":
                    if check:
                        l = LikeTutorial.objects.get(tutorial = t, user = request.user)
                        l.delete()
                        response = "okay"
                    else:
                        response = "data redundant"
            elif request.POST['type'] == 'makey':
                m = Makey.objects.get(pk = request.POST['makey_id'])
                p = Product.objects.get(pk = request.POST['product_id'])
                check = LikeMakey.objects.filter(makey = m, user = request.user)
                if request.POST['like'] == "like":
                    if not check:
                        l = LikeMakey(makey = m, user = request.user, product = p, time = datetime.now())
                        l.save()
                        response = "okay"
                    else:
                        response = "data redundant"
                if request.POST['like'] == "unlike":
                    if check:
                        l = LikeMakey.objects.get(makey = m, user = request.user)
                        l.delete()
                        response = "okay"
                    else:
                        response = "data redundant"
            elif request.POST['type'] == 'product':
                p = Product.objects.get(pk = request.POST['product_id'])
                check = LikeProduct.objects.filter(product = p, user = request.user)
                if request.POST['like'] == "like":
                    if not check:
                        l = LikeProduct(user = request.user, product = p, time = datetime.now())
                        l.save()
                        response = "okay"
                    else:
                        response = "data redundant"
                if request.POST['like'] == "unlike":
                    if check:
                        l = LikeProduct.objects.get(product = p, user = request.user)
                        l.delete()
                        response = "okay"
                    else:
                        response = "data redundant"
            elif request.POST['type'] == 'shop':
                s = Shop.objects.get(pk = request.POST['shop_id'])
                check = LikeShop.objects.filter(shop = s, user = request.user)
                if request.POST['like'] == "like":
                    if not check:
                        l = LikeShop(user = request.user, shop = s, time = datetime.now())
                        l.save()
                        response = "okay"
                    else:
                        response = "data redundant"
                if request.POST['like'] == "unlike":
                    if check:
                        l = LikeShop.objects.get(shop = s, user = request.user)
                        l.delete()
                        response = "okay"
                    else:
                        response = "data redundant"
        # ---- user-submitted tutorial/makey URL handling ----
        if 'adddata' in request.POST:
            # response = "initial3"
            if request.POST['adddata'] == 'tutorial':
                form = TutorialForm({'url':request.POST['url'], })
                if form.is_valid():
                    cd = form.cleaned_data
                    q = Tutorial.objects.filter(url = cd['url'])
                    if q:
                        # NOTE(review): q2 checks whether ANY product links this
                        # URL, not specifically product_id — confirm intent.
                        q2 = Product.objects.filter(tutorials__url = cd['url'])
                        if q2:
                            response = "Thank you for adding the url but the tutorial is already linked to this maker part"
                        else:
                            # Tutorial exists but is unlinked: attach it.
                            tut = Tutorial.objects.get(url = cd['url'])
                            p = Product.objects.get(pk = request.POST['product_id'])
                            p.tutorials.add(tut)
                            p.save()
                            response = "okay"
                    else:
                        # New URL: create the tutorial on the product directly.
                        p = Product.objects.get(pk = request.POST['product_id'])
                        p.tutorials.create(user = request.user, url = cd['url'], added_time = datetime.now())
                        p.save()
                        response = "okay"
                else:
                    response = "Please enter a valid url"
            elif request.POST['adddata'] == 'makey':
                form = MakeyForm({'url':request.POST['url'], 'name':request.POST['name']})
                if form.is_valid():
                    cd = form.cleaned_data
                    q = Makey.objects.filter(url = cd['url'])
                    if q:
                        # NOTE(review): same ANY-product check as for tutorials.
                        q2 = Product.objects.filter(makeys__url = cd['url'])
                        if q2:
                            response = "This makey is already linked to this maker part"
                        else:
                            makey = Makey.objects.get(url = cd['url'])
                            p = Product.objects.get(pk = request.POST['product_id'])
                            p.makeys.add(makey)
                            p.save()
                            response = "okay"
                    else:
                        p = Product.objects.get(pk = request.POST['product_id'])
                        p.makeys.create(user = request.user, name = cd['name'], url = cd['url'], added_time = datetime.now())
                        p.save()
                        response = "okay"
                else:
                    response = "Please enter a valid url"
    else:
        response = "login required"
    return HttpResponse(response)
def ajaxemail(request):
    """AJAX endpoint that stores a submitted e-mail address.

    Fix: ``response`` was only bound inside the POST branch, so any other
    HTTP method raised NameError instead of returning a response.
    """
    response = "error"  # default for non-POST requests
    if request.method == 'POST':
        email = request.POST['email']
        e = EmailCollect(email = email)
        e.save()
        response = "okay"
    return HttpResponse(response)
def me(request):
    """Render the logged-in user's profile page; plain-text error if anonymous."""
    if not request.user.is_authenticated():
        return HttpResponse('You need to login to see this page')
    sidebar = generate_profile_sidebar(request)
    return render_to_response('catalog/me.html', sidebar,
                              context_instance = RequestContext(request))
def email_collect(request):
    """Render the e-mail collection page (includes CSRF token for the form)."""
    context = {'static_blob': static_blob}
    context.update(csrf(request))
    return render_to_response('catalog/email_collect.html', context,
                              context_instance = RequestContext(request))
@login_required
def cfi_store_old(request):
    # NOTE(review): legacy/broken view kept byte-for-byte; see inline notes.
    login = request.user.is_authenticated()
    login_alex=False
    if request.user.username=="alex":
        login_alex=True
    # NOTE(review): 'store' is used before it is assigned anywhere in this
    # function, so this line raises NameError at runtime. A 'store' parameter
    # or hard-coded shop name was presumably lost — confirm before reuse.
    store=Shop.objects.get(name=store)
    productshopurls=ProductShopUrl.objects.filter(shop__name=store)
    products = Product.objects.filter(productshopurls__shop__name=store).order_by('-score')
    # Flag products that are already in the CFI store.
    for product in products:
        if CfiStoreItem.objects.filter(item=product):
            product.incfistore=True
    # NOTE(review): 'products_page' and 'list_pages' are undefined here, so
    # building this dict would also raise NameError — and the dict is never
    # passed to render() below, so the template receives no context anyway.
    context = {
        'static_blob':static_blob,
        'products':products_page,
        'list_pages' : list_pages,
        'total_products' : 0,
        'login':login,
        'login_alex':login_alex,
        'store':store,
    }
    return render(request, 'catalog/store_page_old.html')
def makey_page(request, makey_id):
    """Makey detail page.

    Renders the full page for logged-in users (edit rights for collaborators
    and user id 1) and a reduced page otherwise.

    Fix: ``Makey.objects.get()`` raises ``DoesNotExist`` rather than
    returning a falsy value, so the old ``if makey:`` 404 branch was
    unreachable and an unknown id produced a 500 error.
    """
    user_details = get_user_details_json(request)
    try:
        makey = Makey.objects.get(pk = makey_id)
    except Makey.DoesNotExist:
        return HttpResponse('404 Error - this makey does not exist')
    if user_details:
        can_edit = False
        if(request.user in makey.collaborators.all() or request.user.id == 1):
            can_edit = True
        # userflags=UserFlags.objects.get(user=)
        return render(request, 'catalog/makey_page.html', {
            'makey_id':makey_id,
            'makey_name' : makey.name,
            'user_details' : user_details,
            'can_edit' : can_edit})
    else:
        return render(request, 'catalog/makey_page_wo_login.html', {
            'makey':makey,
            'makey_id' : makey.id,
            # 'creator':creator,
        })
# Makey page is the one with the actual data.
def project_page(request, project_id):
    """Render the (mostly static) project page."""
    context = {
        'static_blob': static_blob,
        'login': request.user.is_authenticated(),
    }
    return render(request, 'catalog/project_page.html', context)
def product_page_old(request, sku):
    """Legacy product detail page.

    Loads one product plus its descriptions, shop links, images, tutorials
    and makeys, annotating each related object with its like queryset and
    (for authenticated users) whether the current user liked it.
    """
    if_email_add(request)
    product = Product.objects.get(id = sku)
    # Duplicate products point at a canonical entry; redirect there.
    if product.identicalto:
        return HttpResponseRedirect("/product/" + str(product.identicalto.id) + "/")
    product.descriptions = ProductDescription.objects.filter(product = product.id)
    product.shopurls = ProductShopUrl.objects.filter(product = product.id)
    product.images = ProductImage.objects.filter(product = product.id)
    login = request.user.is_authenticated()
    if login:
        # Per-user like flags for images, the product itself, and its shops.
        for image in product.images:
            image.all_likes = LikeProductImage.objects.filter(image = image)
            liked = LikeProductImage.objects.filter(image = image.id, user = request.user)
            image.like = True if liked else False
        product.all_likes = LikeProduct.objects.filter(product = product)
        liked = LikeProduct.objects.filter(product = product, user = request.user)
        product.like = True if liked else False
        for shopurl in product.shopurls:
            shopurl.all_likes = LikeShop.objects.filter(shop = shopurl.shop)
            liked = LikeShop.objects.filter(shop = shopurl.shop, user = request.user)
            shopurl.like = True if liked else False
    # Tutorials, with a like flag only when logged in.
    product.tutorials_detail = []
    for tutorial in product.tutorials.all():
        tutorial.all_likes = LikeTutorial.objects.filter(tutorial = tutorial)
        if login:
            liked = LikeTutorial.objects.filter(tutorial = tutorial, user = request.user)
            tutorial.like = True if liked else False
        product.tutorials_detail.append(tutorial)
    # Makeys, same treatment as tutorials.
    product.makeys_detail = []
    for makey in product.makeys.all():
        makey.all_likes = LikeMakey.objects.filter(makey = makey)
        if login:
            liked = LikeMakey.objects.filter(makey = makey, user = request.user)
            makey.like = True if liked else False
        product.makeys_detail.append(makey)
    return render(request, 'catalog/product_page_original.html', {'static_blob':static_blob, 'login':login, 'product':product, })
def vendor_signup_page(request):
    """Render the vendor signup page (no template context needed)."""
    if_email_add(request)
    context = {}
    return render(request, 'catalog/vendor_signup_page.html', context)
def landing_page2(request):
    """Search landing page.

    When ?q= is present, validates the term, logs the search, runs a
    word-AND name search over non-duplicate products and renders a paginated
    result page; otherwise (or on validation errors) renders the plain
    search form with site-wide counts.
    """
    if_email_add(request)
    # if request.user.is_authenticated() == False:
    # return HttpResponseRedirect("/launching_soon")
    login = request.user.is_authenticated()
    errors = []
    if 'q' in request.GET:
        form = SearchForm({'q':request.GET['q'], })
        cd = ''
        if form.is_valid():
            cd = form.cleaned_data
        if cd:
            q = cd['q']
        else:
            q = ''
        if not q:
            errors.append("Enter a search term.")
        elif len(q) < 1:
            # NOTE(review): unreachable — q is non-empty here, so len(q) >= 1.
            errors.append("Enter atleast 1 characters.")
        elif len(q) > 50:
            errors.append("Enter atmost 50 characters.")
        else:
            # Log the search, attributed to the user when logged in.
            if request.user.is_authenticated() == True:
                log = SearchLog(term = q, user = request.user, time = datetime.now())
            else:
                log = SearchLog(term = q, time = datetime.now())
            log.save()
            # Normalize to ASCII, strip punctuation (Python 2
            # string.maketrans/translate idiom), then split into words.
            q_clean = unicodedata.normalize('NFKD', q).encode('ascii', 'ignore').translate(string.maketrans("", ""), string.punctuation).strip().split(" ")
            # AND together one name__icontains filter per word.
            qs = reduce(operator.and_, (Q(name__icontains = n) for n in q_clean))
            # print "search query\n\n"
            # print qs
            products = Product.objects.filter(qs).filter(identicalto = None).order_by('-score')
            # for product in products:
            # product.url = ProductShopUrl
            paginator = Paginator(products, 30)
            # Show 25 contacts per page
            page = request.GET.get('page')
            if not page:
                page = 1
            try:
                products_page = paginator.page(page)
            except PageNotAnInteger:
                # If page is not an integer, deliver first page.
                products_page = paginator.page(1)
                page = 1
            except EmptyPage:
                # If page is out of range (e.g. 9999), deliver last page of results.
                products_page = paginator.page(paginator.num_pages)
                page = paginator.num_pages
            # Build the abbreviated page-number strip; '. . .' is an ellipsis
            # placeholder rendered verbatim by the template. Note the Python 2
            # list + range concatenation.
            if int(paginator.num_pages) == 1:
                list_pages = [1]
            elif int(paginator.num_pages) <= 8:
                list_pages = range(1, int(paginator.num_pages)+1)
            elif int(page) <= 2:
                list_pages = range(1, 5) + ['. . .'] + [paginator.num_pages]
            elif int(page) >= int(paginator.num_pages) - 2:
                list_pages = [1, '. . .'] + range(paginator.num_pages-3, paginator.num_pages+1)
            else:
                list_pages = ['. . .'] + range(int(page)-2, int(page)+3) + ['. . .']
            # Attach the first enabled image to each product on the page.
            for product in products_page:
                img = ProductImage.objects.filter(product = product.id, is_enabled=True)
                # print(img)
                if img:
                    product.image_p = img[0]
            context = {
                'static_blob':static_blob,
                'products':products_page,
                'list_pages' : list_pages,
                'total_products' : 0,
                'query':q,
                'login':login,
            }
            return render(request, 'catalog/search_result.html', context)
    stores = Shop.objects.count()
    products = Product.objects.count()
    return render(request, 'catalog/search_form.html', {'static_blob':static_blob, 'login':login, 'errors': errors, "stores":stores, "products":products})
| {
"content_hash": "59d8683cae6533d2d328dc42d6d25571",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 155,
"avg_line_length": 41.55744680851064,
"alnum_prop": 0.5212471841081302,
"repo_name": "Makeystreet/makeystreet",
"id": "966f47918a4747a7fe5cf02cae8c9718e7b05083",
"size": "19533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "woot/apps/catalog/views_archive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1893401"
},
{
"name": "HTML",
"bytes": "2253311"
},
{
"name": "JavaScript",
"bytes": "1698946"
},
{
"name": "Python",
"bytes": "9010343"
}
],
"symlink_target": ""
} |
"""Script for Expedia Hotel Recommendations Kaggle competition.
Particular verison is based on Dataquest tutorial by Vik Paruchuri
<https://www.dataquest.io/blog/kaggle-tutorial/>
Main idea here lies in fiding most popular hotel clusters
in the training data for all the data and for each particular
`destination id`. Also it uses some information from data leak.
"""
import operator
import numpy as np
import pandas as pd
# Helper functions
def read_csv(filename, cols=None, nrows=None):
    """Data import and basic mangling.

    Parameters
    ----------
    filename : str or file-like, CSV source (may be gzipped)
    cols : array-like, default None
        Return a subset of columns; None loads all columns.
    nrows : int, default None
        Number of rows of file to read.

    Returns
    -------
    result : DataFrame
        With 'month'/'year' columns derived from 'date_time' when present.
    """
    datecols = ['date_time', 'srch_ci', 'srch_co']
    # Compact dtypes keep the (large) competition files memory-friendly.
    # pandas ignores dtype entries for columns that are not loaded.
    dtypes = {
        'id': np.uint32,
        'site_name': np.uint8,
        'posa_continent': np.uint8,
        'user_location_country': np.uint16,
        'user_location_region': np.uint16,
        'user_location_city': np.uint16,
        'orig_destination_distance': np.float32,
        'user_id': np.uint32,
        'is_mobile': bool,
        'is_package': bool,
        'channel': np.uint8,
        'srch_adults_cnt': np.uint8,
        'srch_children_cnt': np.uint8,
        'srch_rm_cnt': np.uint8,
        'srch_destination_id': np.uint32,
        'srch_destination_type_id': np.uint8,
        'is_booking': bool,
        'cnt': np.uint64,
        'hotel_continent': np.uint8,
        'hotel_country': np.uint16,
        'hotel_market': np.uint16,
        'hotel_cluster': np.uint8,
    }
    df = pd.read_csv(filename, nrows=nrows, usecols=cols, dtype=dtypes)
    # Parse date columns after loading. This fixes the crash when cols is
    # None (the old `[col for col in datecols if col in cols]` raised
    # TypeError on None) and avoids the deprecated date_parser argument.
    for col in datecols:
        if col in df.columns:
            df[col] = pd.to_datetime(df[col], format='%Y-%m-%d %H:%M:%S',
                                     errors='coerce')
    if 'date_time' in df.columns:
        df['month'] = df['date_time'].dt.month.astype(np.uint8)
        df['year'] = df['date_time'].dt.year.astype(np.uint16)
    return df
def find_most_common(df):
    """Return (up to) the five most frequent hotel clusters in *df*."""
    counts = df['hotel_cluster'].value_counts()
    return list(counts.head().index)
def make_key(items):
    """Join *items* into a single underscore-separated string key."""
    return "_".join(str(item) for item in items)
def find_most_common_in_match(data_frame, match_cols):
    """Return the top-5 hotel clusters for each match-column combination.

    Clusters are scored per group as bookings + 0.15 * clicks; result keys
    are the underscore-joined match-column values (via make_key).
    """
    scores = {}
    for name, group in data_frame.groupby(match_cols + ['hotel_cluster']):
        bookings = group['is_booking'].sum()
        clicks = len(group) - bookings
        key = make_key(name[:len(match_cols)])
        # name[-1] is the hotel_cluster component of the group key.
        scores.setdefault(key, {})[name[-1]] = bookings + .15 * clicks
    cluster_dict = {}
    for key, cluster_scores in scores.items():
        ranked = sorted(cluster_scores.items(),
                        key=operator.itemgetter(1), reverse=True)
        cluster_dict[key] = [cluster for cluster, _score in ranked[:5]]
    return cluster_dict
def find_exact_match(row, groups, match_cols):
    """Return the distinct hotel clusters of the group exactly matching *row*
    on *match_cols*, or an empty list when no such group exists."""
    key = tuple(row[col] for col in match_cols)
    try:
        group = groups.get_group(key)
    except KeyError:
        return []
    return list(set(group.hotel_cluster))
def f5(seq, idfun=None):
    """Return *seq* without duplicates, preserving first-seen order.

    Uniquify a list by Peter Bengtsson
    https://www.peterbe.com/plog/uniqifiers-benchmark

    idfun maps each item to the marker used for duplicate detection
    (identity when None).
    """
    if idfun is None:
        idfun = lambda x: x
    seen = set()
    result = []
    for item in seq:
        marker = idfun(item)
        if marker not in seen:
            seen.add(marker)
            result.append(item)
    return result
def main():
    """Main script.

    Builds predictions from (a) the most common hotel clusters per
    destination and (b) the location/distance data leak, then writes a
    Kaggle submission file.

    Fix: submission row ids are taken from the *test* frame; the original
    indexed `train`, which only produced correct ids by coincidence of both
    frames carrying default RangeIndexes.
    """
    # Only the columns actually used below are loaded, to keep memory down.
    traincols = [
        'user_location_country', 'user_location_region', 'user_location_city',
        'orig_destination_distance', 'srch_destination_id', 'is_booking',
        'hotel_market', 'hotel_cluster',
    ]
    testcols = [
        'user_location_country', 'user_location_region', 'user_location_city',
        'orig_destination_distance', 'srch_destination_id', 'hotel_market',
    ]
    # Reading training data
    train = read_csv('data/train.csv.gz', cols=traincols, nrows=None)
    # Find the most common hotel clusters in the dataset (global fallback)
    top_clust = find_most_common(train)
    # Find the most common hotel clusters for each destination
    match_cols_dest = ['srch_destination_id']
    top_clust_in_dest = find_most_common_in_match(train, match_cols_dest)
    # Utilizing the data leak: an exact match on these columns identifies
    # the same user/hotel pair across train and test.
    match_cols_leak = [
        'user_location_country',
        'user_location_region',
        'user_location_city',
        'hotel_market',
        'orig_destination_distance',
    ]
    groups = train.groupby(match_cols_leak)
    # Reading test data
    test = read_csv('data/test.csv.gz', cols=testcols, nrows=None)
    # Make predictions: leak matches first, then per-destination favourites,
    # deduplicated and capped at five clusters per row.
    preds = []
    for _, row in test.iterrows():
        key = make_key([row[m] for m in match_cols_dest])
        pred_dest = top_clust_in_dest.get(key, top_clust)
        pred_leak = find_exact_match(row, groups, match_cols_leak)
        preds.append(f5(pred_leak + pred_dest)[:5])
    # Write out the submission file
    write_p = [" ".join([str(l) for l in p]) for p in preds]
    write_frame = [
        "{},{}".format(test.index[i], write_p[i])
        for i in range(len(preds))]
    write_frame = ["id,hotel_cluster"] + write_frame
    with open('out/predictions_1.csv', 'w+') as f:
        f.write('\n'.join(write_frame))


if __name__ == '__main__':
    main()
| {
"content_hash": "46718a438fc1e997f5cc2eb273f419e1",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 96,
"avg_line_length": 31.093333333333334,
"alnum_prop": 0.5901943967981704,
"repo_name": "ppik/playdata",
"id": "f846a9c0f5e336e656dcc5378391b59fc698af6f",
"size": "7018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Kaggle-Expedia/script_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "155299"
},
{
"name": "Python",
"bytes": "9445"
}
],
"symlink_target": ""
} |
import sys, glob
from optparse import OptionParser

# Locate the generated thrift bindings and the freshly-built library, and put
# them on sys.path BEFORE anything from 'thrift'/'ThriftTest' is imported.
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir', default='gen-py')
options, args = parser.parse_args()
del sys.argv[1:]  # clean up hack so unittest doesn't complain
sys.path.insert(0, options.genpydir)
sys.path.insert(0, glob.glob('../../lib/py/build/lib*')[0])

from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
import unittest
import time
import socket
import random
from optparse import OptionParser  # NOTE(review): duplicate of the import above
class TimeoutTest(unittest.TestCase):
    """Exercise TSocket connect/write timeouts against a local listener
    that never accepts or reads, so the client operations must fail fast."""

    def setUp(self):
        """Bind a listening socket on a random port, retrying up to 50 times."""
        for i in range(50):
            try:
                # find a port we can use
                self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.port = random.randint(10000, 30000)
                self.listen_sock.bind(('localhost', self.port))
                self.listen_sock.listen(5)
                break
            except Exception:  # was a bare except; keep retrying, re-raise last
                if i == 49:
                    raise

    def testConnectTimeout(self):
        """Opening many connections to a backlogged listener must time out
        well before 5 seconds (timeout is set to 10 ms per socket)."""
        starttime = time.time()
        try:
            leaky = []
            for i in range(100):
                # renamed from 'socket' — the old name shadowed the socket module
                sock = TSocket.TSocket('localhost', self.port)
                sock.setTimeout(10)
                sock.open()
                leaky.append(sock)
        except Exception:
            self.assertTrue(time.time() - starttime < 5.0)

    def testWriteTimeout(self):
        """Writing forever to a peer that never reads must time out quickly."""
        starttime = time.time()
        try:
            sock = TSocket.TSocket('localhost', self.port)
            sock.setTimeout(10)
            sock.open()
            lsock = self.listen_sock.accept()
            while True:
                sock.write("hi" * 100)
        except Exception:
            self.assertTrue(time.time() - starttime < 5.0)
# Run the suite directly so the file works as a plain script (the sys.argv
# cleanup above keeps unittest's own CLI parsing out of the way).
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TimeoutTest))
testRunner = unittest.TextTestRunner(verbosity=2)
testRunner.run(suite)
| {
"content_hash": "a6f45b2902a0f837d4666f92541999f4",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 84,
"avg_line_length": 30.529411764705884,
"alnum_prop": 0.6088631984585742,
"repo_name": "chentao/thrift",
"id": "55e4996e87e52a62f43b573c1779bb124ec275f4",
"size": "2885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/py/TestSocket.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "75532"
},
{
"name": "Batchfile",
"bytes": "5757"
},
{
"name": "C",
"bytes": "672642"
},
{
"name": "C#",
"bytes": "388334"
},
{
"name": "C++",
"bytes": "3818824"
},
{
"name": "CMake",
"bytes": "92366"
},
{
"name": "CSS",
"bytes": "1070"
},
{
"name": "D",
"bytes": "644980"
},
{
"name": "Dart",
"bytes": "146395"
},
{
"name": "Emacs Lisp",
"bytes": "5361"
},
{
"name": "Erlang",
"bytes": "293090"
},
{
"name": "Go",
"bytes": "450262"
},
{
"name": "HTML",
"bytes": "23089"
},
{
"name": "Haskell",
"bytes": "103827"
},
{
"name": "Haxe",
"bytes": "304443"
},
{
"name": "Java",
"bytes": "957083"
},
{
"name": "JavaScript",
"bytes": "340910"
},
{
"name": "LLVM",
"bytes": "16129"
},
{
"name": "Lua",
"bytes": "48477"
},
{
"name": "Makefile",
"bytes": "15010"
},
{
"name": "OCaml",
"bytes": "39241"
},
{
"name": "Objective-C",
"bytes": "153651"
},
{
"name": "PHP",
"bytes": "279510"
},
{
"name": "Pascal",
"bytes": "387179"
},
{
"name": "Perl",
"bytes": "119756"
},
{
"name": "Python",
"bytes": "319142"
},
{
"name": "Ruby",
"bytes": "389656"
},
{
"name": "Shell",
"bytes": "28311"
},
{
"name": "Smalltalk",
"bytes": "22944"
},
{
"name": "Swift",
"bytes": "28538"
},
{
"name": "Thrift",
"bytes": "307334"
},
{
"name": "VimL",
"bytes": "2846"
},
{
"name": "Yacc",
"bytes": "26840"
}
],
"symlink_target": ""
} |
from __future__ import print_function
# =============================================================================
# DOCS
# =============================================================================
__doc__ = """Make band merges difference and union."""
__version__ = "0.0.2"
# =============================================================================
# IMPORTS
# =============================================================================
import sys
import argparse
import logging
import warnings
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from astropysics import coords
# =============================================================================
# LOG
# =============================================================================

# Module-level logger; WARNING by default so library use stays quiet.
logger = logging.getLogger("mmatch")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.WARNING)

# =============================================================================
# CONSTANTS
# =============================================================================

DOC = __doc__
VERSION = __version__
EPILOG = "BSD-3 Licensed - IATE-OAC: http://iate.oac.uncor.edu/"

# Match radius = 3 * 9.26e-5; presumably degrees (~1 arcsec per unit) —
# TODO confirm units against the catalogue convention.
DEFAULT_RADIUS = 3 * 9.2592592592592588e-5

# Number of header lines skipped when reading band-merge files (see read_bm).
EPOCHS = 3

# Input table layout: (ra, dec) pairs for the H, J and K bands.
SOURCE_DTYPE = {
    'names': ['ra_h', 'dec_h', 'ra_j', 'dec_j', 'ra_k', 'dec_k'],
    'formats': [float, float, float, float, float, float]
}
USECOLS = [0, 1, 2, 3, 4, 5]
FORMATS = ['%i', '%1.18e', '%1.18e', '%1.18e', '%1.18e', '%1.18e', '%1.18e']
# =============================================================================
# MAIN
# =============================================================================
def add_columns(arr, extra_cols, append=False):
    """Return a copy of structured array *arr* with *extra_cols* prepended.

    extra_cols is a sequence of (name, values) pairs whose fields are placed
    before arr's own fields. ``append`` is currently unused; it is kept for
    interface compatibility.
    """
    extra = dict(extra_cols)
    dtype = (
        [(name, values.dtype) for name, values in extra_cols] +
        [(field, fmt) for field, fmt in arr.dtype.descr])
    # allocate the widened array, then copy column by column
    merged = np.empty(len(arr), dtype=dtype)
    for field in merged.dtype.names:
        merged[field] = extra[field] if field in extra else arr[field]
    return merged
def read_bm(fp, band="k", **kwargs):
logger.info("- Reading {}...".format(fp))
kwargs.setdefault("dtype", SOURCE_DTYPE)
kwargs.setdefault("skip_header", EPOCHS)
kwargs.setdefault("usecols", USECOLS)
arr = np.genfromtxt(fp, **kwargs)
if arr.ndim == 0:
arr = arr.flatten()
indexed = add_columns(arr, [("idx", np.arange(len(arr)))])
ra = "ra_{}".format(band)
flt = (indexed[ra] != -9999.0)
filtered = indexed[flt]
logger.info("Found {}/{} valid sources".format(len(filtered), len(arr)))
return filtered
def match(bm0_ra, bm0_dec, bm1_ra, bm1_dec, radius=DEFAULT_RADIUS):
    """Yield (idx_bm0, idx_bm1) pairs of mutually-nearest sources.

    Runs astropysics nearest-neighbour matching in both directions and keeps
    only symmetric matches: a pair is yielded iff each source is the other's
    nearest neighbour within *radius* (same units as the ra/dec arrays,
    presumably degrees — TODO confirm against astropysics docs).
    """
    logger.info("- Matching max distance of radius {}...".format(radius))
    # Direction bm0 -> bm1: for each bm0 source, its nearest bm1 source.
    nearestind_bm1, distance_bm1, match_bm1 = coords.match_coords(
        bm0_ra, bm0_dec, bm1_ra, bm1_dec, eps=radius, mode="nearest")
    # Direction bm1 -> bm0: for each bm1 source, its nearest bm0 source.
    nearestind_bm0, distance_bm0, match_bm0 = coords.match_coords(
        bm1_ra, bm1_dec, bm0_ra, bm0_dec, eps=radius, mode="nearest")
    # enumerate index runs over bm1; the stored value is the bm0 neighbour.
    for idx_bm1, idx_bm0 in enumerate(nearestind_bm0):
        # keep only pairs where the match holds in both directions
        if match_bm0[idx_bm1] and \
                nearestind_bm1[idx_bm0] == idx_bm1 \
                and match_bm1[idx_bm0]:
            yield idx_bm0, idx_bm1
def difference(ibm, flts, radius=DEFAULT_RADIUS, band="k"):
ra, dec = "ra_{}".format(band), "dec_{}".format(band)
to_remove = None
logger.info("[MATCH]")
for flt in flts:
matches = np.fromiter(
match(ibm[ra], ibm[dec], flt[ra], flt[dec], radius=radius),
dtype=[("idx_ibm", int), ("idx_flt", int)])
logger.info("Found {} sources matches".format(len(matches)))
if to_remove is None:
to_remove = matches["idx_ibm"]
else:
to_remove = np.append(to_remove, matches["idx_ibm"])
logger.info("[FILTERING]")
uto_remove = np.unique(to_remove)
logger.info("{} unique sources to remove".format(len(uto_remove)))
clean_mask = ~np.in1d(np.arange(len(ibm)), uto_remove)
return ibm[clean_mask]
def union(bms, radius=DEFAULT_RADIUS, band="k"):
ra, dec = "ra_{}".format(band), "dec_{}".format(band)
united = None
for idx, bm in enumerate(bms):
bm_len = len(bm)
bm_idx = np.zeros(bm_len, dtype=int) + idx
bm = add_columns(bm, [("bm_idx", bm_idx)])
if united is None:
united = bm
else:
matches = np.fromiter(
match(united[ra], united[dec], bm[ra], bm[dec], radius=radius),
dtype=[("idx_united", int), ("idx_bm", int)])
logger.info("Found {} sources matches".format(matches.size))
logger.info("Filtering...")
clean_mask = ~np.in1d(np.arange(bm_len), matches["idx_bm"])
united = np.append(united, bm[clean_mask])
return united
| {
"content_hash": "99414f7e33626389722f9e648cbc66dc",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 29.790419161676645,
"alnum_prop": 0.4958793969849246,
"repo_name": "carpyncho/bmdiff",
"id": "9baeb2b11b2dd8f7e04860c47ae5cd6e2fd45bab",
"size": "5022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bmdiff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5983"
}
],
"symlink_target": ""
} |
import re
from oslib import osinit, OSLibError
import subprocess
import time
from oslib.mime_utils import MimeMessage, URL
import oslib.getuser as getuser
import os
import os.path
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.networkinterface import NetworkInterfaceSpecification,NetworkInterfaceCollection
import oslib
import socket
import oslib.ec2_objects
import yaml
import oslib.resources
from oslib.instance.windows import GetWindowsPassword
def parse_facts(option, opt_str, value, parser, *args, **kwargs):
    """optparse callback for -F: collect repeated (key, value) facts into
    parser.values.local_facts as key -> list of values."""
    (key, val) = value
    facts = parser.values.local_facts
    facts.setdefault(key, []).append(val)
def file_parser(parser):
    """Register all command-line options for launching an EC2 instance on
    the given optparse parser."""
    # Identity and naming
    parser.add_option("-n", "--name", dest="name", help="Instance name", default=None)
    # Storage: extra EBS volumes (size or snapshot-based)
    parser.add_option("-s", "--volume_size", dest="volume_size", help="new volume size (GB)", default=[], action='append', type="string")
    parser.add_option("-i", "--snap_id", dest="snap_id", help="some snashop to generate volume from", default=[], action='append')
    # Provisioning payload
    parser.add_option("-U", "--url", dest="url_commands", help="URL to os-init command", default=[], action='append')
    # Instance placement and type
    parser.add_option("-t", "--instance-type", dest="instance_type", help="Specifies the type of instance to be launched.", default=None)
    parser.add_option("-p", "--placement", dest="placement", help="The availability zone in which to launch the instances")
    parser.add_option("-S", "--security_groups", dest="security_groups", help="The names of the security groups with which to associate instances", action='append')
    # SSH access
    parser.add_option("-u", "--user", dest="user", help="login user", default=None)
    parser.add_option("-k", "--key_name", dest="key_name", help="key name", default=None)
    parser.add_option("-f", "--key_file", dest="key_file", help="key file", default=None)
    # Facter facts, collected pairwise by the parse_facts callback
    parser.add_option("-F", "--Fact", dest="local_facts", help="Local facts", default={}, action="callback", callback=parse_facts, nargs=2, type="string")
    parser.add_option("-H", "--hostname", dest="hostname", help="Set the hostname", default=None)
    # Templates, variables and embedded resources for user-data
    parser.add_option("-T", "--template", dest="template", help="template file", default=None)
    parser.add_option("-V", "--variable", dest="variables", help="Variables", default=[], action='append')
    parser.add_option("-r", "--ressource", dest="ressource", help="embedded ressource to add", default=[], action='append')
    parser.add_option("-R", "--ressources_dir", dest="ressources_dir", help="ressource dir search path", default=[], action='append')
    # Networking and IAM
    parser.add_option("-e", "--elastic_ip", dest="elastic_ip", help="create and associate an EIP for this vm", default=None, action="store_true")
    parser.add_option("-I", "--private_ip", dest="private_ip_address", help="set this IP, if instance is in a VPC", default=None)
    parser.add_option("-P", "--profile", dest="instance_profile_arn", help="The arn of the IAM Instance Profile (IIP) to associate with the instances.", default=None)
    parser.add_option("-N", "--notrun", dest="run", help="Don't run the post install command", default=True, action="store_false")
    parser.add_option("", "--subnet-id", dest="subnet_id", help="", default=None)
def build_user_data(user_data_properties, **kwargs):
    """Assemble the os-init user-data MIME payload.

    Consumes the 'variables', 'hostname', 'local_facts', 'url_commands',
    'ressources_dir' and 'ressource' entries of kwargs, stores the rendered
    payload under kwargs['user_data'] and returns the remaining kwargs.

    Raises OSLibError for a malformed -V variable or a resource that cannot
    be found on the search path.
    """
    user_data = MimeMessage()
    for var in kwargs.pop('variables'):
        # str.partition always returns a 3-tuple, so the old
        # `len(var_content) != 3` validation never fired and malformed
        # variables were silently accepted; check the separator instead.
        name, sep, value = var.partition('=')
        if not sep:
            raise OSLibError("Invalid variable: %s" % var)
        user_data_properties[name] = value
    if len(user_data_properties) > 0:
        user_data.append(user_data_properties)
    hostname = kwargs.pop('hostname')
    if hostname is not None:
        user_data.append("#!/bin/bash\nhostname %s && uname -a" % hostname)
    # Check for local facts
    local_facts = kwargs.pop('local_facts', None)
    if local_facts is not None and len(local_facts) > 0:
        user_data_string = "---\n"
        for k, v in local_facts.items():
            user_data_string += "%s: %s\n" % (k, ",".join(v))
        user_data.append(user_data_string, content_type='application/facter-yaml', filename='localfacts.yaml')
    url_commands = kwargs.pop('url_commands')
    if len(url_commands) > 0:
        user_data.append(url_commands)
    # Resource search path: embedded oslib resources first, then user dirs.
    root_embedded = oslib.resources.__path__
    search_path = [root_embedded[0]]
    search_path.extend(kwargs.pop('ressources_dir'))
    for r in kwargs.pop('ressource'):
        done = False
        # look for the resource in the resources search path
        for path in search_path:
            resource_path = os.path.join(path, r)
            if os.path.exists(resource_path):
                user_data.append(content_file_path=resource_path)
                done = True
        if not done:
            raise OSLibError("resource not found: %s" % r)
    kwargs['user_data'] = "%s" % user_data
    return kwargs
def get_remote_user(ctxt, **kwargs):
    """Pop the login user out of kwargs, falling back to ctxt.user.

    Returns (remote_user, remaining_kwargs).
    """
    remote_user = kwargs.pop('user', None) or ctxt.user
    return (remote_user, kwargs)
def get_key_file(ctxt, **kwargs):
    """Pop the ssh key file out of kwargs, falling back to ctxt.key_file.

    Returns (key_file, remaining_kwargs).
    """
    key_file = kwargs.pop('key_file', None) or ctxt.key_file
    return (key_file, kwargs)
tag_re = re.compile('tag:(.*)')


def do_tags(**kwargs):
    """Split kwargs into EC2 tags and the remaining arguments.

    'tag:xxx' entries become tag xxx, 'name' becomes the Name tag, and a
    'creator' tag defaults to the invoking user. Returns (tags, kwargs).

    Fix: iterate over a snapshot of the keys — popping from kwargs while
    iterating kwargs.keys() raises RuntimeError on Python 3.
    """
    tags = {}
    for arg in list(kwargs):
        match_tag = tag_re.match(arg)
        if match_tag is not None:
            tags[match_tag.group(1)] = kwargs.pop(arg)
    name = kwargs.pop('name', None)
    if name is not None:
        tags['Name'] = name
    if 'creator' not in tags:
        tags['creator'] = getuser.user
    return (tags, kwargs)
def remote_setup(instance, remote_user, key_file):
    """Copy osinit.py to the new instance over scp, run it via ssh/sudo,
    then drop the instance's entries from the local known_hosts file."""
    osinit_path = osinit.__file__
    if osinit_path.endswith('.pyc'):
        # run the .py source, not the compiled bytecode
        osinit_path = osinit_path[:-1]
    remote_path = "%s@%s:/tmp/osinit.py" % (remote_user, instance.public_dns_name)
    scp_cmd = ["scp", "-o", "GSSAPIAuthentication=no",
               "-o", "UserKnownHostsFile=/dev/null",
               "-o", "StrictHostKeyChecking=no",
               "-i", key_file, osinit_path, remote_path]
    subprocess.call(scp_cmd)
    for remote_cmd in ('yum install -y sudo', 'sudo -n python /tmp/osinit.py decode'):
        ssh_cmd = ["ssh", "-tt", "-o", "GSSAPIAuthentication=no",
                   "-o", "UserKnownHostsFile=/dev/null", "-x",
                   "-o", "StrictHostKeyChecking=no", "-i", key_file,
                   "-l", remote_user, instance.public_dns_name, remote_cmd]
        subprocess.call(ssh_cmd)
    # Eventually remove the ssh public host key
    for target in (instance.public_dns_name, instance.ip_address):
        subprocess.call(["ssh-keygen", "-R", target])
def parse_template(ctxt, template_file_name, kwargs):
    """Load a YAML instance template and merge it into kwargs.

    Values already set on the command line win over template values; an
    'ami_name'/'ami_id' template entry resolves the AMI when no image_id
    was given. Returns the merged kwargs.
    """
    with open(template_file_name) as f:
        dataMap = yaml.safe_load(f)
    if 'image_id' not in kwargs or kwargs['image_id'] is None:
        ami_kwargs = {}
        if 'ami_name' in dataMap:
            ami_kwargs['name'] = dataMap.pop('ami_name')
        elif 'ami_id' in dataMap:
            # BUG FIX: the original read dataMap['id'] (a KeyError) instead
            # of the 'ami_id' entry it had just tested for.
            ami_kwargs['id'] = dataMap.pop('ami_id')
        ami = oslib.ec2_objects.AMI(ctxt, **ami_kwargs)
        ami.get()
        kwargs['image_id'] = ami.id
    # These arguments must end up as lists even when the template holds a
    # single scalar value.
    for varg in ('security_groups', 'embedded_commands', 'snap_id'):
        if varg in dataMap:
            value = dataMap.pop(varg)
            if isinstance(value, list):
                kwargs[varg] = value
            elif isinstance(value, basestring):
                kwargs[varg] = [value]
    # Template facts fill in only the keys the command line did not set.
    if 'local_facts' in dataMap:
        local_facts = kwargs['local_facts']
        for k, v in dataMap.pop('local_facts').items():
            if k not in local_facts:
                local_facts[k] = v
    for k, v in dataMap.items():
        # BUG FIX: the original tested `len(kwargs[k]) == {}`, which is
        # always False; an empty dict now counts as unset like None and [].
        if k not in kwargs or kwargs[k] is None or kwargs[k] == {} or kwargs[k] == []:
            kwargs[k] = v
    return kwargs
def do_build(ctxt, **kwargs):
    """Build and launch an EC2 instance from command-line options and an
    optional YAML template.

    Generator: yields progress strings ("." while waiting, error messages on
    failure, the Windows admin password when applicable) and finally yields
    the launched boto instance object.

    Args:
        ctxt: oslib context holding the EC2 connection and defaults.
        **kwargs: build options (template, volume_size, snap_id, elastic_ip,
            hostname, tags, networking options, ...); consumed as processed.
    """
    conn = ctxt.cnx_ec2
    # Merge template-file settings first; explicit kwargs win.
    if 'template' in kwargs and kwargs['template']:
        template_file_name = kwargs['template']
        kwargs = parse_template(ctxt, template_file_name, kwargs)
        del kwargs['template']
    # Fill in hard defaults for anything still unset.
    defaultrun = {'instance_type': 'm1.large', 'key_name': ctxt.key_name }
    for key in defaultrun:
        if key not in kwargs or kwargs[key] == None:
            kwargs[key] = defaultrun[key]
    # These helpers pop their own options out of kwargs and return the rest.
    (remote_user, kwargs) = get_remote_user(ctxt, **kwargs)
    (key_file, kwargs) = get_key_file(ctxt, **kwargs)
    (tags,kwargs) = do_tags(**kwargs)
    do_run_scripts = kwargs.pop('run')
    ###########
    # Check VM naming
    ###########
    # Fall back to the hostname as the instance Name tag.
    if 'Name' not in tags and kwargs['hostname'] is not None:
        tags['Name'] = kwargs['hostname']
    if 'Name' not in tags:
        yield "instance name is mandatory"
        return
    try:
        oslib.ec2_objects.Instance(ctxt, name=tags['Name']).get()
        # if get succeeds, the name already exists; otherwise get throws an
        # exception and we may proceed
        yield "duplicate name %s" % tags['Name']
        return
    except:
        pass
    user_data_properties = {}
    image = kwargs.pop('image_id', None)
    ###########
    # Check device mapping
    ###########
    volumes = BlockDeviceMapping(conn)
    # Extra EBS volumes are attached starting at /dev/sdf.
    first_volume = 'f'
    l = first_volume
    ebs_optimized = False
    for volume_info in kwargs.pop('volume_size', []):
        # yaml is not typed, volume_info can be a string or a number
        if isinstance(volume_info, basestring):
            # "size[,key=value,...]" form, e.g. "100G,iops=1000".
            options = volume_info.split(',')
            size = int(oslib.parse_size(options[0], 'G', default_suffix='G'))
        else:
            options = []
            size = int(volume_info)
        vol_kwargs = {"connection":conn, "size": size}
        if len(options) > 1:
            for opt in options[1:]:
                parsed = opt.split('=')
                key = parsed[0]
                if len(parsed) == 2:
                    value = parsed[1]
                elif len(parsed) == 1:
                    # A bare key is treated as a boolean flag.
                    value = True
                else:
                    raise OSLibError("can't parse volume argument %s", opt)
                if key == 'iops':
                    # Provisioned IOPS implies an io1 volume and an
                    # EBS-optimized instance.
                    ebs_optimized = True
                    vol_kwargs['volume_type'] = 'io1'
                vol_kwargs[key] = value
        volumes["/dev/sd%s"%l] = BlockDeviceType(**vol_kwargs)
        l = chr( ord(l[0]) + 1)
    kwargs['ebs_optimized'] = ebs_optimized
    # if drive letter is not f, some volumes definition was found
    if l != first_volume:
        kwargs['block_device_map'] = volumes
        user_data_properties['volumes'] = ' '.join(volumes.keys())
    # after user_data_properties['volumes'] otherwise they will be lvm'ed
    for snapshot_id in kwargs.pop('snap_id', []):
        volumes["/dev/sd%s"%l] = BlockDeviceType(connection=conn, snapshot_id=snapshot_id)
        l = chr( ord(l[0]) + 1)
    kwargs = build_user_data(user_data_properties, **kwargs)
    ###########
    # Check elastic IP
    ###########
    if kwargs['elastic_ip']:
        eip = True
    else:
        eip = False
    del kwargs['elastic_ip']
    # Drop unset/empty options so they are not passed to run_instances.
    for k in kwargs.keys()[:]:
        value = kwargs[k]
        if kwargs[k] == None:
            del(kwargs[k])
        elif value.__class__ == [].__class__ and len(value) == 0:
            del(kwargs[k])
    # A fixed private IP requires an explicit network interface spec; the
    # related options move from run_instances kwargs into that spec.
    if 'private_ip_address' in kwargs and kwargs['private_ip_address']:
        netif_specification = NetworkInterfaceCollection()
        netif_kwargs = {}
        if kwargs['private_ip_address']:
            netif_kwargs['private_ip_address'] = kwargs['private_ip_address']
            del kwargs['private_ip_address']
        if 'associate_public_ip_address' in kwargs and kwargs['associate_public_ip_address']:
            netif_kwargs['associate_public_ip_address'] = kwargs['associate_public_ip_address']
            del kwargs['associate_public_ip_address']
        if 'security_groups' in kwargs and kwargs['security_groups']:
            netif_kwargs['groups'] = kwargs['security_groups']
            del kwargs['security_groups']
        netif_kwargs['subnet_id'] = kwargs['subnet_id']
        del kwargs['subnet_id']
        # NOTE(review): debug leftover, prints to stdout.
        print netif_kwargs
        spec = NetworkInterfaceSpecification(**netif_kwargs)
        netif_specification.append(spec)
        kwargs['network_interfaces'] = netif_specification
    reservation = conn.run_instances(image, **kwargs)
    instance = reservation.instances[0]
    # Quick hack to keep the selected remote user
    instance.remote_user = remote_user
    if len(tags) > 0:
        conn.create_tags([ instance.id ], tags)
        # Propagate the creator tag to the attached network interfaces.
        if instance.interfaces and len(instance.interfaces) > 0:
            for interface in instance.interfaces:
                conn.create_tags([ interface.id ], {'creator': tags['creator']})
    # Poll until the instance leaves 'pending', yielding dots as progress.
    while instance.state != 'running' and instance.state != 'terminated':
        instance.update(True)
        yield (".")
        time.sleep(1)
    yield ("\n")
    if eip:
        # Allocate and bind an elastic IP, recorded in the EIP tag.
        ip = conn.allocate_address().public_ip
        conn.associate_address(instance_id = instance.id, public_ip=ip)
        conn.create_tags([instance.id], {"EIP": ip})
    #Update tag for this instance's volumes
    for device in instance.block_device_mapping:
        device_type = instance.block_device_mapping[device]
        (vol_tags, vol_kwargs) = do_tags(name='%s/%s' % (tags['Name'], device.replace('/dev/','')))
        conn.create_tags([ device_type.volume_id ], vol_tags)
    instance.update(True)
    windows_instance = instance.platform == 'Windows'
    if do_run_scripts and not windows_instance:
        # Wait for sshd to accept connections before running setup scripts.
        while instance.state != 'terminated':
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(1.0)
                s.connect((instance.public_dns_name, 22))
                s.close()
                break
            except socket.error, msg:
                yield (".")
                s.close()
                time.sleep(1)
        yield ("\n")
        instance.key_file = key_file
        remote_setup(instance, remote_user, key_file)
    elif windows_instance:
        # Windows: poll until the encrypted admin password is available.
        os_instance = oslib.ec2_objects.Instance(ctxt, id=instance.id)
        passget = GetWindowsPassword()
        passget.set_context(ctxt)
        passget.ec2_object = os_instance
        passget.validate(None)
        try_again = True
        while try_again:
            try:
                password = "\npassword is '%s'\n" % passget.execute(key_file=key_file)
                yield password
                try_again = False
            except OSLibError:
                yield (".")
                time.sleep(1)
    yield instance
| {
"content_hash": "30fc2224242af516540d2021cd14d94e",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 213,
"avg_line_length": 40.361702127659576,
"alnum_prop": 0.5952161307327359,
"repo_name": "fbacchella/oscmd",
"id": "20cb660f354ba891db90dc6c3e95ba64e18997b2",
"size": "15215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslib/build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "126772"
},
{
"name": "Shell",
"bytes": "2150"
}
],
"symlink_target": ""
} |
import uuid
import mock
import mox
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils as json
import six
from heat.common import context
from heat.common import environment_util as env_util
from heat.common import exception
from heat.common import identifier
from heat.common import template_format
from heat.engine.cfn import template as cfntemplate
from heat.engine import environment
from heat.engine.hot import functions as hot_functions
from heat.engine.hot import template as hottemplate
from heat.engine import resource as res
from heat.engine import service
from heat.engine import stack as parser
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import generic_resource as generic_rsrc
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
cfg.CONF.import_opt('enable_stack_abandon', 'heat.common.config')
wp_template_no_default = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
user_policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User",
"Properties" : {
"Policies" : [ { "Ref": "WebServerAccessPolicy"} ]
}
},
"WebServerAccessPolicy" : {
"Type" : "OS::Heat::AccessPolicy",
"Properties" : {
"AllowedResources" : [ "WebServer" ]
}
},
"HostKeys" : {
"Type" : "AWS::IAM::AccessKey",
"Properties" : {
"UserName" : {"Ref": "CfnUser"}
}
},
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
server_config_template = '''
heat_template_version: 2013-05-23
resources:
WebServer:
type: OS::Nova::Server
'''
class StackCreateTest(common.HeatTestCase):
    """Lifecycle tests for stack create, adopt and delete."""

    def setUp(self):
        super(StackCreateTest, self).setUp()

    def test_wordpress_single_instance_stack_create(self):
        """A plain create leaves the WebServer resource provisioned."""
        stack = tools.get_stack('test_stack', utils.dummy_context())
        tools.setup_mocks(self.m, stack)
        self.m.ReplayAll()
        stack.store()
        stack.create()
        self.assertIsNotNone(stack['WebServer'])
        self.assertTrue(int(stack['WebServer'].resource_id) > 0)
        self.assertNotEqual(stack['WebServer'].ipaddress, '0.0.0.0')

    def test_wordpress_single_instance_stack_adopt(self):
        """Adopt reuses the resource id supplied in the adopt data."""
        t = template_format.parse(tools.wp_template)
        template = templatem.Template(t)
        ctx = utils.dummy_context()
        adopt_data = {
            'resources': {
                'WebServer': {
                    'resource_id': 'test-res-id'
                }
            }
        }
        stack = parser.Stack(ctx,
                             'test_stack',
                             template,
                             adopt_stack_data=adopt_data)
        tools.setup_mocks(self.m, stack)
        self.m.ReplayAll()
        stack.store()
        stack.adopt()
        self.assertIsNotNone(stack['WebServer'])
        self.assertEqual('test-res-id', stack['WebServer'].resource_id)
        self.assertEqual((stack.ADOPT, stack.COMPLETE), stack.state)

    def test_wordpress_single_instance_stack_adopt_fail(self):
        """Adopt fails when the adopt data names the wrong resource."""
        t = template_format.parse(tools.wp_template)
        template = templatem.Template(t)
        ctx = utils.dummy_context()
        # 'WebServer1' does not match the template's 'WebServer' resource,
        # so no resource_id is available for it.
        adopt_data = {
            'resources': {
                'WebServer1': {
                    'resource_id': 'test-res-id'
                }
            }
        }
        stack = parser.Stack(ctx,
                             'test_stack',
                             template,
                             adopt_stack_data=adopt_data)
        tools.setup_mocks(self.m, stack)
        self.m.ReplayAll()
        stack.store()
        stack.adopt()
        self.assertIsNotNone(stack['WebServer'])
        expected = ('Resource ADOPT failed: Exception: resources.WebServer: '
                    'Resource ID was not provided.')
        self.assertEqual(expected, stack.status_reason)
        self.assertEqual((stack.ADOPT, stack.FAILED), stack.state)

    def test_wordpress_single_instance_stack_delete(self):
        """Delete completes even when the nova server is already gone."""
        ctx = utils.dummy_context()
        stack = tools.get_stack('test_stack', ctx)
        fc = tools.setup_mocks(self.m, stack, mock_keystone=False)
        self.m.ReplayAll()
        stack_id = stack.store()
        stack.create()
        db_s = stack_object.Stack.get_by_id(ctx, stack_id)
        self.assertIsNotNone(db_s)
        self.assertIsNotNone(stack['WebServer'])
        self.assertTrue(int(stack['WebServer'].resource_id) > 0)
        # Simulate the server having been deleted out-of-band.
        self.patchobject(fc.servers, 'delete',
                         side_effect=fakes_nova.fake_exception())
        stack.delete()
        rsrc = stack['WebServer']
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        # BUG FIX: previously asserted against rsrc.state a second time,
        # so the stack's own final state was never checked.
        self.assertEqual((stack.DELETE, stack.COMPLETE), stack.state)
        self.assertIsNone(stack_object.Stack.get_by_id(ctx, stack_id))
        db_s.refresh()
        self.assertEqual('DELETE', db_s.action)
        self.assertEqual('COMPLETE', db_s.status, )
class StackConvergenceServiceCreateUpdateTest(common.HeatTestCase):
    """Create/update through the engine service with convergence enabled.

    Uses mox record/replay, so the stubbed calls below must match the
    engine's call order exactly.
    """

    def setUp(self):
        super(StackConvergenceServiceCreateUpdateTest, self).setUp()
        cfg.CONF.set_override('convergence_engine', True, enforce_type=True)
        self.ctx = utils.dummy_context()
        self.man = service.EngineService('a-host', 'a-topic')

    def _stub_update_mocks(self, stack_to_load, stack_to_return):
        # Record the Stack.load call the update path will make, and stub
        # Template/Environment so the test can control what they return.
        self.m.StubOutWithMock(parser, 'Stack')
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=stack_to_load
                          ).AndReturn(stack_to_return)
        self.m.StubOutWithMock(templatem, 'Template')
        self.m.StubOutWithMock(environment, 'Environment')

    def _test_stack_create_convergence(self, stack_name):
        # Drive create_stack and verify the stored stack has convergence on.
        params = {'foo': 'bar'}
        template = '{ "Template": "data" }'
        stack = tools.get_stack(stack_name, self.ctx,
                                template=tools.string_template_five,
                                convergence=True)
        self.m.StubOutWithMock(templatem, 'Template')
        self.m.StubOutWithMock(environment, 'Environment')
        self.m.StubOutWithMock(parser, 'Stack')
        templatem.Template(template, files=None).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        # Expect convergence=True to be passed through to the Stack ctor.
        parser.Stack(self.ctx, stack.name,
                     stack.t, owner_id=None,
                     parent_resource=None,
                     nested_depth=0, user_creds_id=None,
                     stack_user_project_id=None,
                     timeout_mins=60,
                     disable_rollback=False,
                     convergence=True).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        self.m.ReplayAll()
        api_args = {'timeout_mins': 60, 'disable_rollback': False}
        result = self.man.create_stack(self.ctx, 'service_create_test_stack',
                                       template, params, None, api_args)
        db_stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id'])
        self.assertTrue(db_stack.convergence)
        self.assertEqual(result['stack_id'], db_stack.id)
        self.m.VerifyAll()

    def test_stack_create_enabled_convergence_engine(self):
        stack_name = 'service_create_test_stack'
        self._test_stack_create_convergence(stack_name)

    def test_stack_update_enabled_convergence_engine(self):
        """Update preserves the existing stack's convergence attributes."""
        stack_name = 'service_update_test_stack'
        params = {'foo': 'bar'}
        template = '{ "Template": "data" }'
        old_stack = tools.get_stack(stack_name, self.ctx,
                                    template=tools.string_template_five,
                                    convergence=True)
        old_stack.timeout_mins = 1
        sid = old_stack.store()
        s = stack_object.Stack.get_by_id(self.ctx, sid)
        stack = tools.get_stack(stack_name, self.ctx,
                                template=tools.string_template_five_update,
                                convergence=True)
        self._stub_update_mocks(s, old_stack)
        templatem.Template(template, files=None).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        # The new Stack must be built carrying over the old stack's
        # convergence/traversal bookkeeping.
        parser.Stack(self.ctx, stack.name,
                     stack.t,
                     owner_id=old_stack.owner_id,
                     nested_depth=old_stack.nested_depth,
                     user_creds_id=old_stack.user_creds_id,
                     stack_user_project_id=old_stack.stack_user_project_id,
                     timeout_mins=60,
                     disable_rollback=False,
                     parent_resource=None,
                     strict_validate=True,
                     tenant_id=old_stack.tenant_id,
                     username=old_stack.username,
                     convergence=old_stack.convergence,
                     current_traversal=old_stack.current_traversal,
                     prev_raw_template_id=old_stack.prev_raw_template_id,
                     current_deps=old_stack.current_deps).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        self.m.ReplayAll()
        api_args = {'timeout_mins': 60, 'disable_rollback': False}
        result = self.man.update_stack(self.ctx, old_stack.identifier(),
                                       template, params, None, api_args)
        self.assertTrue(old_stack.convergence)
        self.assertEqual(old_stack.identifier(), result)
        self.assertIsInstance(result, dict)
        self.assertTrue(result['stack_id'])
        self.m.VerifyAll()
class StackServiceAuthorizeTest(common.HeatTestCase):
    """Tests for EngineService._authorize_stack_user."""

    def setUp(self):
        super(StackServiceAuthorizeTest, self).setUp()
        self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
        self.eng = service.EngineService('a-host', 'a-topic')
        self.eng.engine_id = 'engine-fake-uuid'

    @tools.stack_context('service_authorize_stack_user_nocreds_test_stack')
    def test_stack_authorize_stack_user_nocreds(self):
        # Without credentials in the context, authorization is denied.
        self.assertFalse(self.eng._authorize_stack_user(self.ctx,
                                                        self.stack,
                                                        'foo'))

    @tools.stack_context('service_authorize_user_attribute_error_test_stack')
    def test_stack_authorize_stack_user_attribute_error(self):
        # AttributeError while decoding the credentials is treated as denial.
        self.m.StubOutWithMock(json, 'loads')
        json.loads(None).AndRaise(AttributeError)
        self.m.ReplayAll()
        self.assertFalse(self.eng._authorize_stack_user(self.ctx,
                                                        self.stack,
                                                        'foo'))
        self.m.VerifyAll()

    @tools.stack_context('service_authorize_stack_user_type_error_test_stack')
    def test_stack_authorize_stack_user_type_error(self):
        # TypeError while decoding the credentials is treated as denial.
        self.m.StubOutWithMock(json, 'loads')
        json.loads(mox.IgnoreArg()).AndRaise(TypeError)
        self.m.ReplayAll()
        self.assertFalse(self.eng._authorize_stack_user(self.ctx,
                                                        self.stack,
                                                        'foo'))
        self.m.VerifyAll()

    def test_stack_authorize_stack_user(self):
        # Authorization by EC2 access key: only the resource allowed by the
        # template's AccessPolicy (WebServer) is granted.
        self.ctx = utils.dummy_context()
        self.ctx.aws_creds = '{"ec2Credentials": {"access": "4567"}}'
        stack_name = 'stack_authorize_stack_user'
        stack = tools.get_stack(stack_name, self.ctx, user_policy_template)
        self.stack = stack
        fc = tools.setup_mocks(self.m, stack)
        self.patchobject(fc.servers, 'delete',
                         side_effect=fakes_nova.fake_exception())
        self.m.ReplayAll()
        stack.store()
        stack.create()
        self.assertTrue(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'WebServer'))
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'CfnUser'))
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'NoSuchResource'))
        self.m.VerifyAll()

    def test_stack_authorize_stack_user_user_id(self):
        # Authorization by keystone user id via a registered access handler.
        self.ctx = utils.dummy_context(user_id=str(uuid.uuid4()))
        stack_name = 'stack_authorize_stack_user_user_id'
        stack = tools.get_stack(stack_name, self.ctx, server_config_template)
        self.stack = stack

        def handler(resource_name):
            return resource_name == 'WebServer'

        self.stack.register_access_allowed_handler(self.ctx.user_id, handler)
        # matching credential_id and resource_name
        self.assertTrue(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'WebServer'))
        # not matching resource_name
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'NoSuchResource'))
        # not matching credential_id
        self.ctx.user = str(uuid.uuid4())
        self.assertFalse(self.eng._authorize_stack_user(
            self.ctx, self.stack, 'WebServer'))
class StackServiceTest(common.HeatTestCase):
def setUp(self):
super(StackServiceTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
self.eng.thread_group_mgr = tools.DummyThreadGroupManager()
self.eng.engine_id = 'engine-fake-uuid'
@tools.stack_context('service_identify_test_stack', False)
def test_stack_identify(self):
identity = self.eng.identify_stack(self.ctx, self.stack.name)
self.assertEqual(self.stack.identifier(), identity)
@tools.stack_context('ef0c41a4-644f-447c-ad80-7eecb0becf79', False)
def test_stack_identify_by_name_in_uuid(self):
identity = self.eng.identify_stack(self.ctx, self.stack.name)
self.assertEqual(self.stack.identifier(), identity)
@tools.stack_context('service_identify_uuid_test_stack', False)
def test_stack_identify_uuid(self):
identity = self.eng.identify_stack(self.ctx, self.stack.id)
self.assertEqual(self.stack.identifier(), identity)
def test_stack_identify_nonexist(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.identify_stack, self.ctx, 'wibble')
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
@tools.stack_context('service_create_existing_test_stack', False)
def test_stack_create_existing(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.create_stack, self.ctx,
self.stack.name, self.stack.t.t, {}, None, {})
self.assertEqual(exception.StackExists, ex.exc_info[0])
@tools.stack_context('service_name_tenants_test_stack', False)
def test_stack_by_name_tenants(self):
self.assertEqual(
self.stack.id,
stack_object.Stack.get_by_name(self.ctx, self.stack.name).id
)
ctx2 = utils.dummy_context(tenant_id='stack_service_test_tenant2')
self.assertIsNone(stack_object.Stack.get_by_name(
ctx2,
self.stack.name))
@tools.stack_context('service_badname_test_stack', False)
def test_stack_by_name_badname(self):
# If a bad name type, such as a map, is passed, we should just return
# None, as it's converted to a string, which won't match any name
ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
self.assertIsNone(stack_object.Stack.get_by_name(
ctx,
{'notallowed': self.stack.name}))
self.assertIsNone(stack_object.Stack.get_by_name_and_owner_id(
ctx,
{'notallowed': self.stack.name}, 'owner'))
@tools.stack_context('service_list_all_test_stack')
def test_stack_list_all(self):
sl = self.eng.list_stacks(self.ctx)
self.assertEqual(1, len(sl))
for s in sl:
self.assertIn('creation_time', s)
self.assertIn('updated_time', s)
self.assertIn('deletion_time', s)
self.assertIsNone(s['deletion_time'])
self.assertIn('stack_identity', s)
self.assertIsNotNone(s['stack_identity'])
self.assertIn('stack_name', s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertIn('stack_status', s)
self.assertIn('stack_status_reason', s)
self.assertIn('description', s)
self.assertEqual('', s['description'])
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_passes_marker_info(self, mock_stack_get_all):
limit = object()
marker = object()
sort_keys = object()
sort_dir = object()
self.eng.list_stacks(self.ctx, limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir)
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=limit,
sort_keys=sort_keys,
marker=marker,
sort_dir=sort_dir,
filters=mock.ANY,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_passes_filtering_info(self, mock_stack_get_all):
filters = {'foo': 'bar'}
self.eng.list_stacks(self.ctx, filters=filters)
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=filters,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_passes_filter_translated(self, mock_stack_get_all):
filters = {'stack_name': 'bar'}
self.eng.list_stacks(self.ctx, filters=filters)
translated = {'name': 'bar'}
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=translated,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_show_nested(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, show_nested=True)
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=mock.ANY,
show_deleted=mock.ANY,
show_nested=True,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_show_deleted(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, show_deleted=True)
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=mock.ANY,
show_deleted=True,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_show_hidden(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, show_hidden=True)
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=mock.ANY,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=True,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_tags(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, tags=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=mock.ANY,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=['foo', 'bar'],
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_tags_any(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, tags_any=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=mock.ANY,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=['foo', 'bar'],
not_tags=mock.ANY,
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_not_tags(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, not_tags=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=mock.ANY,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=['foo', 'bar'],
not_tags_any=mock.ANY)
@mock.patch.object(stack_object.Stack, 'get_all')
def test_stack_list_not_tags_any(self, mock_stack_get_all):
self.eng.list_stacks(self.ctx, not_tags_any=['foo', 'bar'])
mock_stack_get_all.assert_called_once_with(self.ctx,
limit=mock.ANY,
sort_keys=mock.ANY,
marker=mock.ANY,
sort_dir=mock.ANY,
filters=mock.ANY,
show_deleted=mock.ANY,
show_nested=mock.ANY,
show_hidden=mock.ANY,
tags=mock.ANY,
tags_any=mock.ANY,
not_tags=mock.ANY,
not_tags_any=['foo', 'bar'])
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stacks_passes_filter_info(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, filters={'foo': 'bar'})
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters={'foo': 'bar'},
show_deleted=False,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stacks_show_nested(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, show_nested=True)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=False,
show_nested=True,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stack_show_deleted(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, show_deleted=True)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=True,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_count_stack_show_hidden(self, mock_stack_count_all):
self.eng.count_stacks(self.ctx, show_hidden=True)
mock_stack_count_all.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=False,
show_nested=False,
show_hidden=True,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@tools.stack_context('service_export_stack')
def test_export_stack(self):
cfg.CONF.set_override('enable_stack_abandon', True, enforce_type=True)
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
expected_res = {
u'WebServer': {
'action': 'CREATE',
'metadata': {},
'name': u'WebServer',
'resource_data': {},
'resource_id': '9999',
'status': 'COMPLETE',
'type': u'AWS::EC2::Instance'}}
self.stack.tags = ['tag1', 'tag2']
self.m.ReplayAll()
ret = self.eng.export_stack(self.ctx, self.stack.identifier())
self.assertEqual(11, len(ret))
self.assertEqual('CREATE', ret['action'])
self.assertEqual('COMPLETE', ret['status'])
self.assertEqual('service_export_stack', ret['name'])
self.assertEqual({}, ret['files'])
self.assertIn('id', ret)
self.assertEqual(expected_res, ret['resources'])
self.assertEqual(self.stack.t.t, ret['template'])
self.assertIn('project_id', ret)
self.assertIn('stack_user_project_id', ret)
self.assertIn('environment', ret)
self.assertIn('files', ret)
self.assertEqual(['tag1', 'tag2'], ret['tags'])
self.m.VerifyAll()
@tools.stack_context('service_abandon_stack')
def test_abandon_stack(self):
cfg.CONF.set_override('enable_stack_abandon', True, enforce_type=True)
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
self.eng.abandon_stack(self.ctx, self.stack.identifier())
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.show_stack,
self.ctx, self.stack.identifier(),
resolve_outputs=True)
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
self.m.VerifyAll()
def test_stack_describe_nonexistent(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.EntityNotFound(
entity='Stack', name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier,
show_deleted=True).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.show_stack,
self.ctx, non_exist_identifier,
resolve_outputs=True)
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
self.m.VerifyAll()
def test_stack_describe_bad_tenant(self):
non_exist_identifier = identifier.HeatIdentifier(
'wibble', 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
invalid_tenant_exc = exception.InvalidTenant(target='test',
actual='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier,
show_deleted=True).AndRaise(invalid_tenant_exc)
self.m.ReplayAll()
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.show_stack,
self.ctx, non_exist_identifier,
resolve_outputs=True)
self.assertEqual(exception.InvalidTenant, ex.exc_info[0])
self.m.VerifyAll()
@tools.stack_context('service_describe_test_stack', False)
def test_stack_describe(self):
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier(),
show_deleted=True).AndReturn(s)
self.m.ReplayAll()
sl = self.eng.show_stack(self.ctx, self.stack.identifier(),
resolve_outputs=True)
self.assertEqual(1, len(sl))
s = sl[0]
self.assertIn('creation_time', s)
self.assertIn('updated_time', s)
self.assertIn('deletion_time', s)
self.assertIsNone(s['deletion_time'])
self.assertIn('stack_identity', s)
self.assertIsNotNone(s['stack_identity'])
self.assertIn('stack_name', s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertIn('stack_status', s)
self.assertIn('stack_status_reason', s)
self.assertIn('description', s)
self.assertIn('WordPress', s['description'])
self.assertIn('parameters', s)
self.m.VerifyAll()
@tools.stack_context('service_describe_all_test_stack', False)
def test_stack_describe_all(self):
sl = self.eng.show_stack(self.ctx, None, resolve_outputs=True)
self.assertEqual(1, len(sl))
s = sl[0]
self.assertIn('creation_time', s)
self.assertIn('updated_time', s)
self.assertIn('deletion_time', s)
self.assertIsNone(s['deletion_time'])
self.assertIn('stack_identity', s)
self.assertIsNotNone(s['stack_identity'])
self.assertIn('stack_name', s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertIn('stack_status', s)
self.assertIn('stack_status_reason', s)
self.assertIn('description', s)
self.assertIn('WordPress', s['description'])
self.assertIn('parameters', s)
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_versions(self, templ_mock):
class DummyMgr(object):
def names(self):
return ['a.2012-12-12', 'c.newton', 'c.2016-10-14',
'c.something']
def __getitem__(self, item):
m = mock.MagicMock()
if item == 'a.2012-12-12':
m.plugin = cfntemplate.CfnTemplate
return m
else:
m.plugin = hottemplate.HOTemplate20130523
return m
templ_mock.return_value = DummyMgr()
templates = self.eng.list_template_versions(self.ctx)
expected = [{'version': 'a.2012-12-12', 'type': 'cfn', 'aliases': []},
{'version': 'c.2016-10-14',
'aliases': ['c.newton', 'c.something'], 'type': 'hot'}]
self.assertEqual(expected, templates)
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_versions_invalid_version(self, templ_mock):
class DummyMgr(object):
def names(self):
return ['c.something']
def __getitem__(self, item):
m = mock.MagicMock()
if item == 'c.something':
m.plugin = cfntemplate.CfnTemplate
return m
templ_mock.return_value = DummyMgr()
ret = self.assertRaises(exception.InvalidTemplateVersions,
self.eng.list_template_versions, self.ctx)
self.assertIn('A template version alias c.something was added',
six.text_type(ret))
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_functions(self, templ_mock):
class DummyFunc1(object):
"""Dummy Func1.
Dummy Func1 Long Description.
"""
class DummyFunc2(object):
"""Dummy Func2.
Dummy Func2 Long Description.
"""
plugin_mock = mock.Mock(
functions={'dummy1': DummyFunc1,
'dummy2': DummyFunc2,
'removed': hot_functions.Removed})
dummy_tmpl = mock.Mock(plugin=plugin_mock)
class DummyMgr(object):
def __getitem__(self, item):
return dummy_tmpl
templ_mock.return_value = DummyMgr()
functions = self.eng.list_template_functions(self.ctx, 'dummytemplate')
expected = [{'functions': 'dummy1',
'description': 'Dummy Func1.'},
{'functions': 'dummy2',
'description': 'Dummy Func2.'}]
self.assertEqual(sorted(expected, key=lambda k: k['functions']),
sorted(functions, key=lambda k: k['functions']))
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_functions_version_not_found(self, templ_mock):
class DummyMgr(object):
def __getitem__(self, item):
raise KeyError()
templ_mock.return_value = DummyMgr()
version = 'dummytemplate'
ex = self.assertRaises(exception.NotFound,
self.eng.list_template_functions,
self.ctx,
version)
msg = "Template with version %s not found" % version
self.assertEqual(msg, six.text_type(ex))
def test_stack_list_outputs(self):
t = template_format.parse(tools.wp_template)
t['outputs'] = {
'test': {'value': '{ get_attr: fir }',
'description': 'sec'},
'test2': {'value': 'sec'}}
tmpl = templatem.Template(t)
stack = parser.Stack(self.ctx, 'service_list_outputs_stack', tmpl)
self.patchobject(self.eng, '_get_stack')
self.patchobject(parser.Stack, 'load', return_value=stack)
outputs = self.eng.list_outputs(self.ctx, mock.ANY)
self.assertIn({'output_key': 'test',
'description': 'sec'}, outputs)
self.assertIn({'output_key': 'test2',
'description': 'No description given'},
outputs)
def test_stack_empty_list_outputs(self):
# Ensure that stack with no output returns empty list
t = template_format.parse(tools.wp_template)
t['outputs'] = {}
tmpl = templatem.Template(t)
stack = parser.Stack(self.ctx, 'service_list_outputs_stack', tmpl)
self.patchobject(self.eng, '_get_stack')
self.patchobject(parser.Stack, 'load', return_value=stack)
outputs = self.eng.list_outputs(self.ctx, mock.ANY)
self.assertEqual([], outputs)
def test_get_environment(self):
# Setup
t = template_format.parse(tools.wp_template)
env = {'parameters': {'KeyName': 'EnvKey'}}
tmpl = templatem.Template(t)
stack = parser.Stack(self.ctx, 'get_env_stack', tmpl)
mock_get_stack = self.patchobject(self.eng, '_get_stack')
mock_get_stack.return_value = mock.MagicMock()
mock_get_stack.return_value.raw_template.environment = env
self.patchobject(parser.Stack, 'load', return_value=stack)
# Test
found = self.eng.get_environment(self.ctx, stack.identifier())
# Verify
self.assertEqual(env, found)
def test_get_environment_no_env(self):
# Setup
exc = exception.EntityNotFound(entity='stack', name='missing')
self.patchobject(self.eng, '_get_stack', side_effect=exc)
# Test
self.assertRaises(dispatcher.ExpectedException,
self.eng.get_environment,
self.ctx,
'irrelevant')
def test_get_files(self):
# Setup
t = template_format.parse(tools.wp_template)
files = {'foo.yaml': 'i am a file'}
tmpl = templatem.Template(t, files=files)
stack = parser.Stack(self.ctx, 'get_env_stack', tmpl)
stack.store()
mock_get_stack = self.patchobject(self.eng, '_get_stack')
mock_get_stack.return_value = mock.MagicMock()
self.patchobject(templatem.Template, 'load', return_value=tmpl)
# Test
found = self.eng.get_files(self.ctx, stack.identifier())
# Verify
self.assertEqual(files, found)
def test_stack_show_output(self):
t = template_format.parse(tools.wp_template)
t['outputs'] = {'test': {'value': 'first', 'description': 'sec'},
'test2': {'value': 'sec'}}
tmpl = templatem.Template(t)
stack = parser.Stack(self.ctx, 'service_list_outputs_stack', tmpl,
resolve_data=False)
self.patchobject(self.eng, '_get_stack')
self.patchobject(parser.Stack, 'load', return_value=stack)
output = self.eng.show_output(self.ctx, mock.ANY, 'test')
self.assertEqual({'output_key': 'test', 'output_value': 'first',
'description': 'sec'},
output)
# Ensure that stack raised NotFound error with incorrect key.
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.show_output,
self.ctx, mock.ANY, 'bunny')
self.assertEqual(exception.NotFound, ex.exc_info[0])
self.assertEqual('Specified output key bunny not found.',
six.text_type(ex.exc_info[1]))
def test_stack_show_output_error(self):
t = template_format.parse(tools.wp_template)
t['outputs'] = {'test': {'value': 'first', 'description': 'sec'}}
tmpl = templatem.Template(t)
stack = parser.Stack(self.ctx, 'service_list_outputs_stack', tmpl,
resolve_data=False)
self.patchobject(self.eng, '_get_stack')
self.patchobject(parser.Stack, 'load', return_value=stack)
self.patchobject(
stack, 'output',
side_effect=[exception.EntityNotFound(entity='one', name='name')])
output = self.eng.show_output(self.ctx, mock.ANY, 'test')
self.assertEqual(
{'output_key': 'test',
'output_error': "The one (name) could not be found.",
'description': 'sec',
'output_value': None},
output)
def test_stack_list_all_empty(self):
sl = self.eng.list_stacks(self.ctx)
self.assertEqual(0, len(sl))
def test_stack_describe_all_empty(self):
sl = self.eng.show_stack(self.ctx, None, resolve_outputs=True)
self.assertEqual(0, len(sl))
def test_get_template(self):
# Setup
t = template_format.parse(tools.wp_template)
tmpl = templatem.Template(t)
stack = parser.Stack(self.ctx, 'get_env_stack', tmpl)
mock_get_stack = self.patchobject(self.eng, '_get_stack')
mock_get_stack.return_value = mock.MagicMock()
mock_get_stack.return_value.raw_template.template = t
self.patchobject(parser.Stack, 'load', return_value=stack)
# Test
found = self.eng.get_template(self.ctx, stack.identifier())
# Verify
self.assertEqual(t, found)
def test_get_template_no_template(self):
# Setup
exc = exception.EntityNotFound(entity='stack', name='missing')
self.patchobject(self.eng, '_get_stack', side_effect=exc)
# Test
self.assertRaises(dispatcher.ExpectedException,
self.eng.get_template,
self.ctx,
'missing')
def _preview_stack(self, environment_files=None):
res._register_class('GenericResource1', generic_rsrc.GenericResource)
res._register_class('GenericResource2', generic_rsrc.GenericResource)
args = {}
params = {}
files = None
stack_name = 'SampleStack'
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Description': 'Lorem ipsum.',
'Resources': {
'SampleResource1': {'Type': 'GenericResource1'},
'SampleResource2': {'Type': 'GenericResource2'}}}
return self.eng.preview_stack(self.ctx, stack_name, tpl,
params, files, args,
environment_files=environment_files)
def test_preview_stack_returns_a_stack(self):
stack = self._preview_stack()
expected_identity = {'path': '',
'stack_id': 'None',
'stack_name': 'SampleStack',
'tenant': 'stack_service_test_tenant'}
self.assertEqual(expected_identity, stack['stack_identity'])
self.assertEqual('SampleStack', stack['stack_name'])
self.assertEqual('Lorem ipsum.', stack['description'])
def test_preview_stack_returns_list_of_resources_in_stack(self):
stack = self._preview_stack()
self.assertIsInstance(stack['resources'], list)
self.assertEqual(2, len(stack['resources']))
resource_types = set(r['resource_type'] for r in stack['resources'])
self.assertIn('GenericResource1', resource_types)
self.assertIn('GenericResource2', resource_types)
resource_names = set(r['resource_name'] for r in stack['resources'])
self.assertIn('SampleResource1', resource_names)
self.assertIn('SampleResource2', resource_names)
def test_preview_stack_validates_new_stack(self):
exc = exception.StackExists(stack_name='Validation Failed')
self.eng._validate_new_stack = mock.Mock(side_effect=exc)
ex = self.assertRaises(dispatcher.ExpectedException,
self._preview_stack)
self.assertEqual(exception.StackExists, ex.exc_info[0])
@mock.patch.object(service.api, 'format_stack_preview', new=mock.Mock())
@mock.patch.object(service.parser, 'Stack')
def test_preview_stack_checks_stack_validity(self, mock_parser):
exc = exception.StackValidationFailed(message='Validation Failed')
mock_parsed_stack = mock.Mock()
mock_parsed_stack.validate.side_effect = exc
mock_parser.return_value = mock_parsed_stack
ex = self.assertRaises(dispatcher.ExpectedException,
self._preview_stack)
self.assertEqual(exception.StackValidationFailed, ex.exc_info[0])
@mock.patch.object(env_util, 'merge_environments')
def test_preview_environment_files(self, mock_merge):
# Setup
environment_files = ['env_1']
# Test
self._preview_stack(environment_files=environment_files)
# Verify
mock_merge.assert_called_once_with(environment_files, None, {}, {})
@mock.patch.object(stack_object.Stack, 'get_by_name')
def test_validate_new_stack_checks_existing_stack(self, mock_stack_get):
mock_stack_get.return_value = 'existing_db_stack'
tmpl = templatem.Template(
{'HeatTemplateFormatVersion': '2012-12-12'})
self.assertRaises(exception.StackExists, self.eng._validate_new_stack,
self.ctx, 'test_existing_stack', tmpl)
@mock.patch.object(stack_object.Stack, 'count_all')
def test_validate_new_stack_checks_stack_limit(self, mock_db_count):
cfg.CONF.set_override('max_stacks_per_tenant', 99, enforce_type=True)
mock_db_count.return_value = 99
template = templatem.Template(
{'HeatTemplateFormatVersion': '2012-12-12'})
self.assertRaises(exception.RequestLimitExceeded,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack', template)
def test_validate_new_stack_checks_incorrect_keywords_in_resource(self):
template = {'heat_template_version': '2013-05-23',
'resources': {
'Res': {'Type': 'GenericResource1'}}}
parsed_template = templatem.Template(template)
ex = self.assertRaises(exception.StackValidationFailed,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack',
parsed_template)
msg = (u'"Type" is not a valid keyword '
'inside a resource definition')
self.assertEqual(msg, six.text_type(ex))
def test_validate_new_stack_checks_incorrect_sections(self):
template = {'heat_template_version': '2013-05-23',
'unknown_section': {
'Res': {'Type': 'GenericResource1'}}}
parsed_template = templatem.Template(template)
ex = self.assertRaises(exception.StackValidationFailed,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack',
parsed_template)
msg = u'The template section is invalid: unknown_section'
self.assertEqual(msg, six.text_type(ex))
def test_validate_new_stack_checks_resource_limit(self):
cfg.CONF.set_override('max_resources_per_stack', 5, enforce_type=True)
template = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'Res1': {'Type': 'GenericResource1'},
'Res2': {'Type': 'GenericResource1'},
'Res3': {'Type': 'GenericResource1'},
'Res4': {'Type': 'GenericResource1'},
'Res5': {'Type': 'GenericResource1'},
'Res6': {'Type': 'GenericResource1'}}}
parsed_template = templatem.Template(template)
self.assertRaises(exception.RequestLimitExceeded,
self.eng._validate_new_stack,
self.ctx, 'test_existing_stack', parsed_template)
def test_validate_new_stack_handle_assertion_error(self):
tmpl = mock.MagicMock()
expected_message = 'Expected assertion error'
tmpl.validate.side_effect = AssertionError(expected_message)
exc = self.assertRaises(AssertionError, self.eng._validate_new_stack,
self.ctx, 'stack_name', tmpl)
self.assertEqual(expected_message, six.text_type(exc))
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
@mock.patch.object(stack_object.Stack, 'get_all')
@mock.patch.object(stack_object.Stack, 'get_by_id')
@mock.patch('heat.engine.stack_lock.StackLock',
return_value=mock.Mock())
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(context, 'get_admin_context')
def test_engine_reset_stack_status(
self,
mock_admin_context,
mock_stack_load,
mock_stacklock,
mock_get_by_id,
mock_get_all,
mock_thread):
mock_admin_context.return_value = self.ctx
db_stack = mock.MagicMock()
db_stack.id = 'foo'
db_stack.status = 'IN_PROGRESS'
db_stack.status_reason = None
unlocked_stack = mock.MagicMock()
unlocked_stack.id = 'bar'
unlocked_stack.status = 'IN_PROGRESS'
unlocked_stack.status_reason = None
unlocked_stack_failed = mock.MagicMock()
unlocked_stack_failed.id = 'bar'
unlocked_stack_failed.status = 'FAILED'
unlocked_stack_failed.status_reason = 'because'
mock_get_all.return_value = [db_stack, unlocked_stack]
mock_get_by_id.side_effect = [db_stack, unlocked_stack_failed]
fake_stack = mock.MagicMock()
fake_stack.action = 'CREATE'
fake_stack.id = 'foo'
fake_stack.status = 'IN_PROGRESS'
mock_stack_load.return_value = fake_stack
lock1 = mock.MagicMock()
lock1.get_engine_id.return_value = 'old-engine'
lock1.acquire.return_value = None
lock2 = mock.MagicMock()
lock2.acquire.return_value = None
mock_stacklock.side_effect = [lock1, lock2]
self.eng.thread_group_mgr = mock_thread
self.eng.reset_stack_status()
mock_admin_context.assert_called_once_with()
filters = {
'status': parser.Stack.IN_PROGRESS,
'convergence': False
}
mock_get_all.assert_called_once_with(self.ctx,
filters=filters,
show_nested=True)
mock_get_by_id.assert_has_calls([
mock.call(self.ctx, 'foo'),
mock.call(self.ctx, 'bar'),
])
mock_stack_load.assert_called_once_with(self.ctx,
stack=db_stack,
service_check_defer=True,
resource_validate=False,
resolve_data=False)
self.assertTrue(lock2.release.called)
mock_thread.start_with_acquired_lock.assert_called_once_with(
fake_stack, lock1,
self.eng.set_stack_and_resource_to_failed, fake_stack
)
def test_set_stack_and_resource_to_failed(self):
def fake_stack():
stk = mock.MagicMock()
stk.action = 'CREATE'
stk.id = 'foo'
stk.status = 'IN_PROGRESS'
stk.FAILED = 'FAILED'
def mock_stack_state_set(a, s, reason):
stk.status = s
stk.action = a
stk.status_reason = reason
stk.state_set = mock_stack_state_set
return stk
def fake_stack_resource(name, action, status):
rs = mock.MagicMock()
rs.name = name
rs.action = action
rs.status = status
rs.IN_PROGRESS = 'IN_PROGRESS'
rs.FAILED = 'FAILED'
def mock_resource_state_set(a, s, reason='engine_down'):
rs.status = s
rs.action = a
rs.status_reason = reason
rs.state_set = mock_resource_state_set
return rs
test_stack = fake_stack()
test_stack.resources = {
'r1': fake_stack_resource('r1', 'UPDATE', 'COMPLETE'),
'r2': fake_stack_resource('r2', 'UPDATE', 'IN_PROGRESS'),
'r3': fake_stack_resource('r3', 'UPDATE', 'FAILED')}
self.eng.set_stack_and_resource_to_failed(test_stack)
self.assertEqual('FAILED', test_stack.status)
self.assertEqual('COMPLETE', test_stack.resources.get('r1').status)
self.assertEqual('FAILED', test_stack.resources.get('r2').status)
self.assertEqual('FAILED', test_stack.resources.get('r3').status)
def test_parse_adopt_stack_data_without_parameters(self):
cfg.CONF.set_override('enable_stack_adopt', True, enforce_type=True)
template = {"heat_template_version": "2015-04-30",
"resources": {
"myres": {
"type": "OS::Cinder::Volume",
"properties": {
"name": "volname",
"size": "1"
}
}
}}
# Assert no KeyError exception raised like before, when trying to
# get parameters from adopt stack data which doesn't have it.
args = {"adopt_stack_data": '''{}'''}
self.eng._parse_template_and_validate_stack(
self.ctx, 'stack_name', template, {}, {}, None, args)
args = {"adopt_stack_data": '''{
"environment": {}
}'''}
self.eng._parse_template_and_validate_stack(
self.ctx, 'stack_name', template, {}, {}, None, args)
def test_parse_adopt_stack_data_with_parameters(self):
cfg.CONF.set_override('enable_stack_adopt', True, enforce_type=True)
template = {"heat_template_version": "2015-04-30",
"parameters": {
"volsize": {"type": "number"}
},
"resources": {
"myres": {
"type": "OS::Cinder::Volume",
"properties": {
"name": "volname",
"size": {"get_param": "volsize"}
}
}
}}
args = {"adopt_stack_data": '''{
"environment": {
"parameters": {
"volsize": 1
}
}}'''}
stack = self.eng._parse_template_and_validate_stack(
self.ctx, 'stack_name', template, {}, {}, None, args)
self.assertEqual(1, stack.parameters['volsize'])
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
@mock.patch.object(stack_object.Stack, 'get_by_id')
@mock.patch.object(parser.Stack, 'load')
def test_stack_cancel_update_convergence_with_no_rollback(
self, mock_load, mock_get_by_id, mock_tg):
stk = mock.MagicMock()
stk.id = 1
stk.UPDATE = 'UPDATE'
stk.IN_PROGRESS = 'IN_PROGRESS'
stk.state = ('UPDATE', 'IN_PROGRESS')
stk.status = stk.IN_PROGRESS
stk.action = stk.UPDATE
stk.convergence = True
mock_load.return_value = stk
self.patchobject(self.eng, '_get_stack')
self.eng.thread_group_mgr.start = mock.MagicMock()
with mock.patch.object(self.eng, 'worker_service') as mock_ws:
mock_ws.stop_traversal = mock.Mock()
# with rollback as false
self.eng.stack_cancel_update(self.ctx, 1,
cancel_with_rollback=False)
self.assertTrue(self.eng.thread_group_mgr.start.called)
call_args, _ = self.eng.thread_group_mgr.start.call_args
# test ID of stack
self.assertEqual(call_args[0], 1)
# ensure stop_traversal should be called with stack
self.assertEqual(call_args[1].func, mock_ws.stop_traversal)
self.assertEqual(call_args[1].args[0], stk)
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
@mock.patch.object(stack_object.Stack, 'get_by_id')
@mock.patch.object(parser.Stack, 'load')
def test_stack_cancel_update_convergence_with_rollback(
self, mock_load, mock_get_by_id, mock_tg):
stk = mock.MagicMock()
stk.id = 1
stk.UPDATE = 'UPDATE'
stk.IN_PROGRESS = 'IN_PROGRESS'
stk.state = ('UPDATE', 'IN_PROGRESS')
stk.status = stk.IN_PROGRESS
stk.action = stk.UPDATE
stk.convergence = True
stk.rollback = mock.MagicMock(return_value=None)
mock_load.return_value = stk
self.patchobject(self.eng, '_get_stack')
self.eng.thread_group_mgr.start = mock.MagicMock()
# with rollback as true
self.eng.stack_cancel_update(self.ctx, 1,
cancel_with_rollback=True)
self.eng.thread_group_mgr.start.assert_called_once_with(
1, stk.rollback)
| {
"content_hash": "c4445d1fa58a0f332ffde941728f3eb2",
"timestamp": "",
"source": "github",
"line_count": 1460,
"max_line_length": 79,
"avg_line_length": 43.50205479452055,
"alnum_prop": 0.5138475587674964,
"repo_name": "cwolferh/heat-scratch",
"id": "e8b1b9938a24cbde45074a2efe4b76a4ed002e65",
"size": "64088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_engine_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
} |
"""Combine multiple one-epoch results files into one all-epoch result file
Take multiple file paths as command line arguments
Use last file path argument as the output file path
Warn user if the input files are not exactly every epoch file required
Warn user if the output file already exists
.. moduleauthor:: Jan Van Bruggen <jancvanbruggen@gmail.com>
"""
import os
import sys
NUM_PATH_UNDERSCORES_BEFORE_EPOCH_VALUE = 2
input_file_paths = sys.argv[1:-1]
output_file_path = sys.argv[-1]
rmse_values_by_epoch = {}
for rmse_file_path in input_file_paths:
with open(rmse_file_path, 'r') as rmse_file:
rmse_value = float(rmse_file.read().strip())
rmse_file_path_parts = rmse_file_path.split('_')
epochs_part = rmse_file_path_parts[NUM_PATH_UNDERSCORES_BEFORE_EPOCH_VALUE]
epoch = int(epochs_part[:epochs_part.index('epochs')])
rmse_values_by_epoch[epoch] = rmse_value
max_epoch = max(rmse_values_by_epoch.keys())
actual_epochs = sorted(rmse_values_by_epoch.keys())
expected_epochs = range(1, max_epoch + 1)
diff = [epoch for epoch in expected_epochs if epoch not in actual_epochs]
if len(diff) > 1:
raise Exception('Missing epochs: {}'
.format(', '.join([str(x) for x in diff])))
if len(input_file_paths) > len(expected_epochs):
raise Exception('Too many epochs - any extra files?\n'
'{}\n'
'Too many epochs - any extra files?'
.format('\n'.join(input_file_paths)))
assert not os.path.isfile(output_file_path), 'Output File Already Exists'
with open(output_file_path, 'w+') as output_file:
for epoch, rmse_value in sorted(rmse_values_by_epoch.items()):
output_file.write('{}\n'.format(rmse_value))
| {
"content_hash": "058817b4cf0361b1b67fa5a02c902a37",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 34.76,
"alnum_prop": 0.6766398158803222,
"repo_name": "jvanbrug/netflix",
"id": "69566d9451bd13ca1bcef39f5124fbc5cb5efa56",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/combine_one_line_results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5381"
},
{
"name": "Makefile",
"bytes": "727"
},
{
"name": "Python",
"bytes": "97404"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 0.3, s, t 0.6, s, t 1, s, q"
tags = "skeleton, BitmapSkin, Animate"
import cPickle
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos import skeleton
import pyglet
import sample_skeleton
import sample_skin
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.skin = skeleton.BitmapSkin(sample_skeleton.skeleton,
sample_skin.skin)
self.add( self.skin )
x, y = director.get_window_size()
self.skin.position = x/2, y/2
anim = cPickle.load(open("SAMPLE.anim"))
self.skin.do( cocos.actions.Repeat( skeleton.Animate(anim) ) )
def main():
director.init()
test_layer = TestLayer()
bg_layer = cocos.layer.ColorLayer(255,255,255,255)
main_scene = cocos.scene.Scene()
main_scene.add(bg_layer, z=-10)
main_scene.add(test_layer, z=10)
director.run(main_scene)
if __name__ == '__main__':
main()
| {
"content_hash": "e83e6adcd56b110e268437002cb8cacc",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 70,
"avg_line_length": 26.227272727272727,
"alnum_prop": 0.6256499133448874,
"repo_name": "eevee/cocos2d-mirror",
"id": "1b9e4336466f2a683299d8ea3af29d142742b55f",
"size": "1227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_skeleton_anim.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1196228"
}
],
"symlink_target": ""
} |
from cmspwn.module import pwnfunc
@pwnfunc
def is_ampcms(cmspwn, response):
if 'AMP'.lower() in response.headers.get('set-cookie').split('=')[0].lower():
cmspwn.found = True; cmspwn.Framework = 'AMPCMS'; cmspwn.site = 'http://www.ampcms.org'
return
| {
"content_hash": "b27f93f9c7ee75077610edb8d79b7b65",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 95,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.6654275092936803,
"repo_name": "kenjoe41/cmspwn",
"id": "033930af6872e1b0856dbef5c31db94c6ef2296c",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmspwn/modules/ampcms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20684"
}
],
"symlink_target": ""
} |
""" This file contains code for working on lists and dictionaries. """
def moreThanOne(dict, key):
""" Checks if a key in a dictionary has a value more than one.
Arguments:
dict -- the dictionary
key -- the key
Returns:
True if the key exists in the dictionary and the value is at least one, otherwise false
"""
return key in dict and dict[key] > 0
def anyMoreThanOne(dict, keys):
""" Checks if any of a list of keys in a dictionary has a value more than one.
Arguments:
dict -- the dictionary
keys -- the keys
Returns:
True if any key exists in the dictionary and the value is at least one, otherwise false
"""
for key in keys:
if key in dict and dict[key] > 0:
return True
return False
def makeUnique(list):
""" Removes duplicates from a list. """
u = []
for l in list:
if not l in u:
u.append(l)
return u
def alphabetical(lst):
""" Sorts a list of tuples in reverse alphabetical order by the first key
in the tuple.
Arguments:
lst -- the list to sort
Returns:
the sorted list
"""
return list(reversed(sorted(lst, key=lambda x: x[0]))) | {
"content_hash": "6955552e59386c237f5e95d7bdc6e75a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 89,
"avg_line_length": 23.270833333333332,
"alnum_prop": 0.675022381378693,
"repo_name": "ephracis/hermes",
"id": "0b1e6124caea059ee318fbbc33482073caf518f0",
"size": "1117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/lists.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36977"
}
],
"symlink_target": ""
} |
"""
Player (OOC) commands. These are stored on the Player object
and self.caller is thus always a Player, not an Object/Character.
These commands go in the PlayerCmdset and are accessible also
when puppeting a Character (although with lower priority)
These commands use the player_caller property which tells the command
parent (MuxCommand, usually) to setup caller correctly. They use
self.player to make sure to always use the player object rather than
self.caller (which change depending on the level you are calling from)
The property self.character can be used to access the character when
these commands are triggered with a connected character (such as the
case of the @ooc command), it is None if we are OOC.
Note that under MULTISESSION_MODE > 2, Player- commands should use
self.msg() and similar methods to reroute returns to the correct
method. Otherwise all text will be returned to all connected sessions.
"""
from builtins import range
import time
from django.conf import settings
from evennia.server.sessionhandler import SESSIONS
from evennia.utils import utils, create, search, prettytable, evtable
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
_MAX_NR_CHARACTERS = settings.MAX_NR_CHARACTERS
_MULTISESSION_MODE = settings.MULTISESSION_MODE
# limit symbol import for API
__all__ = ("CmdOOCLook", "CmdIC", "CmdOOC", "CmdPassword", "CmdQuit",
"CmdCharCreate", "CmdOption", "CmdSessions", "CmdWho",
"CmdColorTest", "CmdQuell")
class MuxPlayerLookCommand(COMMAND_DEFAULT_CLASS):
"""
Custom parent (only) parsing for OOC looking, sets a "playable"
property on the command based on the parsing.
"""
def parse(self):
"Custom parsing"
super(MuxPlayerLookCommand, self).parse()
if _MULTISESSION_MODE < 2:
# only one character allowed - not used in this mode
self.playable = None
return
playable = self.player.db._playable_characters
if playable is not None:
# clean up list if character object was deleted in between
if None in playable:
playable = [character for character in playable if character]
self.player.db._playable_characters = playable
# store playable property
if self.args:
self.playable = dict((utils.to_str(char.key.lower()), char)
for char in playable).get(self.args.lower(), None)
else:
self.playable = playable
# Obs - these are all intended to be stored on the Player, and as such,
# use self.player instead of self.caller, just to be sure. Also self.msg()
# is used to make sure returns go to the right session
# note that this is inheriting from MuxPlayerLookCommand,
# and has the .playable property.
class CmdOOCLook(MuxPlayerLookCommand):
"""
look while out-of-character
Usage:
look
Look in the ooc state.
"""
#This is an OOC version of the look command. Since a
#Player doesn't have an in-game existence, there is no
#concept of location or "self". If we are controlling
#a character, pass control over to normal look.
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
help_category = "General"
# this is used by the parent
player_caller = True
def func(self):
"implement the ooc look command"
if _MULTISESSION_MODE < 2:
# only one character allowed
self.msg("You are out-of-character (OOC).\nUse {w@ic{n to get back into the game.")
return
# call on-player look helper method
self.msg(self.player.at_look(target=self.playable, session=self.session))
class CmdCharCreate(COMMAND_DEFAULT_CLASS):
"""
create a new character
Usage:
@charcreate <charname> [= desc]
Create a new character, optionally giving it a description. You
may use upper-case letters in the name - you will nevertheless
always be able to access your character using lower-case letters
if you want.
"""
key = "@charcreate"
locks = "cmd:pperm(Players)"
help_category = "General"
# this is used by the parent
player_caller = True
def func(self):
"create the new character"
player = self.player
if not self.args:
self.msg("Usage: @charcreate <charname> [= description]")
return
key = self.lhs
desc = self.rhs
charmax = _MAX_NR_CHARACTERS if _MULTISESSION_MODE > 1 else 1
if not player.is_superuser and \
(player.db._playable_characters and
len(player.db._playable_characters) >= charmax):
self.msg("You may only create a maximum of %i characters." % charmax)
return
from evennia.objects.models import ObjectDB
typeclass = settings.BASE_CHARACTER_TYPECLASS
if ObjectDB.objects.filter(db_typeclass_path=typeclass, db_key__iexact=key):
# check if this Character already exists. Note that we are only
# searching the base character typeclass here, not any child
# classes.
self.msg("{rA character named '{w%s{r' already exists.{n" % key)
return
# create the character
start_location = ObjectDB.objects.get_id(settings.START_LOCATION)
default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
permissions = settings.PERMISSION_PLAYER_DEFAULT
new_character = create.create_object(typeclass, key=key,
location=start_location,
home=default_home,
permissions=permissions)
# only allow creator (and immortals) to puppet this char
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, player.id))
player.db._playable_characters.append(new_character)
if desc:
new_character.db.desc = desc
elif not new_character.db.desc:
new_character.db.desc = "This is a Player."
self.msg("Created new character %s. Use {w@ic %s{n to enter the game as this character." % (new_character.key, new_character.key))
class CmdIC(COMMAND_DEFAULT_CLASS):
"""
control an object you have permission to puppet
Usage:
@ic <character>
Go in-character (IC) as a given Character.
This will attempt to "become" a different object assuming you have
the right to do so. Note that it's the PLAYER character that puppets
characters/objects and which needs to have the correct permission!
You cannot become an object that is already controlled by another
player. In principle <character> can be any in-game object as long
as you the player have access right to puppet it.
"""
key = "@ic"
# lock must be all() for different puppeted objects to access it.
locks = "cmd:all()"
aliases = "@puppet"
help_category = "General"
# this is used by the parent
player_caller = True
def func(self):
"""
Main puppet method
"""
player = self.player
session = self.session
new_character = None
if not self.args:
new_character = player.db._last_puppet
if not new_character:
self.msg("Usage: @ic <character>")
return
if not new_character:
# search for a matching character
new_character = search.object_search(self.args)
if new_character:
new_character = new_character[0]
else:
self.msg("That is not a valid character choice.")
return
try:
player.puppet_object(session, new_character)
player.db._last_puppet = new_character
except RuntimeError as exc:
self.msg("{rYou cannot become {C%s{n: %s" % (new_character.name, exc))
# note that this is inheriting from MuxPlayerLookCommand,
# and as such has the .playable property.
class CmdOOC(MuxPlayerLookCommand):
"""
stop puppeting and go ooc
Usage:
@ooc
Go out-of-character (OOC).
This will leave your current character and put you in a incorporeal OOC state.
"""
key = "@ooc"
locks = "cmd:pperm(Players)"
aliases = "@unpuppet"
help_category = "General"
# this is used by the parent
player_caller = True
def func(self):
"Implement function"
player = self.player
session = self.session
old_char = player.get_puppet(session)
if not old_char:
string = "You are already OOC."
self.msg(string)
return
player.db._last_puppet = old_char
# disconnect
try:
player.unpuppet_object(session)
self.msg("\n{GYou go OOC.{n\n")
if _MULTISESSION_MODE < 2:
# only one character allowed
self.msg("You are out-of-character (OOC).\nUse {w@ic{n to get back into the game.")
return
self.msg(player.at_look(target=self.playable, session=session))
except RuntimeError as exc:
self.msg("{rCould not unpuppet from {c%s{n: %s" % (old_char, exc))
class CmdSessions(COMMAND_DEFAULT_CLASS):
    """
    check your connected session(s)

    Usage:
      @sessions

    Lists the sessions currently connected to your account.
    """
    key = "@sessions"
    locks = "cmd:all()"
    help_category = "General"

    # this is used by the parent
    player_caller = True

    def func(self):
        """Show one table row per session connected to this account."""
        account = self.player
        table = prettytable.PrettyTable(["{wsessid",
                                         "{wprotocol",
                                         "{whost",
                                         "{wpuppet/character",
                                         "{wlocation"])
        # list sessions in ascending sessid order
        for sess in sorted(account.sessions.all(), key=lambda s: s.sessid):
            puppet = account.get_puppet(sess)
            host = type(sess.address) == tuple and sess.address[0] or sess.address
            row = [str(sess.sessid),
                   str(sess.protocol_key),
                   host,
                   puppet and str(puppet) or "None",
                   puppet and str(puppet.location) or "N/A"]
            table.add_row(row)
        self.msg("{wYour current session(s):{n\n%s" % table)
class CmdWho(COMMAND_DEFAULT_CLASS):
    """
    list who is currently online

    Usage:
      who
      doing

    Shows who is currently online. Doing is an alias that limits info
    also for those with all permissions.
    """
    key = "who"
    aliases = "doing"
    locks = "cmd:all()"

    # this is used by the parent
    player_caller = True

    def func(self):
        """
        Get all connected players by polling session.
        """
        player = self.player
        session_list = SESSIONS.get_sessions()
        session_list = sorted(session_list, key=lambda o: o.player.key)

        if self.cmdstring == "doing":
            # the "doing" alias always shows the restricted listing
            show_session_data = False
        else:
            show_session_data = player.check_permstring("Immortals") or player.check_permstring("Wizards")

        nplayers = (SESSIONS.player_count())
        if show_session_data:
            # privileged info
            table = prettytable.PrettyTable(["{wPlayer Name",
                                             "{wOn for",
                                             "{wIdle",
                                             "{wPuppeting",
                                             "{wRoom",
                                             "{wCmds",
                                             "{wProtocol",
                                             "{wHost"])
            for session in session_list:
                if not session.logged_in:
                    continue
                delta_cmd = time.time() - session.cmd_last_visible
                delta_conn = time.time() - session.conn_time
                # renamed so the loop no longer shadows the caller's `player`
                session_player = session.get_player()
                puppet = session.get_puppet()
                # BUGFIX: also guard puppet.location being None (an unpuppeted
                # or room-less character would raise AttributeError here)
                location = puppet.location.key if puppet and puppet.location else "None"
                table.add_row([utils.crop(session_player.name, width=25),
                               utils.time_format(delta_conn, 0),
                               utils.time_format(delta_cmd, 1),
                               utils.crop(puppet.key if puppet else "None", width=25),
                               utils.crop(location, width=25),
                               session.cmd_total,
                               session.protocol_key,
                               isinstance(session.address, tuple) and session.address[0] or session.address])
        else:
            # unprivileged
            table = prettytable.PrettyTable(["{wPlayer name", "{wOn for", "{wIdle"])
            for session in session_list:
                if not session.logged_in:
                    continue
                delta_cmd = time.time() - session.cmd_last_visible
                delta_conn = time.time() - session.conn_time
                session_player = session.get_player()
                table.add_row([utils.crop(session_player.key, width=25),
                               utils.time_format(delta_conn, 0),
                               utils.time_format(delta_cmd, 1)])
        isone = nplayers == 1
        string = "{wPlayers:{n\n%s\n%s unique account%s logged in." % (table, "One" if isone else nplayers, "" if isone else "s")
        self.msg(string)
class CmdOption(COMMAND_DEFAULT_CLASS):
    """
    Set an account option

    Usage:
      @option[/save] [name = value]

    Switch:
      save - Save the current option settings for future logins.
      clear - Clear the saved options.

    This command allows for viewing and setting client interface
    settings. Note that saved options may not be able to be used if
    later connecting with a client with different capabilities.
    """
    key = "@option"
    aliases = "@options"
    locks = "cmd:all()"

    # this is used by the parent
    player_caller = True

    def func(self):
        """
        Implements the command.

        Without arguments: display (and optionally save/clear) the current
        protocol-flag settings of this session. With "name = value": validate
        and assign a single option, then propagate it to the session.
        """
        if self.session is None:
            return

        flags = self.session.protocol_flags

        # Display current options
        if not self.args:
            # list the option settings
            if "save" in self.switches:
                # save all options
                self.caller.db._saved_protocol_flags = flags
                self.msg("{gSaved all options. Use @option/clear to remove.{n")
            if "clear" in self.switches:
                # clear all saves
                self.caller.db._saved_protocol_flags = {}
                self.msg("{gCleared all saved options.")

            options = dict(flags)  # make a copy of the flag dict
            saved_options = dict(self.caller.attributes.get("_saved_protocol_flags", default={}))

            # SCREENWIDTH/SCREENHEIGHT are per-screen mappings {screenid: size};
            # collapse to a single value when only one screen is reported.
            if "SCREENWIDTH" in options:
                if len(options["SCREENWIDTH"]) == 1:
                    options["SCREENWIDTH"] = options["SCREENWIDTH"][0]
                else:
                    options["SCREENWIDTH"] = " \n".join("%s : %s" % (screenid, size)
                                                        for screenid, size in options["SCREENWIDTH"].iteritems())
            if "SCREENHEIGHT" in options:
                if len(options["SCREENHEIGHT"]) == 1:
                    options["SCREENHEIGHT"] = options["SCREENHEIGHT"][0]
                else:
                    options["SCREENHEIGHT"] = " \n".join("%s : %s" % (screenid, size)
                                                         for screenid, size in options["SCREENHEIGHT"].iteritems())
            options.pop("TTYPE", None)

            # only show a "Saved" column when there is anything saved
            header = ("Name", "Value", "Saved") if saved_options else ("Name", "Value")
            table = evtable.EvTable(*header)
            for key in sorted(options):
                row = [key, options[key]]
                if saved_options:
                    saved = " |YYes|n" if key in saved_options else ""
                    # '*' marks a saved value that differs from the live one
                    changed = "|y*|n" if key in saved_options and flags[key] != saved_options[key] else ""
                    row.append("%s%s" % (saved, changed))
                table.add_row(*row)
            self.msg("{wClient settings (%s):|n\n%s|n" % (self.session.protocol_key, table))
            return

        if not self.rhs:
            self.msg("Usage: @option [name = [value]]")
            return

        # Try to assign new values

        def validate_encoding(val):
            # helper: change encoding; raises if `val` is not a known codec
            try:
                utils.to_str(utils.to_unicode("test-string"), encoding=val)
            except LookupError:
                raise RuntimeError("The encoding '|w%s|n' is invalid. " % val)
            return val

        def validate_size(val):
            # helper: sizes are stored as {screenid: size}; 0 is the default screen
            return {0: int(val)}

        def validate_bool(val):
            # helper: accept "true"/"on"/"1" (any case) as True
            return True if val.lower() in ("true", "on", "1") else False

        def update(name, val, validator):
            # helper: update property and report errors; returns the
            # {name: value} dict on success, False on failure
            try:
                old_val = flags[name]
                new_val = validator(val)
                flags[name] = new_val
                self.msg("Option |w%s|n was changed from '|w%s|n' to '|w%s|n'." % (name, old_val, new_val))
                return {name: new_val}
            except Exception, err:
                self.msg("|rCould not set option |w%s|r:|n %s" % (name, err))
                return False

        # maps each settable option name to its validator/converter
        validators = {"ANSI": validate_bool,
                      "CLIENTNAME": utils.to_str,
                      "ENCODING": validate_encoding,
                      "MCCP": validate_bool,
                      "MXP": validate_bool,
                      "NOMARKUP": validate_bool,
                      "NOPKEEPALIVE": validate_bool,
                      "OOB": validate_bool,
                      "RAW": validate_bool,
                      "SCREENHEIGHT": validate_size,
                      "SCREENWIDTH": validate_size,
                      "SCREENREADER": validate_bool,
                      "TERM": utils.to_str,
                      "UTF-8": validate_bool,
                      "XTERM256": validate_bool,
                      "INPUTDEBUG": validate_bool}

        name = self.lhs.upper()
        val = self.rhs.strip()
        optiondict = False
        if val and name in validators:
            optiondict = update(name, val, validators[name])
        else:
            self.session.msg("|rNo option named '|w%s|r'." % name)

        if optiondict:
            # a valid setting
            if "save" in self.switches:
                # save this option only
                saved_options = self.player.attributes.get("_saved_protocol_flags", default={})
                saved_options.update(optiondict)
                self.player.attributes.add("_saved_protocol_flags", saved_options)
                for key in optiondict:
                    self.msg("{gSaved option %s.{n" % key)
            if "clear" in self.switches:
                # clear this save
                for key in optiondict:
                    self.player.attributes.get("_saved_protocol_flags", {}).pop(key, None)
                    self.msg("{gCleared saved %s." % key)
            # push the new flag value(s) to the live session
            self.session.update_flags(**optiondict)
class CmdPassword(COMMAND_DEFAULT_CLASS):
    """
    change your password

    Usage:
      @password <old password> = <new password>

    Changes your password. Make sure to pick a safe one.
    """
    key = "@password"
    locks = "cmd:pperm(Players)"

    # this is used by the parent
    player_caller = True

    def func(self):
        """Validate the old password and store the new one."""
        account = self.player
        if not self.rhs:
            self.msg("Usage: @password <oldpass> = <newpass>")
            return
        oldpass = self.lhslist[0]  # this is already stripped by parse()
        newpass = self.rhslist[0]  # ''
        if not account.check_password(oldpass):
            self.msg("The specified old password isn't correct.")
            return
        if len(newpass) < 3:
            self.msg("Passwords must be at least three characters long.")
            return
        account.set_password(newpass)
        account.save()
        self.msg("Password changed.")
class CmdQuit(COMMAND_DEFAULT_CLASS):
    """
    quit the game

    Usage:
      @quit

    Switch:
      all - disconnect all connected sessions

    Gracefully disconnect your current session from the
    game. Use the /all switch to disconnect from all sessions.
    """
    key = "@quit"
    aliases = "quit"
    locks = "cmd:all()"

    # this is used by the parent
    player_caller = True

    def func(self):
        """Disconnect this session (or every session with the /all switch)."""
        player = self.player
        if 'all' in self.switches:
            player.msg("{RQuitting{n all sessions. Hope to see you soon again.", session=self.session)
            for session in player.sessions.all():
                player.disconnect_session_from_player(session)
        else:
            nsess = len(player.sessions.all())
            if nsess == 2:
                player.msg("{RQuitting{n. One session is still connected.", session=self.session)
            elif nsess > 2:
                # BUGFIX: plural branch read "%i session are still connected"
                player.msg("{RQuitting{n. %i sessions are still connected." % (nsess-1), session=self.session)
            else:
                # we are quitting the last available session
                player.msg("{RQuitting{n. Hope to see you again, soon.", session=self.session)
            player.disconnect_session_from_player(self.session)
class CmdColorTest(COMMAND_DEFAULT_CLASS):
    """
    testing which colors your client support

    Usage:
      @color ansi|xterm256

    Prints a color map along with in-mud color codes to use to produce
    them. It also tests what is supported in your client. Choices are
    16-color ansi (supported in most muds) or the 256-color xterm256
    standard. No checking is done to determine your client supports
    color - if not you will see rubbish appear.
    """
    key = "@color"
    aliases = "color"
    locks = "cmd:all()"
    help_category = "General"

    # this is used by the parent
    player_caller = True

    def table_format(self, table):
        """
        Helper method to format the ansi/xterm256 tables.
        Takes a table of columns [[val,val,...],[val,val,...],...]
        and returns a list of rows, each cell left-justified to its
        column's widest entry plus one space of padding.
        """
        if not table:
            return [[]]
        extra_space = 1
        # widest entry per column determines that column's padding
        max_widths = [max([len(str(val)) for val in col]) for col in table]
        ftable = []
        for irow in range(len(table[0])):
            # transpose: build row `irow` by taking element irow of each column
            ftable.append([str(col[irow]).ljust(max_widths[icol]) + " " * extra_space
                           for icol, col in enumerate(table)])
        return ftable

    def func(self):
        "Show color tables"
        if self.args.startswith("a"):
            # show ansi 16-color table
            from evennia.utils import ansi
            ap = ansi.ANSI_PARSER
            # ansi colors
            # show all ansi color-related codes; '|' is doubled so the
            # literal code is displayed next to its rendered effect
            col1 = ["%s%s|n" % (code, code.replace("|", "||")) for code, _ in ap.ext_ansi_map[48:56]]
            col2 = ["%s%s|n" % (code, code.replace("|", "||")) for code, _ in ap.ext_ansi_map[56:64]]
            col3 = ["%s%s|n" % (code.replace("\\",""), code.replace("|", "||").replace("\\", "")) for code, _ in ap.ext_ansi_map[-8:]]
            # pad col2 so all columns have equal length
            col2.extend(["" for i in range(len(col1)-len(col2))])
            table = utils.format_table([col1, col2, col3])
            string = "ANSI colors:"
            for row in table:
                string += "\n " + " ".join(row)
            self.msg(string)
            self.msg("||X : black. ||/ : return, ||- : tab, ||_ : space, ||* : invert, ||u : underline")
            self.msg("To combine background and foreground, add background marker last, e.g. ||r||[B.")
        elif self.args.startswith("x"):
            # show xterm256 table: 6 rows of foreground swatches, 6 of background
            table = [[], [], [], [], [], [], [], [], [], [], [], []]
            for ir in range(6):
                for ig in range(6):
                    for ib in range(6):
                        # foreground table
                        table[ir].append("|%i%i%i%s|n" % (ir, ig, ib, "||%i%i%i" % (ir, ig, ib)))
                        # background table (inverted foreground keeps text readable)
                        table[6+ir].append("|%i%i%i|[%i%i%i%s|n" % (5 - ir, 5 - ig, 5 - ib,
                                                                    ir, ig, ib,
                                                                    "||[%i%i%i" % (ir, ig, ib)))
            table = self.table_format(table)
            string = "Xterm256 colors (if not all hues show, your client might not report that it can handle xterm256):"
            for row in table:
                string += "\n" + "".join(row)
            self.msg(string)
            #self.msg("(e.g. %%123 and %%[123 also work)")
        else:
            # malformed input
            self.msg("Usage: @color ansi||xterm256")
class CmdQuell(COMMAND_DEFAULT_CLASS):
    """
    use character's permissions instead of player's

    Usage:
      quell
      unquell

    Normally the permission level of the Player is used when puppeting a
    Character/Object to determine access. This command will switch the lock
    system to make use of the puppeted Object's permissions instead. This is
    useful mainly for testing.

    Hierarchical permission quelling only work downwards, thus a Player cannot
    use a higher-permission Character to escalate their permission level.

    Use the unquell command to revert back to normal operation.
    """
    key = "@quell"
    aliases = ["@unquell"]
    locks = "cmd:pperm(Players)"
    help_category = "General"

    # this is used by the parent
    player_caller = True

    def _recache_locks(self, player):
        "Helper method to reset the lockhandler on an already puppeted object"
        if self.session:
            char = self.session.puppet
            if char:
                # we are already puppeting an object. We need to reset
                # the lock caches (otherwise the superuser status change
                # won't be visible until repuppet)
                char.locks.reset()
        player.locks.reset()

    def func(self):
        "Perform the command"
        player = self.player
        # human-readable summary of the account's current permissions
        permstr = player.is_superuser and " (superuser)" or " (%s)" % (", ".join(player.permissions.all()))
        if self.cmdstring == '@unquell':
            if not player.attributes.get('_quell'):
                self.msg("Already using normal Player permissions%s." % permstr)
            else:
                player.attributes.remove('_quell')
                self.msg("Player permissions%s restored." % permstr)
        else:
            if player.attributes.get('_quell'):
                self.msg("Already quelling Player%s permissions." % permstr)
                return
            # the '_quell' attribute flags the lock system to use puppet perms
            player.attributes.add('_quell', True)
            puppet = self.session.puppet
            if puppet:
                cpermstr = " (%s)" % ", ".join(puppet.permissions.all())
                cpermstr = "Quelling to current puppet's permissions%s." % cpermstr
                cpermstr += "\n(Note: If this is higher than Player permissions%s, the lowest of the two will be used.)" % permstr
                cpermstr += "\nUse @unquell to return to normal permission usage."
                self.msg(cpermstr)
            else:
                self.msg("Quelling Player permissions%s. Use @unquell to get them back." % permstr)
        # flush lock caches so the change takes effect immediately
        self._recache_locks(player)
| {
"content_hash": "b602183c59d920c69bba2ade3ab597c0",
"timestamp": "",
"source": "github",
"line_count": 766,
"max_line_length": 138,
"avg_line_length": 36.18798955613577,
"alnum_prop": 0.5575036075036075,
"repo_name": "ergodicbreak/evennia",
"id": "e1b3ae31f2b72f560b2bc9ce7b87f0df0b98973f",
"size": "27720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evennia/commands/default/player.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "39619"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13445"
},
{
"name": "JavaScript",
"bytes": "26538"
},
{
"name": "Python",
"bytes": "2198287"
}
],
"symlink_target": ""
} |
"""Test CLR <-> Python type conversions."""
import operator
import pytest
import System
from Python.Test import ConversionTest, UnicodeString
from Python.Runtime import PyObjectConversions
from Python.Runtime.Codecs import RawProxyEncoder
def test_bool_conversion():
    """Test bool conversion.

    Assign Python bools/ints and System.Boolean instances to a CLR bool
    field and verify identity with True/False plus numeric equality.
    """
    # Fix: each group previously repeated the identical `is` assertion twice
    # (copy-paste duplication); one occurrence per group is sufficient.
    ob = ConversionTest()
    assert ob.BooleanField is False
    assert ob.BooleanField == 0

    ob.BooleanField = True
    assert ob.BooleanField is True
    assert ob.BooleanField == 1

    ob.BooleanField = False
    assert ob.BooleanField is False
    assert ob.BooleanField == 0

    ob.BooleanField = 1
    assert ob.BooleanField is True
    assert ob.BooleanField == 1

    ob.BooleanField = 0
    assert ob.BooleanField is False
    assert ob.BooleanField == 0

    # System.Boolean follows Python truthiness for its argument
    ob.BooleanField = System.Boolean(None)
    assert ob.BooleanField is False
    assert ob.BooleanField == 0

    ob.BooleanField = System.Boolean('')
    assert ob.BooleanField is False
    assert ob.BooleanField == 0

    ob.BooleanField = System.Boolean(0)
    assert ob.BooleanField is False
    assert ob.BooleanField == 0

    ob.BooleanField = System.Boolean(1)
    assert ob.BooleanField is True
    assert ob.BooleanField == 1

    ob.BooleanField = System.Boolean('a')
    assert ob.BooleanField is True
    assert ob.BooleanField == 1
def test_sbyte_conversion():
    """Test sbyte conversion."""
    # boundary values of the signed 8-bit type
    assert System.SByte.MaxValue == 127
    assert System.SByte.MinValue == -128

    ob = ConversionTest()
    assert ob.SByteField == 0

    # round-trip assignment of boundary values, as Python ints and as SByte
    ob.SByteField = 127
    assert ob.SByteField == 127

    ob.SByteField = -128
    assert ob.SByteField == -128

    ob.SByteField = System.SByte(127)
    assert ob.SByteField == 127

    ob.SByteField = System.SByte(-128)
    assert ob.SByteField == -128

    # wrong types raise TypeError; out-of-range values raise OverflowError
    with pytest.raises(TypeError):
        ConversionTest().SByteField = "spam"

    with pytest.raises(TypeError):
        ConversionTest().SByteField = None

    with pytest.raises(OverflowError):
        ConversionTest().SByteField = 128

    with pytest.raises(OverflowError):
        ConversionTest().SByteField = -129

    with pytest.raises(OverflowError):
        _ = System.SByte(128)

    with pytest.raises(OverflowError):
        _ = System.SByte(-129)
def test_byte_conversion():
    """Test byte conversion."""
    # boundary values of the unsigned 8-bit type
    assert System.Byte.MaxValue == 255
    assert System.Byte.MinValue == 0

    ob = ConversionTest()
    assert ob.ByteField == 0

    # round-trip assignment of boundary values, as Python ints and as Byte
    ob.ByteField = 255
    assert ob.ByteField == 255

    ob.ByteField = 0
    assert ob.ByteField == 0

    ob.ByteField = System.Byte(255)
    assert ob.ByteField == 255

    ob.ByteField = System.Byte(0)
    assert ob.ByteField == 0

    # wrong types raise TypeError; out-of-range values raise OverflowError
    with pytest.raises(TypeError):
        ConversionTest().ByteField = "spam"

    with pytest.raises(TypeError):
        ConversionTest().ByteField = None

    with pytest.raises(OverflowError):
        ConversionTest().ByteField = 256

    with pytest.raises(OverflowError):
        ConversionTest().ByteField = -1

    with pytest.raises(OverflowError):
        _ = System.Byte(256)

    with pytest.raises(OverflowError):
        _ = System.Byte(-1)
def test_char_conversion():
    """Test char conversion."""
    # System.Char spans the full 16-bit UTF-16 code-unit range
    assert System.Char.MaxValue == chr(65535)
    assert System.Char.MinValue == chr(0)

    ob = ConversionTest()
    assert ob.CharField == u'A'

    # both str and unicode single characters are accepted
    ob.CharField = 'B'
    assert ob.CharField == u'B'

    ob.CharField = u'B'
    assert ob.CharField == u'B'

    # an int is interpreted as a code point
    ob.CharField = 67
    assert ob.CharField == u'C'

    # code points outside 0..65535 overflow; None is a type error
    with pytest.raises(OverflowError):
        ConversionTest().CharField = 65536

    with pytest.raises(OverflowError):
        ConversionTest().CharField = -1

    with pytest.raises(TypeError):
        ConversionTest().CharField = None
def test_int16_conversion():
    """Test int16 conversion."""
    # boundary values of the signed 16-bit type
    assert System.Int16.MaxValue == 32767
    assert System.Int16.MinValue == -32768

    ob = ConversionTest()
    assert ob.Int16Field == 0

    # round-trip assignment of boundary values, as Python ints and as Int16
    ob.Int16Field = 32767
    assert ob.Int16Field == 32767

    ob.Int16Field = -32768
    assert ob.Int16Field == -32768

    ob.Int16Field = System.Int16(32767)
    assert ob.Int16Field == 32767

    ob.Int16Field = System.Int16(-32768)
    assert ob.Int16Field == -32768

    # note: a non-numeric string raises ValueError here (unlike the
    # 8-bit fields, which raise TypeError)
    with pytest.raises(ValueError):
        ConversionTest().Int16Field = "spam"

    with pytest.raises(TypeError):
        ConversionTest().Int16Field = None

    with pytest.raises(OverflowError):
        ConversionTest().Int16Field = 32768

    with pytest.raises(OverflowError):
        ConversionTest().Int16Field = -32769

    with pytest.raises(OverflowError):
        _ = System.Int16(32768)

    with pytest.raises(OverflowError):
        _ = System.Int16(-32769)
def test_int32_conversion():
    """Test int32 conversion."""
    # boundary values of the signed 32-bit type
    assert System.Int32.MaxValue == 2147483647
    assert System.Int32.MinValue == -2147483648

    ob = ConversionTest()
    assert ob.Int32Field == 0

    # round-trip assignment of boundary values, as Python ints and as Int32
    ob.Int32Field = 2147483647
    assert ob.Int32Field == 2147483647

    ob.Int32Field = -2147483648
    assert ob.Int32Field == -2147483648

    ob.Int32Field = System.Int32(2147483647)
    assert ob.Int32Field == 2147483647

    ob.Int32Field = System.Int32(-2147483648)
    assert ob.Int32Field == -2147483648

    # strings raise ValueError; None raises TypeError; out-of-range overflows
    with pytest.raises(ValueError):
        ConversionTest().Int32Field = "spam"

    with pytest.raises(TypeError):
        ConversionTest().Int32Field = None

    with pytest.raises(OverflowError):
        ConversionTest().Int32Field = 2147483648

    with pytest.raises(OverflowError):
        ConversionTest().Int32Field = -2147483649

    with pytest.raises(OverflowError):
        _ = System.Int32(2147483648)

    with pytest.raises(OverflowError):
        _ = System.Int32(-2147483649)
def test_int64_conversion():
    """Test int64 conversion."""
    # boundary values of the signed 64-bit type
    assert System.Int64.MaxValue == 9223372036854775807
    assert System.Int64.MinValue == -9223372036854775808

    ob = ConversionTest()
    assert ob.Int64Field == 0

    # round-trip assignment of boundary values, as Python ints and as Int64
    ob.Int64Field = 9223372036854775807
    assert ob.Int64Field == 9223372036854775807

    ob.Int64Field = -9223372036854775808
    assert ob.Int64Field == -9223372036854775808

    ob.Int64Field = System.Int64(9223372036854775807)
    assert ob.Int64Field == 9223372036854775807

    ob.Int64Field = System.Int64(-9223372036854775808)
    assert ob.Int64Field == -9223372036854775808

    # strings raise ValueError; None raises TypeError; out-of-range overflows
    with pytest.raises(ValueError):
        ConversionTest().Int64Field = "spam"

    with pytest.raises(TypeError):
        ConversionTest().Int64Field = None

    with pytest.raises(OverflowError):
        ConversionTest().Int64Field = 9223372036854775808

    with pytest.raises(OverflowError):
        ConversionTest().Int64Field = -9223372036854775809

    with pytest.raises(OverflowError):
        _ = System.Int64(9223372036854775808)

    with pytest.raises(OverflowError):
        _ = System.Int64(-9223372036854775809)
def test_uint16_conversion():
    """Test uint16 conversion."""
    # boundary values of the unsigned 16-bit type
    assert System.UInt16.MaxValue == 65535
    assert System.UInt16.MinValue == 0

    ob = ConversionTest()
    assert ob.UInt16Field == 0

    # round-trip assignment of boundary values (-0 normalizes to 0)
    ob.UInt16Field = 65535
    assert ob.UInt16Field == 65535

    ob.UInt16Field = -0
    assert ob.UInt16Field == 0

    ob.UInt16Field = System.UInt16(65535)
    assert ob.UInt16Field == 65535

    ob.UInt16Field = System.UInt16(0)
    assert ob.UInt16Field == 0

    # strings raise ValueError; None raises TypeError; out-of-range overflows
    with pytest.raises(ValueError):
        ConversionTest().UInt16Field = "spam"

    with pytest.raises(TypeError):
        ConversionTest().UInt16Field = None

    with pytest.raises(OverflowError):
        ConversionTest().UInt16Field = 65536

    with pytest.raises(OverflowError):
        ConversionTest().UInt16Field = -1

    with pytest.raises(OverflowError):
        _ = System.UInt16(65536)

    with pytest.raises(OverflowError):
        _ = System.UInt16(-1)
def test_uint32_conversion():
    """Test uint32 conversion."""
    # boundary values of the unsigned 32-bit type
    assert System.UInt32.MaxValue == 4294967295
    assert System.UInt32.MinValue == 0

    ob = ConversionTest()
    assert ob.UInt32Field == 0

    # round-trip assignment of boundary values (-0 normalizes to 0)
    ob.UInt32Field = 4294967295
    assert ob.UInt32Field == 4294967295

    ob.UInt32Field = -0
    assert ob.UInt32Field == 0

    ob.UInt32Field = System.UInt32(4294967295)
    assert ob.UInt32Field == 4294967295

    ob.UInt32Field = System.UInt32(0)
    assert ob.UInt32Field == 0

    # strings raise ValueError; None raises TypeError; out-of-range overflows
    with pytest.raises(ValueError):
        ConversionTest().UInt32Field = "spam"

    with pytest.raises(TypeError):
        ConversionTest().UInt32Field = None

    with pytest.raises(OverflowError):
        ConversionTest().UInt32Field = 4294967296

    with pytest.raises(OverflowError):
        ConversionTest().UInt32Field = -1

    with pytest.raises(OverflowError):
        _ = System.UInt32(4294967296)

    with pytest.raises(OverflowError):
        _ = System.UInt32(-1)
def test_uint64_conversion():
    """Test uint64 conversion."""
    # boundary values of the unsigned 64-bit type
    assert System.UInt64.MaxValue == 18446744073709551615
    assert System.UInt64.MinValue == 0

    ob = ConversionTest()
    assert ob.UInt64Field == 0

    # round-trip assignment of boundary values (-0 normalizes to 0)
    ob.UInt64Field = 18446744073709551615
    assert ob.UInt64Field == 18446744073709551615

    ob.UInt64Field = -0
    assert ob.UInt64Field == 0

    ob.UInt64Field = System.UInt64(18446744073709551615)
    assert ob.UInt64Field == 18446744073709551615

    ob.UInt64Field = System.UInt64(0)
    assert ob.UInt64Field == 0

    # Implicitly converts float 0.5 -> int 0
    #with pytest.raises(TypeError):
    #ConversionTest().UInt64Field = 0.5

    # strings raise ValueError; None raises TypeError; out-of-range overflows
    with pytest.raises(ValueError):
        ConversionTest().UInt64Field = "spam"

    with pytest.raises(TypeError):
        ConversionTest().UInt64Field = None

    with pytest.raises(OverflowError):
        ConversionTest().UInt64Field = 18446744073709551616

    with pytest.raises(OverflowError):
        ConversionTest().UInt64Field = -1

    with pytest.raises(OverflowError):
        _ = System.UInt64((18446744073709551616))

    with pytest.raises(OverflowError):
        _ = System.UInt64(-1)
def test_single_conversion():
    """Test single conversion."""
    # single-precision limits are approximate when compared to Python floats
    assert System.Single.MaxValue == pytest.approx(3.402823e38)
    assert System.Single.MinValue == pytest.approx(-3.402823e38)

    ob = ConversionTest()
    assert ob.SingleField == 0.0

    # round-trip assignment of near-boundary values, as floats and as Single
    ob.SingleField = 3.402823e38
    assert ob.SingleField == 3.402823e38

    ob.SingleField = -3.402823e38
    assert ob.SingleField == -3.402823e38

    ob.SingleField = System.Single(3.402823e38)
    assert ob.SingleField == 3.402823e38

    ob.SingleField = System.Single(-3.402823e38)
    assert ob.SingleField == -3.402823e38

    # wrong types raise TypeError; magnitudes beyond Single range overflow
    with pytest.raises(TypeError):
        ConversionTest().SingleField = "spam"

    with pytest.raises(TypeError):
        ConversionTest().SingleField = None

    with pytest.raises(OverflowError):
        ConversionTest().SingleField = 3.402824e38

    with pytest.raises(OverflowError):
        ConversionTest().SingleField = -3.402824e38

    with pytest.raises(OverflowError):
        _ = System.Single(3.402824e38)

    with pytest.raises(OverflowError):
        _ = System.Single(-3.402824e38)
def test_double_conversion():
    """Test double conversion."""
    # System.Double has the same range as a Python float, so no
    # OverflowError cases exist for this field
    assert System.Double.MaxValue == 1.7976931348623157e308
    assert System.Double.MinValue == -1.7976931348623157e308

    ob = ConversionTest()
    assert ob.DoubleField == 0.0

    # round-trip assignment of boundary values, as floats and as Double
    ob.DoubleField = 1.7976931348623157e308
    assert ob.DoubleField == 1.7976931348623157e308

    ob.DoubleField = -1.7976931348623157e308
    assert ob.DoubleField == -1.7976931348623157e308

    ob.DoubleField = System.Double(1.7976931348623157e308)
    assert ob.DoubleField == 1.7976931348623157e308

    ob.DoubleField = System.Double(-1.7976931348623157e308)
    assert ob.DoubleField == -1.7976931348623157e308

    # wrong types raise TypeError
    with pytest.raises(TypeError):
        ConversionTest().DoubleField = "spam"

    with pytest.raises(TypeError):
        ConversionTest().DoubleField = None
def test_decimal_conversion():
    """Test decimal conversion."""
    from System import Decimal

    assert Decimal.ToInt64(Decimal(10)) == 10

    ob = ConversionTest()
    assert ob.DecimalField == Decimal(0)

    # only System.Decimal instances are accepted for this field
    ob.DecimalField = Decimal(10)
    assert ob.DecimalField == Decimal(10)

    ob.DecimalField = Decimal.One
    assert ob.DecimalField == Decimal.One

    ob.DecimalField = Decimal.Zero
    assert ob.DecimalField == Decimal.Zero

    # None and non-Decimal values raise TypeError
    with pytest.raises(TypeError):
        ConversionTest().DecimalField = None

    with pytest.raises(TypeError):
        ConversionTest().DecimalField = "spam"
def test_timedelta_conversion():
    """Test System.TimeSpan <-> datetime.timedelta conversion."""
    import datetime

    ob = ConversionTest()
    # the TimeSpan field surfaces as a Python timedelta
    assert type(ob.TimeSpanField) is type(datetime.timedelta(0))
    assert ob.TimeSpanField.days == 0

    ob.TimeSpanField = datetime.timedelta(days=1)
    assert ob.TimeSpanField.days == 1

    # None and non-timedelta values raise TypeError
    with pytest.raises(TypeError):
        ConversionTest().TimeSpanField = None

    with pytest.raises(TypeError):
        ConversionTest().TimeSpanField = "spam"
def test_datetime_conversion():
    """Test System.DateTime <-> datetime.datetime conversion."""
    from datetime import datetime

    ob = ConversionTest()
    # the DateTime field surfaces as a Python datetime
    assert type(ob.DateTimeField) is type(datetime(1,1,1))
    assert ob.DateTimeField.day == 1

    ob.DateTimeField = datetime(2000,1,2)
    assert ob.DateTimeField.day == 2
    assert ob.DateTimeField.month == 1
    assert ob.DateTimeField.year == 2000

    # None and non-datetime values raise TypeError
    with pytest.raises(TypeError):
        ConversionTest().DateTimeField = None

    with pytest.raises(TypeError):
        ConversionTest().DateTimeField = "spam"
def test_string_conversion():
    """Test string / unicode conversion."""
    ob = ConversionTest()

    assert ob.StringField == "spam"
    assert ob.StringField == u"spam"

    # str, unicode and System.String assignments all round-trip
    ob.StringField = "eggs"
    assert ob.StringField == "eggs"
    assert ob.StringField == u"eggs"

    ob.StringField = u"spam"
    assert ob.StringField == "spam"
    assert ob.StringField == u"spam"

    ob.StringField = u'\uffff\uffff'
    assert ob.StringField == u'\uffff\uffff'

    ob.StringField = System.String("spam")
    assert ob.StringField == "spam"
    assert ob.StringField == u"spam"

    ob.StringField = System.String(u'\uffff\uffff')
    assert ob.StringField == u'\uffff\uffff'

    # string fields are nullable
    ob.StringField = None
    assert ob.StringField is None

    with pytest.raises(TypeError):
        ConversionTest().StringField = 1

    # round-trip a non-ASCII .NET string through value, method and __str__
    world = UnicodeString()
    test_unicode_str = u"안녕"
    assert test_unicode_str == str(world.value)
    assert test_unicode_str == str(world.GetString())
    assert test_unicode_str == str(world)
def test_interface_conversion():
    """Test interface conversion."""
    from Python.Test import Spam, ISpam

    ob = ConversionTest()

    # the field is accessible both via the interface wrapper and directly
    assert ISpam(ob.SpamField).GetValue() == "spam"
    assert ob.SpamField.GetValue() == "spam"

    ob.SpamField = Spam("eggs")
    assert ISpam(ob.SpamField).GetValue() == "eggs"
    assert ob.SpamField.GetValue() == "eggs"

    # need to test spam subclass here.

    # interface fields are nullable
    ob.SpamField = None
    assert ob.SpamField is None

    # values not implementing the interface are rejected
    with pytest.raises(TypeError):
        ob = ConversionTest()
        ob.SpamField = System.String("bad")

    with pytest.raises(TypeError):
        ob = ConversionTest()
        ob.SpamField = System.Int32(1)
def test_object_conversion():
    """Test ob conversion."""
    from Python.Test import Spam

    ob = ConversionTest()
    assert ob.ObjectField is None

    # a System.Object field accepts CLR objects, None, boxed primitives...
    ob.ObjectField = Spam("eggs")
    assert ob.ObjectField.__class__.__name__ == "Spam"
    assert ob.ObjectField.GetValue() == "eggs"

    ob.ObjectField = None
    assert ob.ObjectField is None

    ob.ObjectField = System.String("spam")
    assert ob.ObjectField == "spam"

    ob.ObjectField = System.Int32(1)
    assert ob.ObjectField == 1

    # need to test subclass here

    # ...and even a pure-Python class object
    class Foo(object):
        pass

    ob.ObjectField = Foo
    assert ob.ObjectField == Foo
def test_enum_conversion():
    """Test enum conversion."""
    from Python.Test import ShortEnum

    ob = ConversionTest()
    assert ob.EnumField == ShortEnum.Zero

    # both enum members and their underlying int values are accepted
    ob.EnumField = ShortEnum.One
    assert ob.EnumField == ShortEnum.One

    ob.EnumField = 0
    assert ob.EnumField == ShortEnum.Zero
    assert ob.EnumField == 0

    ob.EnumField = 1
    assert ob.EnumField == ShortEnum.One
    assert ob.EnumField == 1

    # ints that are no defined member raise ValueError...
    with pytest.raises(ValueError):
        ob = ConversionTest()
        ob.EnumField = 10

    with pytest.raises(ValueError):
        ob = ConversionTest()
        ob.EnumField = 255

    # ...while values outside the underlying short range overflow
    with pytest.raises(OverflowError):
        ob = ConversionTest()
        ob.EnumField = 1000000

    with pytest.raises(ValueError):
        ob = ConversionTest()
        ob.EnumField = "spam"
def test_null_conversion():
    """Test null conversion.

    Reference-typed fields accept None; value-typed and enum fields
    must reject it with TypeError.
    """
    # Fix: removed the redundant function-local `import System` --
    # the module is already imported at the top of this file.
    ob = ConversionTest()

    ob.StringField = None
    assert ob.StringField is None

    ob.ObjectField = None
    assert ob.ObjectField is None

    ob.SpamField = None
    assert ob.SpamField is None

    # the generic Echo<T> round-trips values and nulls
    pi = 22/7
    assert ob.Echo[System.Double](pi) == pi
    assert ob.Echo[System.DateTime](None) is None

    # Primitive types and enums should not be set to null.
    with pytest.raises(TypeError):
        ConversionTest().Int32Field = None

    with pytest.raises(TypeError):
        ConversionTest().EnumField = None
def test_byte_array_conversion():
    """Test byte array conversion."""
    ob = ConversionTest()
    assert ob.ByteArrayField is None

    # a list of ints converts to byte[]
    ob.ByteArrayField = [0, 1, 2, 3, 4]
    array = ob.ByteArrayField
    assert len(array) == 5
    assert array[0] == 0
    assert array[4] == 4

    # a Python bytes object converts element-wise to byte[]
    value = b"testing"
    ob.ByteArrayField = value
    array = ob.ByteArrayField
    for i, _ in enumerate(value):
        assert array[i] == operator.getitem(value, i)
def test_sbyte_array_conversion():
    """Test sbyte array conversion."""
    ob = ConversionTest()
    assert ob.SByteArrayField is None

    # a list of ints converts to sbyte[]
    ob.SByteArrayField = [0, 1, 2, 3, 4]
    array = ob.SByteArrayField
    assert len(array) == 5
    assert array[0] == 0
    assert array[4] == 4

    # a Python bytes object converts element-wise to sbyte[]
    value = b"testing"
    ob.SByteArrayField = value
    array = ob.SByteArrayField
    for i, _ in enumerate(value):
        assert array[i] == operator.getitem(value, i)
def test_codecs():
    """Test codec registration from Python"""
    class ListAsRawEncoder(RawProxyEncoder):
        __namespace__ = "Python.Test"

        def CanEncode(self, clr_type):
            # only handle System.Collections.Generic.List`1
            return clr_type.Name == "List`1" and clr_type.Namespace == "System.Collections.Generic"

    list_raw_encoder = ListAsRawEncoder()
    PyObjectConversions.RegisterEncoder(list_raw_encoder)

    ob = ConversionTest()

    # the raw proxy keeps the underlying CLR list, so a mutation through
    # the proxy is visible on a fresh read of the field
    l = ob.ListField
    l.Add(42)
    assert ob.ListField.Count == 1
| {
"content_hash": "115015a73e7b74271f347be78d390668",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 99,
"avg_line_length": 25.62533692722372,
"alnum_prop": 0.671242242558115,
"repo_name": "AlexCatarino/pythonnet",
"id": "f8b67d3eb05ff38544e25424981f89dbf3ab9a83",
"size": "19018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_conversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "C",
"bytes": "13916"
},
{
"name": "C#",
"bytes": "762225"
},
{
"name": "C++",
"bytes": "4895"
},
{
"name": "PowerShell",
"bytes": "3976"
},
{
"name": "Python",
"bytes": "264931"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# coding: utf-8
# Use Gzipped files without extractionpyp
import shutil
import logging
import datetime
import os # operation system packages
import ConfigParser as configparser # a package to parse INI file or confige file.
import argparse # a package to parse commandline arguments.
import sys
from re import sub
import gzip
from subprocess import call # to run command line scripts
from subprocess import Popen, PIPE, check_output
from multiprocessing.dummy import Pool as Pool
__version__ = '0.2.7.2'
__author__ = "Attayeb Mohsen"
__date__ = "23/1/2019"

## 30/8/2018 add primer-length parameter
## 23/1/2019 correct a merging bug
## add start_at_chimera_removal option

starting_message = """ Microbiome analysis using multiple methods
Version: %s
Date: %s
National Institutes of Biomedical Innovation, Health, and Nutrition\n""" % \
    (__version__, __date__)

# Run id: the current timestamp as 14 digits (YYYYMMDDHHMMSS).
# strftime produces directly what the previous chain of str()/replace()
# calls built by stripping "-", " ", ":" and "." and slicing to 14 chars.
ID = datetime.datetime.now().strftime("%Y%m%d%H%M%S")

PR = dict({"id": ID})  # PARAMETERS dict
def remove_short_reads(infqfile, outfqfile, length):
    """
    Copy a fastq file, keeping only records whose sequence line is
    longer than ``length``.

    :param infqfile: input fastq file name.
    :type infqfile: str
    :param outfqfile: output fastq file name, after removing short reads.
    :type outfqfile: str
    :param length: minimum reads length.
    :type length: int
    :rtype: None
    :return: None
    @Action: filter fastq files removing short reads
    """
    # "with" guarantees both handles are closed even if an error occurs
    # half-way through the copy (the original leaked them on error).
    with open(infqfile, "r") as infq, open(outfqfile, "w") as outfq:
        lines = infq.readlines()
        # A fastq record is 4 consecutive lines:
        # header, sequence, separator ("+"), quality string.
        for header, seq, sep, qual in zip(lines[0::4], lines[1::4],
                                          lines[2::4], lines[3::4]):
            # NOTE(review): len(seq) counts the trailing newline, so the
            # effective cutoff is length-1 bases; kept as-is for
            # backward compatibility.
            if len(seq) > length:
                outfq.write(header)
                outfq.write(seq)
                outfq.write(sep)
                outfq.write(qual)
def asfolder(folder):
    """
    Return *folder* guaranteed to end with a trailing "/".

    :param folder: the folder name
    :type folder: str
    :return: folder name with / at the end
    :rtype: str
    """
    return folder + "/" if folder[-1] != "/" else folder
def execute(command, shell=True):
    """
    Execute a command and forward its output to the log file.

    :param command: The command to be executed (split on whitespace;
        shlex-style quoting is NOT honoured).
    :type command: str
    :param shell: accepted for backward compatibility with existing call
        sites; the command is always run without a shell here.
    :type shell: boolean
    :return: None; stdout is logged as info and stderr as warning.
    """
    loginfo(command)
    p = Popen(command.split(), stderr=PIPE, stdout=PIPE)
    output, error = p.communicate()
    if output != b"":
        # communicate() returns bytes, so they must be DECODED before
        # logging (the original called .encode(), which fails on bytes
        # in Python 3 and on non-ASCII output in Python 2).
        loginfo(output.decode('utf-8', 'replace'))
    if error != b"":
        logwarning(error.decode('utf-8', 'replace'))
def loginfo(message):
    """
    Record *message* in the run log at INFO level.

    :param message: text written to the log file
    :type message: str
    :return: None
    """
    encoded = message.encode('utf-8')
    logging.info(encoded)
def logwarning(message):
    """Record *message* in the run log at WARNING level."""
    encoded = message.encode('utf-8')
    logging.warning(encoded)
def get_configuration():
    """
    Load folder names, database paths and tool settings from the INI
    configuration file (PR['ConfigFile']) into the global PR dict.

    :return: None
    """
    global PR
    cp = configparser.ConfigParser()
    cp.read(PR['ConfigFile'])
    # Per-step sub-folder names, normalised with a trailing "/".
    PR['Ftrimmed'] = asfolder(cp.get('FOLDERS', 'trimmed'))
    PR['Fmerged'] = asfolder(cp.get('FOLDERS', 'merged'))
    PR['Fqc'] = asfolder(cp.get('FOLDERS', 'quality_step'))
    PR['Fchi'] = asfolder(cp.get('FOLDERS', 'chimera_removed'))
    PR['Fotus'] = asfolder(cp.get('FOLDERS', 'otus'))
    PR['Fdiv'] = asfolder(cp.get('FOLDERS', 'diversity_analyses'))
    PR['Fothers'] = asfolder(cp.get('FOLDERS', 'others'))
    PR['number_of_cores'] = int(cp.get('GENERAL', 'jobs_to_start'))
    # Reference database files (Silva / Greengenes / UNITE) and the
    # chimera-detection references.
    PR['silva_taxonomy'] = cp.get('SILVA', 'taxonomy')
    PR['silva_reference_seqs'] = cp.get('SILVA', 'reference_seqs')
    PR['silva_core_alignment'] = cp.get('SILVA', 'core_alignment')
    PR['silva_chim_ref'] = cp.get('CHIMERA', 'silva')
    PR['gg_taxonomy'] = cp.get('GG', 'taxonomy')
    PR['gg_reference_seqs'] = cp.get('GG', 'reference_seqs')
    PR['gg_core_alignment'] = cp.get('GG', 'core_alignment')
    PR['gg_chim_ref'] = cp.get('CHIMERA', 'gg')
    PR['unite_taxonomy'] = cp.get('UNITE', 'taxonomy')
    PR['unite_reference_seqs'] = cp.get('UNITE', 'reference_seqs')
    # Tool parameters (kept as strings, interpolated into command lines).
    PR['similarity'] = cp.get('GENERAL', 'similarity')
    PR['blast_e_value'] = cp.get('GENERAL', 'blast_e_value')
    PR['bbmap_resources'] = cp.get('bbmap', 'resources')
def locate_bbmap():
    """
    Locate the directory that contains the bbmap tools by querying the
    system ``locate`` database for ``bbmerge.sh``.

    :return: path of the folder holding bbmerge.sh (with trailing "/").
    :rtype: str
    """
    path = check_output(["locate", "bbmerge.sh"]).decode("utf-8")
    return sub('bbmerge.sh\n$', '', path)
def check_before_start():
    """
    Validate the run configuration before the pipeline starts: the
    configuration file and the reference-database files for the selected
    database must exist, and the output folder must not already exist.

    :raises IOError: when a required file is missing or the output
        folder already exists.
    :return: None
    """
    if os.path.isfile(PR['ConfigFile']):
        pass
    else:
        raise IOError("configuration file does not exist")
    if PR['rdb'] == "silva":
        # All three Silva files must be present.
        condition = True
        condition = condition and os.path.isfile(PR['silva_taxonomy'])
        condition = condition and os.path.isfile(PR['silva_reference_seqs'])
        condition = condition and os.path.isfile(PR['silva_core_alignment'])
        if not condition:
            raise IOError("Can not find Silva database files, "
                          "please check the configuration file: %s "
                          "to set up the correct folder" % PR['ConfigFile'])
    if PR['rdb'] == "gg":
        condition = True
        condition = condition and os.path.isfile(PR['gg_taxonomy'])
        condition = condition and os.path.isfile(PR['gg_reference_seqs'])
        condition = condition and os.path.isfile(PR['gg_core_alignment'])
        if not condition:
            raise IOError("Can not find greengenes database files, "
                          "please check the configuration file: %s to set up the correct folder" % PR['ConfigFile'])
    # NOTE(review): no file check is performed for rdb == "unite" —
    # confirm whether that is intentional.
    if os.path.isdir(PR['out_folder']):
        raise IOError("Output folder exists, Please use a non existent folder name")
def write_parameter_file(parameter_file):
    """
    Write the QIIME parameter file used by the downstream scripts,
    choosing the template matching the selected reference database
    (silva / unite / anything else = greengenes).

    :param parameter_file: path of the parameter file to create.
    :return: None
    """
    if PR['rdb'] == "silva":
        parameter_string = """
assign_taxonomy:id_to_taxonomy_fp\t%(taxonomy)s
assign_taxonomy:reference_seqs_fp\t%(reference_seqs)s
pick_otus.py:pick_otus_reference_seqs_fp\t%(reference_seqs)s
pick_otus:enable_rev_strand_match True
filter_alignment.py:pynast_template_alignment_fp\t%(core_alignment)s
parallel:jobs_to_start\t%(jobs_to_start)d
assign_taxonomy:similarity\t%(similarity)s
""" % {'taxonomy': PR['silva_taxonomy'],
       'reference_seqs': PR['silva_reference_seqs'],
       'core_alignment': PR['silva_core_alignment'],
       'jobs_to_start': PR['number_of_cores'],
       'similarity': PR['similarity']}
    elif PR['rdb'] == "unite":
        # pass
        # UNITE (fungal ITS): BLAST-based taxonomy assignment, no
        # alignment template.
        parameter_string = """
assign_taxonomy:id_to_taxonomy_fp\t%(taxonomy)s
assign_taxonomy:reference_seqs_fp\t%(reference_seqs)s
pick_otus.py:pick_otus_reference_seqs_fp\t%(reference_seqs)s
parallel:jobs_to_start\t%(jobs_to_start)d
assign_taxonomy:assignment_method blast
# should we use e_value or blast_e_value
parallel_assign_taxonomy_blast:e_value\t%(blast_e_value)s
# comment
""" % {'taxonomy': PR['unite_taxonomy'],
       'reference_seqs': PR['unite_reference_seqs'],
       'jobs_to_start': PR['number_of_cores'],
       'blast_e_value': PR['blast_e_value']}
    else:
        # Default: greengenes.
        parameter_string = '''
assign_taxonomy:id_to_taxonomy_fp\t%(taxonomy)s
assign_taxonomy:reference_seqs_fp\t%(reference_seqs)s
pick_otus.py:pick_otus_reference_seqs_fp\t%(reference_seqs)s
pick_otus:enable_rev_strand_match True
filter_alignment.py:pynast_template_alignment_fp\t%(core_alignment)s
parallel:jobs_to_start\t%(jobs_to_start)d
assign_taxonomy:similarity\t%(similarity)s
''' % {'taxonomy': PR['gg_taxonomy'],
       'reference_seqs': PR['gg_reference_seqs'],
       'core_alignment': PR['gg_core_alignment'],
       'jobs_to_start': PR['number_of_cores'],
       'similarity': PR['similarity']}
    if os.path.exists(PR['others']):
        pass
    else:
        os.mkdir(PR['others'])
    f = open(parameter_file, "w")
    f.write(parameter_string)
    f.close()
#def copyfilesanddecompress(inFolder, outFolder):
# shutil.copytree(asfolder(inFolder), asfolder(outFolder))
# print('copying files')
# execute("gunzip %s*.gz"%asfolder(outFolder))
# print('decompress files')
def primertrim(infqfile, outfqfile, length):
    """
    Cut the first ``length`` characters (the primer) off the sequence
    and quality lines of every fastq record.

    :param infqfile: input fastq file, optionally gzip-compressed (.gz).
    :type infqfile: str
    :param outfqfile: output fastq file; gzip-compressed when the name
        ends with .gz.
    :type outfqfile: str
    :param length: number of leading characters to remove.
    :type length: int
    :return: None
    """
    def _open(name, mode):
        # Transparently handle gzip compression by file extension.
        if name.endswith(".gz"):
            return gzip.open(name, mode)
        return open(name, mode)

    # "with" guarantees the handles are closed even on error (the
    # original leaked them if anything raised mid-way).
    with _open(infqfile, "r") as infq, _open(outfqfile, "w") as outfq:
        lines = infq.readlines()
        # fastq record = header, sequence, separator, quality.
        for header, seq, sep, qual in zip(lines[0::4], lines[1::4],
                                          lines[2::4], lines[3::4]):
            outfq.write(header)
            outfq.write(seq[length:])
            outfq.write(sep)
            outfq.write(qual[length:])
def trimfolder(inFolder, outFolder, trimq, ftrim=True):
    """
    Quality-trim every paired-end fastq pair in *inFolder* with bbduk,
    optionally cutting the primers first.

    :param inFolder: folder containing *_R1_* / *_R2_* fastq files.
    :param outFolder: folder receiving the trimmed pairs (created here).
    :param trimq: phred threshold passed to bbduk's -trimq.
    :param ftrim: when True, remove PR['primertrim_forward'] /
        PR['primertrim_reverse'] leading bases before trimming.
    :return: None
    """
    import os
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    files = os.listdir(inFolder)
    files.sort()
    # Pair each R1 file with its matching R2 file by name.
    ins1 = [x for x in files if "_R1_" in x]
    ins2 = [x.replace("_R1_", "_R2_") for x in ins1]
    os.mkdir(outFolder)
    # call("mkdir -p %s" % out_folder, shell=True)
    print("Trimming...")
    # get_ipython().system(u'mkdir -p {out_folder}')

    def process(i):
        # Trim the i-th R1/R2 pair.
        in1 = inFolder + ins1[i]
        in2 = inFolder + ins2[i]
        print("\n%s and %s" % (ins1[i], ins2[i]))
        out1 = outFolder + ins1[i]
        out2 = outFolder + ins2[i]
        out1_temp1 = outFolder + "temp1_" + ins1[i]
        out2_temp1 = outFolder + "temp1_" + ins2[i]
        # forctrimleft was added
        if ftrim:
            primertrim(in1, out1_temp1, PR['primertrim_forward'])
            primertrim(in2, out2_temp1, PR['primertrim_reverse'])
        else:
            out1_temp1 = in1
            out2_temp1 = in2
        if PR['adapter_ref'] != None:
            execute(
                "bbduk.sh -Xmx1000m -in1=%s -in2=%s -out1=%s -out2=%s -outm=stdout.fa -ref=%s -qtrim=r -trimq=%d -k=18 -ktrim=f" %
                (out1_temp1, out2_temp1, out1, out2, PR['adapter_ref'], trimq), shell=True)
        else:
            execute(
                "bbduk.sh -Xmx1000m -in1=%s -in2=%s -out1=%s -out2=%s -qtrim=r -trimq=%d" %
                (out1_temp1, out2_temp1, out1, out2, trimq), shell=True)
        # Only delete the primer-trim temporaries we created: when ftrim
        # is False, out*_temp1 alias the INPUT files, and the original
        # code deleted the inputs here (bug).
        if ftrim:
            os.remove(out1_temp1)
            os.remove(out2_temp1)
    p = Pool(PR['number_of_cores'])
    p.map(process, range(len(ins1)))
def mergefolderbb(inFolder, outFolder, maxloose=True):
    """
    Merge every paired-end fastq pair in *inFolder* with bbmerge.

    :param inFolder: folder containing *_R1_* / *_R2_* fastq files.
    :param outFolder: folder receiving the merged files (created here).
    :param maxloose: when True, pass -maxloose=t to bbmerge.
    :return: None
    """
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    import os
    files = os.listdir(inFolder)
    files.sort()
    # Pair R1 with R2 by name; the merged file drops the lane/read tag.
    ins1 = [x for x in files if "_R1_" in x]
    ins2 = [x.replace("_R1_", "_R2_") for x in ins1]
    outs = [x.replace("_L001_R1_001", "") for x in ins1]
    os.mkdir(outFolder)
    print("\nMerging ...")

    def process(i):
        # Merge the i-th pair.
        in1 = inFolder + ins1[i]
        in2 = inFolder + ins2[i]
        print("%s and %s" % (ins1[i], ins2[i]))
        out = outFolder + outs[i]
        if maxloose:
            execute("bbmerge.sh -in1=%s -in2=%s -out=%s -maxloose=t -ignorebadquality" % (in1, in2, out), shell=True)
        else:
            execute("bbmerge.sh -in1=%s -in2=%s -out=%s -ignorebadquality" % (in1, in2, out), shell=True)
        if PR['remove_intermediate']:
            # Delete the trimmed inputs once merged to save disk space.
            os.remove(in1)
            os.remove(in2)
    p = Pool(PR['number_of_cores'])
    p.map(process, range(len(ins1)))
    if PR['remove_intermediate']:
        os.removedirs(inFolder)
    print("Merging finished.")
def mergefolder(inFolder, outFolder, pp):
    """
    Merge every paired-end fastq pair in *inFolder* with fastq-join,
    then drop merged reads shorter than PR['minimum_length'].

    :param inFolder: folder containing *_R1_* / *_R2_* fastq files.
    :param outFolder: folder receiving the merged files (created here).
    :param pp: fastq-join's allowed percentage of mismatch (-p).
    :return: None
    """
    global PR
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    files = os.listdir(inFolder)
    files.sort()
    # Pair R1 with R2 by name; merged files drop the lane/read tag.
    ins1 = [x for x in files if "_R1_" in x]
    ins2 = [x.replace("_R1_", "_R2_") for x in ins1]
    outs = [x.replace("_L001_R1_001", "") for x in ins1]
    os.mkdir(outFolder)

    def process(i):
        in1 = inFolder + ins1[i]
        in2 = inFolder + ins2[i]
        print("Merging: %s and %s " % (ins1[i], ins2[i]))
        out = outFolder + "temp_" + outs[i]
        out_final = outFolder + outs[i]
        if out_final.endswith(".gz"):
            # fastq-join writes uncompressed output.
            out_final = sub(".gz", "", out_final)
        execute("fastq-join -p %d %s %s -o %s" % (pp, in1, in2, out), shell=True)
        # fastq-join produces <out>join plus two "un" files of unjoined
        # reads; keep only the joined reads.
        os.remove("%sun1" % out)
        os.remove("%sun2" % out)
        os.rename("%sjoin" % out, out)
        remove_short_reads(out, out_final, PR['minimum_length'])
        os.remove(out)
        if PR['remove_intermediate']:
            os.remove(in1)
            os.remove(in2)
    p = Pool(PR['number_of_cores'])
    p.map(process, range(len(ins1)))
    if PR['remove_intermediate']:
        os.removedirs(inFolder)


# The rest of the pipeline invokes this step as "mergefolderfastq"
# (full_analysis, stop_at_merging, stop_at_quality_control,
# stop_at_chimera_removal); without this alias those calls raise
# NameError.  "mergefolder" is kept for backward compatibility.
mergefolderfastq = mergefolder
def qualitycontrol(inFolder, outFolder, q):
    """
    Run QIIME split_libraries_fastq.py on every merged fastq file,
    producing one quality-filtered fasta file per sample.

    :param inFolder: folder of merged .fastq files.
    :param outFolder: folder receiving the .fasta files (created here).
    :param q: phred quality threshold passed as -q.
    :return: None
    """
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    import os
    files = os.listdir(inFolder)
    files.sort()
    os.mkdir(outFolder)
    # call("mkdir -p %s " % out_folder, shell=True)

    def process(i):
        # i is a file name, e.g. "sample1.fastq".
        temp = outFolder + "temp" + i + "/"
        print("\nQuality control: %s" % i)
        sampleId = i.replace(".fastq", "")
        inFile = inFolder + i
        outFile = outFolder + i.replace(".fastq", ".fasta")
        execute("""split_libraries_fastq.py -i %s -o %s --barcode_type not-barcoded --sample_ids %s -q %s""" % (
            inFile, temp, sampleId, q), shell=True)
        # split_libraries_fastq.py always writes "seqs.fna"; move it to
        # the per-sample output name and drop the temp folder.
        tempFile = temp + "seqs.fna"
        call("mv %s %s" % (tempFile, outFile), shell=True)
        call("rm -r %s" % temp, shell=True)
        if PR['remove_intermediate']:
            os.remove(inFile)
    p = Pool(PR['number_of_cores'])
    p.map(process, files)
    print("Quality control finished.")
    if PR['remove_intermediate']:
        os.removedirs(inFolder)
def removechimera(inFolder, outFolder, rdb="silva"):
    """
    Identify chimeric sequences with usearch61 and filter them out of
    each per-sample fasta file.

    :param inFolder: folder of quality-controlled .fasta files.
    :param outFolder: folder receiving chimera-free files (created here).
    :param rdb: "silva" uses PR['silva_chim_ref'], anything else uses
        PR['gg_chim_ref'].
    :return: None
    """
    global PR
    import os
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    files = os.listdir(inFolder)
    files.sort()
    os.mkdir(outFolder)
    # call("mkdir -p %s" % out_folder, shell=True)

    def process(i):
        # i is a per-sample fasta file name.
        print("Chimera removal: %s" % i)
        temp = outFolder + "temp" + i + "/"
        if rdb == "silva":
            execute("identify_chimeric_seqs.py -i %s -m usearch61 -o %s -r %s"
                    % (inFolder + i, temp + i, PR['silva_chim_ref']),
                    shell=True)
        else:
            execute("identify_chimeric_seqs.py -i %s -m usearch61 -o %s -r %s" % (
                inFolder + i, temp + i, PR['gg_chim_ref']),
                shell=True)
        # Keep only the sequences listed as non-chimeric.
        execute("filter_fasta.py -f %s -o %s -s %s/non_chimeras.txt" % (inFolder + i, outFolder + i, temp + i),
                shell=True)
        call("rm -r %s" % temp, shell=True)
        if PR['remove_intermediate']:
            os.remove(inFolder+i)
    p = Pool(PR['number_of_cores'])
    p.map(process, files)
    if PR['remove_intermediate']:
        os.removedirs(inFolder)
def pickotus(inFolder, outFolder, rdb="silva", fungus=False):
    """
    Pick OTUs with QIIME pick_open_reference_otus.py.

    :param inFolder: folder of chimera-free .fasta files.
    :param outFolder: output folder for the OTU tables.
    :param rdb: "silva" or anything else for greengenes.
    :param fungus: when True, suppress alignment and tree building
        (fungal ITS data).
    :return: None
    """
    # TODO : add no parallel option
    global PR
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    inFolder_fasta = inFolder + "*.fasta"
    print("Otu picking...")
    if PR['np']:
        parallel_string = ""
    else:
        parallel_string = "-a -O %d" % PR['number_of_cores']
    if PR['c_ref'] != "none":
        # Continuation run: reuse the reference sequences produced by a
        # previous analysis instead of the configured database.
        if rdb == "silva":
            execute("pick_open_reference_otus.py -i %s -o %s -p %s -r %s %s -n %s"
                    % (
                inFolder_fasta, outFolder, PR['parameter_file_name'], PR['c_ref'], parallel_string, PR['c_otu_id']),
                    shell=True)
            #execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s"
            #        % (out_folder + "otu_table_mc2_w_tax_no_pynast_failures.biom",
            #           out_folder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom",
            #           PR['silva_reference_seqs']), shell=True)
        elif fungus:
            execute("pick_open_reference_otus.py -i %s -o %s -p %s %s -n %s --suppress_align_and_tree"
                    % (inFolder_fasta, outFolder, PR['parameter_file_name'], parallel_string, PR['c_otu_id']), shell=True)
        else:
            execute("pick_open_reference_otus.py -i %s -o %s -r %s -p %s %s -n %s"
                    % (inFolder_fasta, outFolder,
                       PR['c_ref'], PR['parameter_file_name'],
                       parallel_string, PR['c_otu_id']), shell=True)
            #execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s"
            #        % (out_folder + "otu_table_mc2_w_tax_no_pynast_failures.biom",
            #           out_folder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom",
            #           PR['gg_reference_seqs']), shell=True)
    else:
        if rdb == "silva":
            execute("pick_open_reference_otus.py -i %s -o %s -p %s -r %s %s -n %s"
                    % (inFolder_fasta, outFolder, PR['parameter_file_name'], PR['silva_reference_seqs'], parallel_string,
                       PR['c_otu_id']),
                    shell=True)
            execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s"
                    % (outFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom",
                       outFolder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom",
                       PR['silva_reference_seqs']), shell=True)
        elif fungus:
            # BUGFIX: a space was missing before --suppress_align_and_tree,
            # which glued the flag onto the -n value ("New--suppress...").
            execute("pick_open_reference_otus.py -i %s -o %s -p %s %s -n %s --suppress_align_and_tree"
                    % (inFolder_fasta, outFolder, PR['parameter_file_name'], parallel_string,
                       PR['c_otu_id']), shell=True)
        else:
            # BUGFIX: the format string had five placeholders for six
            # arguments (parallel_string was unconverted -> TypeError).
            execute("pick_open_reference_otus.py -i %s -o %s -r %s -p %s %s -n %s"
                    % (inFolder_fasta, outFolder,
                       PR['gg_reference_seqs'], PR['parameter_file_name'],
                       parallel_string, PR['c_otu_id']), shell=True)
            execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s"
                    % (outFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom",
                       outFolder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom",
                       PR['gg_reference_seqs']), shell=True)
    if PR['remove_intermediate']:
        os.removedirs(inFolder)
def writedf(outFile, ids, sampleIds):
    """
    Write a QIIME mapping file with one row per sample.

    :param outFile: path of the mapping file to create.
    :param ids: sample identifiers (first column).
    :param sampleIds: file names corresponding to each identifier.
    :return: None
    """
    # "with" guarantees the file is closed even if a write fails.
    with open(outFile, "w+") as f:
        f.write("#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tRead\tFile\tDescription\n")
        # Barcode and primer columns are intentionally left empty.
        for sample_id, file_name in zip(ids, sampleIds):
            f.write("%s\t\t\tR1\t%s\tsingle_file\n" % (sample_id, file_name))
def create_map(inFolder, outFile):
    """
    Build a QIIME mapping file from the per-sample fasta files found in
    *inFolder*.

    :param inFolder: folder containing one .fasta file per sample.
    :param outFile: path of the mapping file to create.
    :return: None
    """
    inFolder = asfolder(inFolder)
    print("Writing mapping file")
    sampleIds = os.listdir(inFolder)
    # Sample id = file name without the extension, truncated at the
    # first "_".  (Removed: unused local dict and a redundant
    # function-level "import os" — os is imported at module level.)
    ids = [x.replace(".fasta", "").split("_")[0] for x in sampleIds]
    writedf(outFile, ids, sampleIds)
def corediv(inFolder, outFolder, mappingFile, depth):
    """
    Run QIIME core_diversity_analyses.py on a picked-OTU folder.

    :param inFolder: OTU-picking output folder (biom table + tree).
    :param outFolder: output folder for the diversity analyses.
    :param mappingFile: QIIME mapping file path.
    :param depth: sampling (rarefaction) depth, passed as -e.
    :return: None
    """
    print("Core diversity analyses...")
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    if PR['fungus']:
        # Fungal runs skip pynast alignment, so the unfiltered table is used.
        biom = inFolder + "otu_table_mc2_w_tax.biom"
    else:
        biom = inFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom"
    tree = inFolder + "rep_set.tre"
    # get_ipython().system(
    #     u'core_diversity_analyses.py -i {biom} -o {out_folder} -m {mapping_file} -t {tree} -e {depth}')
    if PR['fungus']:
        # No tree for fungal data: use nonphylogenetic metrics.
        execute("core_diversity_analyses.py -i %s -o %s -m %s -e %d --nonphylogenetic_diversity" % (
            biom, outFolder, mappingFile, depth),
            shell=True)
    else:
        execute(
            "core_diversity_analyses.py -i %s -o %s -m %s -t %s -e %d" % (biom, outFolder, mappingFile, tree, depth),
            shell=True)
def full_analysis(inFolder, outFolder, depth, rdb, trimq, joining_method,
                  qcq, maxloose, fastq_p):
    """
    Run the complete pipeline: trimming, merging, quality control,
    chimera removal, OTU picking and core diversity analyses.

    :param inFolder: folder with the raw paired-end fastq files.
    :param outFolder: analysis output folder.
    :param depth: sampling depth for the diversity analyses.
    :param rdb: reference database ("silva" or greengenes).
    :param trimq: phred threshold for quality trimming.
    :param joining_method: "fastq-join" or "bbmerge".
    :param qcq: phred threshold for the quality-control step.
    :param maxloose: passed to bbmerge when it is the chosen merger.
    :param fastq_p: fastq-join's percentage of mismatch.
    :raises ValueError: for an unknown *joining_method*.
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    merged = asfolder(outFolder + PR['Fmerged'])
    qc = asfolder(outFolder + PR['Fqc'])
    chi = asfolder(outFolder + PR['Fchi'])
    otus = asfolder(outFolder + PR['Fotus'])
    div = asfolder(outFolder + PR['Fdiv'])
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUGFIX: 'raise ("Wrong method")' raised a TypeError (strings
        # are not exceptions), hiding the intended message.
        raise ValueError("Wrong method")
    qualitycontrol(merged, qc, qcq)
    removechimera(qc, chi, rdb)
    pickotus(chi, otus, rdb)
    # here
    # NOTE(review): create_mapping_file is only defined in the __main__
    # block — NameError if this function is imported and called elsewhere.
    if create_mapping_file:
        create_map(qc, PR['mapping_file'])
    corediv(otus, div, PR['mapping_file'], depth)
def stop_at_merging(inFolder, outFolder, trimq, joining_method, maxloose, fastq_p):
    """
    Run only the trimming and merging steps of the pipeline.

    :param inFolder: folder with the raw paired-end fastq files.
    :param outFolder: analysis output folder.
    :param trimq: phred threshold for quality trimming.
    :param joining_method: "fastq-join" or "bbmerge".
    :param maxloose: passed to bbmerge when it is the chosen merger.
    :param fastq_p: fastq-join's percentage of mismatch.
    :raises ValueError: for an unknown *joining_method*.
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    # NOTE(review): written differently from the sibling functions but
    # equivalent as long as PR['Fmerged'] already ends with "/".
    merged = asfolder(outFolder) + PR['Fmerged']
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUGFIX: 'raise (<str>)' raised a TypeError (strings are not
        # exceptions); raise a real exception with the same message.
        raise ValueError("%s: unknown merging metod method" % joining_method)
def stop_at_quality_control(inFolder, outFolder, joining_method, trimq,
                            qcq, maxloose, fastq_p):
    """
    Run the pipeline up to and including the quality-control step.

    :param inFolder: folder with the raw paired-end fastq files.
    :param outFolder: analysis output folder.
    :param joining_method: "fastq-join" or "bbmerge".
    :param trimq: phred threshold for quality trimming.
    :param qcq: phred threshold for the quality-control step.
    :param maxloose: passed to bbmerge when it is the chosen merger.
    :param fastq_p: fastq-join's percentage of mismatch.
    :raises ValueError: for an unknown *joining_method*.
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    merged = asfolder(outFolder + PR['Fmerged'])
    qc = asfolder(outFolder + PR['Fqc'])
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUGFIX: 'raise (<str>)' raised a TypeError (strings are not
        # exceptions); raise a real exception with the same message.
        raise ValueError("%s: unknown merging metod method" % joining_method)
    qualitycontrol(merged, qc, qcq)
def stop_at_chimera_removal(inFolder, outFolder, rdb, trimq, joining_method,
                            qcq, maxloose, fastq_p):
    """
    Run the pipeline up to and including the chimera-removal step.

    :param inFolder: folder with the raw paired-end fastq files.
    :param outFolder: analysis output folder.
    :param rdb: reference database used for chimera detection.
    :param trimq: phred threshold for quality trimming.
    :param joining_method: "fastq-join" or "bbmerge".
    :param qcq: phred threshold for the quality-control step.
    :param maxloose: passed to bbmerge when it is the chosen merger.
    :param fastq_p: fastq-join's percentage of mismatch.
    :raises ValueError: for an unknown *joining_method*.
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    merged = asfolder(outFolder + PR['Fmerged'])
    qc = asfolder(outFolder + PR['Fqc'])
    chi = asfolder(outFolder + PR['Fchi'])
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUGFIX: 'raise (<str>)' raised a TypeError (strings are not
        # exceptions); raise a real exception with the same message.
        raise ValueError("%s: unknown merging metod method" % joining_method)
    qualitycontrol(merged, qc, qcq)
    removechimera(qc, chi, rdb)
def start_at_chimera_removal(inFolder, outFolder, rdb, depth):
    """
    Resume the pipeline at chimera removal: *inFolder* must already
    hold quality-controlled fasta files.
    """
    global PR
    qc_dir = asfolder(inFolder)
    chimera_dir = asfolder(outFolder + PR['Fchi'])
    otus_dir = asfolder(outFolder + PR['Fotus'])
    diversity_dir = asfolder(outFolder + PR['Fdiv'])
    removechimera(qc_dir, chimera_dir, rdb)
    pickotus(chimera_dir, otus_dir, rdb)
    # create_mapping_file is set in the __main__ block.
    if create_mapping_file:
        create_map(qc_dir, PR['mapping_file'])
    corediv(otus_dir, diversity_dir, PR['mapping_file'], depth)
def start_otu_pickng(inFolder, outFolder, depth, rdb):
    """
    Resume the pipeline at OTU picking: *inFolder* must already hold
    chimera-free fasta files.
    """
    global PR
    chimera_dir = asfolder(inFolder)
    otus_dir = asfolder(outFolder + PR['Fotus'])
    diversity_dir = asfolder(outFolder + PR['Fdiv'])
    pickotus(chimera_dir, otus_dir, rdb)
    # create_mapping_file is set in the __main__ block.
    if create_mapping_file:
        create_map(chimera_dir, PR['mapping_file'])
    corediv(otus_dir, diversity_dir, PR['mapping_file'], depth)
def start_diversity_analysis(inFolder, outFolder, mapping_file, depth):
    """Run only the core diversity analyses on an existing OTU folder."""
    otu_dir = asfolder(inFolder)
    diversity_dir = asfolder(outFolder + PR['Fdiv'])
    corediv(inFolder=otu_dir, outFolder=diversity_dir,
            mappingFile=mapping_file, depth=depth)
if __name__ == "__main__":
    # ---- command-line interface -------------------------------------
    parser = argparse.ArgumentParser(description="""Microbiome analysis using multiple methods
    Version: %s
    Date: %s """ % (__version__, __date__))
    parser.add_argument("-i",  # "--input",
                        dest="input",
                        # type=str,
                        help="the input sequences filepath (fastq files) [REQUIRED]",
                        metavar="Input folder",
                        required=True)
    parser.add_argument("-o",
                        # "--output",
                        dest="output",
                        type=str,
                        metavar="Output folder",
                        help="the output directory [REQUIRED]",
                        required=True)
    parser.add_argument("-t",
                        dest="trim_threshold",
                        type=int,
                        metavar="trim_phred_threshold",
                        help="phred quality threshold for trimming [default: 12]",
                        default=12)
    parser.add_argument("-p",
                        type=int,
                        dest="fastq_p",
                        metavar="fastq-join p",
                        help="fastq-join's percentage of mismatch [default: 16]",
                        default=16)
    parser.add_argument("--adapter",
                        metavar=None,
                        dest="adapter_reference",
                        help="Adapters reference file",
                        type=str)
    parser.add_argument("-b",
                        dest="beginwith",
                        type=str,
                        metavar="starting step",
                        choices=['otu_picking', 'diversity_analysis', 'chimera_removal'],
                        help="starting the analysis in the middle: (otu_picking), (diversity_analysis), (chimera_removal)")
    parser.add_argument("-s",
                        dest="stop_at",
                        type=str,
                        metavar="stop at",
                        choices=['merging', 'quality_control', 'chimera_removal'],
                        help='terminate the analysis at this step [choices: (merging), (quality_control), (chimera_'
                             'removal))')
    parser.add_argument("-j",
                        dest='joining_method',
                        help="choose the merging method (fastq-join) or (bbmerge) [default: fastq-join]",
                        type=str,
                        metavar="joining method",
                        choices=['fastq-join', "bbmerge"],
                        default="fastq-join")
    parser.add_argument("-m",
                        dest="maxloose",
                        help="Assign maxloose to be true for bbmerge [default: False]",
                        action="store_true")
    parser.add_argument("-q",
                        dest="qc_threshold",
                        type=int,
                        metavar="quality control threshold",
                        help="quality control phred threshold [default: 19]",
                        default=19)
    parser.add_argument("--continuation_reference",
                        dest="c_ref",
                        type=str,
                        metavar="newref_seq.fna",
                        help="reference sequence for continuation. If you want to continue analysis using the reference "
                             "data set from previous analysis. you can find it in the last sample otus folder new_refseqs.fna",
                        default="none")
    parser.add_argument("--continuation_otu_id",
                        dest="c_otu_id",
                        type=str,
                        metavar=None,
                        help="continuation reference new otus ids",
                        default="New")
    parser.add_argument("-r",
                        dest="rdb",
                        metavar="Reference database",
                        help="silva, greengenes [default: silva]",
                        choices=['silva', 'greengenes', 'unite'],
                        type=str,
                        default="silva")
    parser.add_argument("-c",
                        dest="ConfigFile",
                        type=str,
                        metavar="Configuration file name",
                        default='qiime.cfg',
                        help="Configuration file name [default: qiime.cfg]")
    parser.add_argument("-a",
                        dest="mapping_file",
                        help="Mapping file name",
                        metavar="Mapping file name",
                        type=str)
    parser.add_argument("--parameter_file_name",
                        help="The name of the parameter file [if not assigned is automatically produced using "
                             "configuration file",
                        type=str,
                        metavar=None,
                        dest="parameter_file_name")
    parser.add_argument("-n",
                        # "--number_of_cores",
                        help="Specify the number of jobs to start with [default: 2]",
                        type=int,
                        metavar='Number of jobs',
                        dest="number_of_cores",
                        default=2)
    parser.add_argument("-e",
                        dest="depth",
                        type=int,
                        metavar="Sampling depth",
                        help="sampling depth for diversity analyses [default: 10000]",
                        default=10000)
    parser.add_argument("--remove_intermediate_files",
                        help="To remove intermediate files, to reduce the disk space",
                        dest="remove_intermediate",
                        action="store_true")
    # parser.add_argument("--decompress",
    #                     help="Copy input files to outputfolder/fastq and decompress them",
    #                     dest="decompress",
    #                     action="store_true")
    parser.add_argument("--ml",
                        dest="minimum_length",
                        metavar='Minimum length',
                        type=int,
                        help="Minimum length of reads kept after merging [default: 380]",
                        default=380)
    parser.add_argument("--primer-trim-f",
                        dest="primertrim_forward",
                        metavar='Primer Trim',
                        type=int,
                        help="length of the forward primer [17]",
                        default=17)
    parser.add_argument("--primer-trim-r",
                        dest="primertrim_reverse",
                        metavar='Primer Trim',
                        type=int,
                        help="length of the reverse primer [21]",
                        default=21)
    #x = parser.format_usage()
    #parser.usage = starting_message #+ x
    arg = parser.parse_args()
    # ---- collect the run parameters into the global PR dict ---------
    PR.update({
        'in_folder': asfolder(arg.input),
        'out_folder': asfolder(arg.output),
        # 'decompress': arg.aaa
        # ress,
        'rdb': arg.rdb,
        'qcq': arg.qc_threshold,
        'maxloose': arg.maxloose,
        'trimq': arg.trim_threshold,
        'joining_method': arg.joining_method,
        'fastq_p': arg.fastq_p,
        'depth': arg.depth,
        'ConfigFile': arg.ConfigFile,
        'parameter_file_name': arg.parameter_file_name,
        'remove_intermediate': arg.remove_intermediate,
        'beginwith': arg.beginwith,
        'mapping_file': arg.mapping_file,
        'adapter_ref': arg.adapter_reference,
        'minimum_length': arg.minimum_length,
        'c_ref': arg.c_ref,
        'c_otu_id': arg.c_otu_id,
        'primertrim_forward': arg.primertrim_forward,
        'primertrim_reverse': arg.primertrim_reverse})
    ## parameter_file
    get_configuration()
    check_before_start()
    # UNITE implies fungal (ITS) data: no alignment/tree is built.
    if PR['rdb'] == 'unite':
        PR['fungus'] = True
    else:
        PR['fungus'] = False
    PR['others'] = asfolder(PR['out_folder'] + PR['Fothers'])
    PR['number_of_cores'] = arg.number_of_cores
    # np == "no parallel": with a single core, skip QIIME's -a/-O flags.
    if PR['number_of_cores'] == 1:
        PR['np'] = True
    else:
        PR['np'] = False
    # NOTE(review): check_before_start() already raises when the output
    # folder exists, so this silent sys.exit() branch appears dead.
    if (os.path.isdir(PR['out_folder'])):
        sys.exit()
    else:
        os.mkdir(PR['out_folder'])
    if not os.path.isdir(PR['others']):
        os.mkdir(PR['others'])
    # ---- logging ----------------------------------------------------
    logging.basicConfig(filename=PR['others'] + "log.txt",
                        format='%(levelname)s \n %(message)s',
                        level=logging.DEBUG)
    loginfo('started')
    # Dump all parameters to the log for reproducibility.
    [loginfo(str(P) + ": " + str(PR[P])) for P in PR]
    # if PR['decompress']:
    #     copyfilesanddecompress(PR['in_folder'], asfolder(PR['out_folder']+"fastq"))
    #     PR['in_folder'] = asfolder(PR['out_folder'])+'fastq/'
    if arg.parameter_file_name == None:
        PR['parameter_file_name'] = PR['others'] + "para%s.txt" % PR['id']
        write_parameter_file(PR['parameter_file_name'])
    # NOTE(review): create_mapping_file is only assigned in the first
    # branch; pipeline functions reading it raise NameError when a
    # mapping file was supplied with -a — confirm intended.
    if arg.mapping_file == None:
        create_mapping_file = True
        PR['mapping_file'] = PR['others'] + "map.tsv"
    else:
        PR['mapping_file'] = arg.mapping_file
    # NOTE(review): diversity_analysis without a mapping file is
    # silently allowed here — confirm intended.
    if (arg.beginwith == "diversity_analysis") and (arg.mapping_file == None):
        pass
    number_of_cores = PR['number_of_cores']
    # ---- dispatch to the requested pipeline entry point -------------
    if arg.beginwith == "otu_picking":
        start_otu_pickng(inFolder=PR['in_folder'],
                         outFolder=PR['out_folder'],
                         rdb=PR['rdb'],
                         depth=PR['depth'])
    elif arg.beginwith == "diversity_analysis":
        start_diversity_analysis(inFolder=PR['in_folder'],
                                 outFolder=PR['out_folder'],
                                 mapping_file=PR['mapping_file'],
                                 depth=PR['depth'])
    elif arg.beginwith == "chimera_removal":
        start_at_chimera_removal(inFolder=PR['in_folder'],
                                 outFolder=PR['out_folder'],
                                 rdb= PR['rdb'],
                                 depth=PR['depth'])
    elif arg.stop_at == "chimera_removal":
        stop_at_chimera_removal(inFolder=PR['in_folder'],
                                outFolder=PR['out_folder'],
                                rdb=PR['rdb'],
                                joining_method=PR['joining_method'],
                                fastq_p=PR['fastq_p'],
                                maxloose=PR['maxloose'],
                                qcq=PR['qcq'],
                                trimq=PR['trimq'])
    elif arg.stop_at == "merging":
        stop_at_merging(inFolder=PR['in_folder'],
                        outFolder=PR['out_folder'],
                        joining_method=PR['joining_method'],
                        fastq_p=PR['fastq_p'],
                        maxloose=PR['maxloose'],
                        trimq=PR['trimq'])
    elif arg.stop_at == "quality_control":
        stop_at_quality_control(inFolder=PR['in_folder'],
                                outFolder=PR['out_folder'],
                                joining_method=PR['joining_method'],
                                fastq_p=PR['fastq_p'],
                                maxloose=PR['maxloose'],
                                qcq=PR['qcq'],
                                trimq=PR['trimq'])
    else:
        full_analysis(inFolder=PR['in_folder'],
                      outFolder=PR['out_folder'],
                      rdb=PR['rdb'],
                      joining_method=PR['joining_method'],
                      fastq_p=PR['fastq_p'],
                      maxloose=PR['maxloose'],
                      qcq=PR['qcq'],
                      depth=PR['depth'],
                      trimq=PR['trimq'])
    loginfo("Finished")
| {
"content_hash": "7ec3fd19297705d1c2aff741b2c676b0",
"timestamp": "",
"source": "github",
"line_count": 1057,
"max_line_length": 130,
"avg_line_length": 34.13907284768212,
"alnum_prop": 0.5391436885132327,
"repo_name": "Attayeb/qanaus",
"id": "eb102fea6ac54a2dae0d182a8a3b4914dafe2d0d",
"size": "36108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auto-q.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26126"
}
],
"symlink_target": ""
} |
import pandas as pd
from unittest.mock import patch, ANY
from cartoframes.data.observatory.catalog.entity import CatalogList
from cartoframes.data.observatory.catalog.variable import Variable
from cartoframes.data.observatory.catalog.repository.variable_repo import VariableRepository
from cartoframes.data.observatory.catalog.repository.dataset_repo import DatasetRepository
from cartoframes.data.observatory.catalog.repository.constants import VARIABLE_FILTER
from .examples import test_datasets, test_variable1, test_variables, db_variable1, test_variable2, db_variable2
class TestVariable(object):
@patch.object(VariableRepository, 'get_by_id')
def test_get_variable_by_id(self, mocked_repo):
# Given
mocked_repo.return_value = test_variable1
# When
variable = Variable.get(test_variable1.id)
# Then
assert isinstance(variable, object)
assert isinstance(variable, Variable)
assert variable == test_variable1
@patch.object(DatasetRepository, 'get_all')
def test_get_datasets_by_variable(self, mocked_repo):
# Given
mocked_repo.return_value = test_datasets
# When
datasets = test_variable1.datasets
# Then
mocked_repo.assert_called_once_with({VARIABLE_FILTER: test_variable1.id})
assert isinstance(datasets, list)
assert isinstance(datasets, CatalogList)
assert datasets == test_datasets
def test_variable_properties(self):
# Given
variable = Variable(db_variable1)
# When
variable_id = variable.id
slug = variable.slug
name = variable.name
description = variable.description
column_name = variable.column_name
db_type = variable.db_type
dataset = variable.dataset
agg_method = variable.agg_method
variable_group = variable.variable_group
summary = variable.summary
# Then
assert variable_id == db_variable1['id']
assert slug == db_variable1['slug']
assert name == db_variable1['name']
assert description == db_variable1['description']
assert column_name == db_variable1['column_name']
assert db_type == db_variable1['db_type']
assert dataset == db_variable1['dataset_id']
assert agg_method == db_variable1['agg_method']
assert variable_group == db_variable1['variable_group_id']
assert summary == db_variable1['summary_json']
def test_variable_is_exported_as_series(self):
# Given
variable = test_variable1
# When
variable_series = variable.to_series()
# Then
assert isinstance(variable_series, pd.Series)
assert variable_series['id'] == variable.id
def test_variable_is_exported_as_dict(self):
# Given
variable = Variable(db_variable1)
expected_dict = {key: value for key, value in db_variable1.items() if key != 'summary_json'}
# When
variable_dict = variable.to_dict()
# Then
assert isinstance(variable_dict, dict)
assert variable_dict == expected_dict
def test_variable_is_represented_with_slug_and_description(self):
# Given
variable = Variable(db_variable1)
# When
variable_repr = repr(variable)
# Then
assert variable_repr == "<Variable.get('{slug}')> #'{descr}'"\
.format(slug=db_variable1['slug'], descr=db_variable1['description'])
def test_variable_is_printed_with_classname(self):
# Given
variable = Variable(db_variable1)
# When
variable_str = str(variable)
# Then
assert variable_str == 'Variable({dict_str})'.format(dict_str=str(db_variable1))
@patch.object(VariableRepository, 'get_all')
def test_get_all_variables(self, mocked_repo):
# Given
mocked_repo.return_value = test_variables
# When
variables = Variable.get_all()
# Then
assert isinstance(variables, list)
assert isinstance(variables, CatalogList)
assert variables == test_variables
def test_variable_list_is_printed_correctly(self):
# Given
variables = CatalogList([test_variable1, test_variable2])
shorten_description = test_variable2.description[0:50] + '...'
# When
variables_str = str(variables)
# Then
assert variables_str == "[<Variable.get('{id1}')> #'{descr1}', <Variable.get('{id2}')> #'{descr2}']" \
.format(id1=db_variable1['slug'], descr1=db_variable1['description'],
id2=db_variable2['slug'], descr2=shorten_description)
def test_variable_list_is_represented_correctly(self):
# Given
variables = CatalogList([test_variable1, test_variable2])
shorten_description = test_variable2.description[0:50] + '...'
# When
variables_repr = repr(variables)
# Then
assert variables_repr == "[<Variable.get('{id1}')> #'{descr1}', <Variable.get('{id2}')> #'{descr2}']" \
.format(id1=db_variable1['slug'], descr1=db_variable1['description'],
id2=db_variable2['slug'], descr2=shorten_description)
def test_variables_items_are_obtained_as_variable(self):
# Given
variables = test_variables
# When
variable = variables[0]
# Then
assert isinstance(variable, Variable)
assert variable == test_variable1
def test_variables_are_exported_as_dataframe(self):
# Given
variables = test_variables
variable = variables[0]
expected_variable_df = variable.to_series()
del expected_variable_df['summary_json']
# When
variable_df = variables.to_dataframe()
sliced_variable = variable_df.iloc[0]
# Then
assert isinstance(variable_df, pd.DataFrame)
assert isinstance(sliced_variable, pd.Series)
assert sliced_variable.equals(expected_variable_df)
@patch.object(pd, 'set_option')
@patch.object(VariableRepository, 'get_all')
def test_summary_describe(self, mocked_repo, mocked_set):
    """describe() applies the float display format by default."""
    # Given
    variable = test_variables[0]
    # When
    variable.describe()
    # Then
    mocked_set.assert_called_once_with('display.float_format', ANY)
@patch.object(pd, 'set_option')
@patch.object(VariableRepository, 'get_all')
def test_summary_describe_custom_format(self, mocked_repo, mocked_set):
    """describe(autoformat=False) leaves the pandas display options untouched."""
    # Given
    variable = test_variables[0]
    # When
    variable.describe(autoformat=False)
    # Then
    mocked_set.assert_not_called()
| {
"content_hash": "fd39c4e31dd290e29b113c7bfe233ede",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 111,
"avg_line_length": 33.74752475247525,
"alnum_prop": 0.6235880886020243,
"repo_name": "CartoDB/cartoframes",
"id": "15e5db28c52151127306c977d3c127aab5421836",
"size": "6817",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/data/observatory/catalog/test_variable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "51696"
},
{
"name": "Jinja",
"bytes": "18917"
},
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "773606"
}
],
"symlink_target": ""
} |
import asyncio
import pytest
from sanic import Sanic
from sanic.response import text
from sanic.router import RouteExists, RouteDoesNotExist
# ------------------------------------------------------------ #
# UTF-8
# ------------------------------------------------------------ #
def test_shorthand_routes_get():
    """@app.get registers the route for GET only; other verbs return 405."""
    app = Sanic('test_shorhand_routes_get')

    @app.get('/get')
    def handler(request):
        return text('OK')

    _, resp = app.test_client.get('/get')
    assert resp.text == 'OK'
    _, resp = app.test_client.post('/get')
    assert resp.status == 405
def test_route_strict_slash():
    """With strict_slashes, the trailing-slash variant is a distinct route."""
    app = Sanic('test_route_strict_slash')

    @app.get('/get', strict_slashes=True)
    def get_handler(request):
        return text('OK')

    @app.post('/post/', strict_slashes=True)
    def post_handler(request):
        return text('OK')

    _, resp = app.test_client.get('/get')
    assert resp.text == 'OK'
    _, resp = app.test_client.get('/get/')
    assert resp.status == 404
    _, resp = app.test_client.post('/post/')
    assert resp.text == 'OK'
    _, resp = app.test_client.post('/post')
    assert resp.status == 404
def test_route_optional_slash():
    """Without strict_slashes, both slash variants hit the same handler."""
    app = Sanic('test_route_optional_slash')

    @app.get('/get')
    def handler(request):
        return text('OK')

    _, resp = app.test_client.get('/get')
    assert resp.text == 'OK'
    _, resp = app.test_client.get('/get/')
    assert resp.text == 'OK'
def test_shorthand_routes_post():
    """@app.post registers the route for POST only; GET returns 405."""
    app = Sanic('test_shorhand_routes_post')

    @app.post('/post')
    def handler(request):
        return text('OK')

    _, resp = app.test_client.post('/post')
    assert resp.text == 'OK'
    _, resp = app.test_client.get('/post')
    assert resp.status == 405
def test_shorthand_routes_put():
    """@app.put registers the route for PUT only; GET returns 405."""
    app = Sanic('test_shorhand_routes_put')

    @app.put('/put')
    def handler(request):
        return text('OK')

    _, resp = app.test_client.put('/put')
    assert resp.text == 'OK'
    _, resp = app.test_client.get('/put')
    assert resp.status == 405
def test_shorthand_routes_patch():
    """@app.patch registers the route for PATCH only; GET returns 405."""
    app = Sanic('test_shorhand_routes_patch')

    @app.patch('/patch')
    def handler(request):
        return text('OK')

    _, resp = app.test_client.patch('/patch')
    assert resp.text == 'OK'
    _, resp = app.test_client.get('/patch')
    assert resp.status == 405
def test_shorthand_routes_head():
    """@app.head registers the route for HEAD only; GET returns 405."""
    app = Sanic('test_shorhand_routes_head')

    @app.head('/head')
    def handler(request):
        return text('OK')

    _, resp = app.test_client.head('/head')
    assert resp.status == 200
    _, resp = app.test_client.get('/head')
    assert resp.status == 405
def test_shorthand_routes_options():
    """@app.options registers the route for OPTIONS only; GET returns 405."""
    app = Sanic('test_shorhand_routes_options')

    @app.options('/options')
    def handler(request):
        return text('OK')

    _, resp = app.test_client.options('/options')
    assert resp.status == 200
    _, resp = app.test_client.get('/options')
    assert resp.status == 405
def test_static_routes():
    """Two static routes each dispatch to their own handler."""
    app = Sanic('test_dynamic_route')

    @app.route('/test')
    async def first(request):
        return text('OK1')

    @app.route('/pizazz')
    async def second(request):
        return text('OK2')

    _, resp = app.test_client.get('/test')
    assert resp.text == 'OK1'
    _, resp = app.test_client.get('/pizazz')
    assert resp.text == 'OK2'
def test_dynamic_route():
    """A <name> URL parameter is captured and passed to the handler."""
    app = Sanic('test_dynamic_route')
    seen = []

    @app.route('/folder/<name>')
    async def handler(request, name):
        seen.append(name)
        return text('OK')

    _, resp = app.test_client.get('/folder/test123')
    assert resp.text == 'OK'
    assert seen[0] == 'test123'
def test_dynamic_route_string():
    """<name:string> accepts arbitrary text, including dotted filenames."""
    app = Sanic('test_dynamic_route_string')
    seen = []

    @app.route('/folder/<name:string>')
    async def handler(request, name):
        seen.append(name)
        return text('OK')

    _, resp = app.test_client.get('/folder/test123')
    assert resp.text == 'OK'
    assert seen[0] == 'test123'
    _, resp = app.test_client.get('/folder/favicon.ico')
    assert resp.text == 'OK'
    assert seen[1] == 'favicon.ico'
def test_dynamic_route_int():
    """<folder_id:int> converts to int and rejects non-numeric paths with 404."""
    app = Sanic('test_dynamic_route_int')
    seen = []

    @app.route('/folder/<folder_id:int>')
    async def handler(request, folder_id):
        seen.append(folder_id)
        return text('OK')

    _, resp = app.test_client.get('/folder/12345')
    assert resp.text == 'OK'
    assert type(seen[0]) is int
    _, resp = app.test_client.get('/folder/asdf')
    assert resp.status == 404
def test_dynamic_route_number():
    """<weight:number> converts to float and rejects malformed numbers."""
    app = Sanic('test_dynamic_route_number')
    seen = []

    @app.route('/weight/<weight:number>')
    async def handler(request, weight):
        seen.append(weight)
        return text('OK')

    _, resp = app.test_client.get('/weight/12345')
    assert resp.text == 'OK'
    assert type(seen[0]) is float
    _, resp = app.test_client.get('/weight/1234.56')
    assert resp.status == 200
    _, resp = app.test_client.get('/weight/1234-56')
    assert resp.status == 404
def test_dynamic_route_regex():
    """A custom regex parameter only matches paths satisfying the pattern."""
    app = Sanic('test_dynamic_route_regex')

    @app.route('/folder/<folder_id:[A-Za-z0-9]{0,4}>')
    async def handler(request, folder_id):
        return text('OK')

    # At most four alphanumerics match; longer or punctuated segments 404.
    _, resp = app.test_client.get('/folder/test')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test1')
    assert resp.status == 404
    _, resp = app.test_client.get('/folder/test-123')
    assert resp.status == 404
    _, resp = app.test_client.get('/folder/')
    assert resp.status == 200
def test_dynamic_route_path():
    """<path:path> greedily matches multi-segment paths."""
    app = Sanic('test_dynamic_route_path')

    @app.route('/<path:path>/info')
    async def handler(request, path):
        return text('OK')

    _, resp = app.test_client.get('/path/1/info')
    assert resp.status == 200
    _, resp = app.test_client.get('/info')
    assert resp.status == 404

    @app.route('/<path:path>')
    async def catch_all(request, path):
        return text('OK')

    _, resp = app.test_client.get('/info')
    assert resp.status == 200
    _, resp = app.test_client.get('/whatever/you/set')
    assert resp.status == 200
def test_dynamic_route_unhashable():
    """A regex parameter containing '/' (unhashable route) matches correctly."""
    app = Sanic('test_dynamic_route_unhashable')

    @app.route('/folder/<unhashable:[A-Za-z0-9/]+>/end/')
    async def handler(request, unhashable):
        return text('OK')

    _, resp = app.test_client.get('/folder/test/asdf/end/')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test///////end/')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test/end/')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test/nope/')
    assert resp.status == 404
def test_websocket_route():
    """A websocket upgrade request reaches the handler and returns 101."""
    app = Sanic('test_websocket_route')
    triggered = asyncio.Event()

    @app.websocket('/ws')
    async def handler(request, ws):
        triggered.set()

    _, resp = app.test_client.get('/ws', headers={
        'Upgrade': 'websocket',
        'Connection': 'upgrade',
        'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
        'Sec-WebSocket-Version': '13'})
    assert resp.status == 101
    assert triggered.is_set()
def test_route_duplicate():
    """Decorating the same path twice raises RouteExists (static and dynamic)."""
    app = Sanic('test_route_duplicate')

    with pytest.raises(RouteExists):
        @app.route('/test')
        async def static_a(request):
            pass

        @app.route('/test')
        async def static_b(request):
            pass

    with pytest.raises(RouteExists):
        @app.route('/test/<dynamic>/')
        async def dynamic_a(request, dynamic):
            pass

        @app.route('/test/<dynamic>/')
        async def dynamic_b(request, dynamic):
            pass
def test_method_not_allowed():
    """A GET-only route answers POST with 405."""
    app = Sanic('test_method_not_allowed')

    @app.route('/test', methods=['GET'])
    async def handler(request):
        return text('OK')

    _, resp = app.test_client.get('/test')
    assert resp.status == 200
    _, resp = app.test_client.post('/test')
    assert resp.status == 405
def test_static_add_route():
    """add_route registers plain coroutines the same as the decorator."""
    app = Sanic('test_static_add_route')

    async def first(request):
        return text('OK1')

    async def second(request):
        return text('OK2')

    app.add_route(first, '/test')
    app.add_route(second, '/test2')
    _, resp = app.test_client.get('/test')
    assert resp.text == 'OK1'
    _, resp = app.test_client.get('/test2')
    assert resp.text == 'OK2'
def test_dynamic_add_route():
    """add_route captures <name> parameters like the decorator form."""
    app = Sanic('test_dynamic_add_route')
    seen = []

    async def handler(request, name):
        seen.append(name)
        return text('OK')

    app.add_route(handler, '/folder/<name>')
    _, resp = app.test_client.get('/folder/test123')
    assert resp.text == 'OK'
    assert seen[0] == 'test123'
def test_dynamic_add_route_string():
    """add_route with <name:string> accepts text including dotted filenames."""
    app = Sanic('test_dynamic_add_route_string')
    seen = []

    async def handler(request, name):
        seen.append(name)
        return text('OK')

    app.add_route(handler, '/folder/<name:string>')
    _, resp = app.test_client.get('/folder/test123')
    assert resp.text == 'OK'
    assert seen[0] == 'test123'
    _, resp = app.test_client.get('/folder/favicon.ico')
    assert resp.text == 'OK'
    assert seen[1] == 'favicon.ico'
def test_dynamic_add_route_int():
    """add_route with <folder_id:int> converts to int, 404s otherwise."""
    app = Sanic('test_dynamic_add_route_int')
    seen = []

    async def handler(request, folder_id):
        seen.append(folder_id)
        return text('OK')

    app.add_route(handler, '/folder/<folder_id:int>')
    _, resp = app.test_client.get('/folder/12345')
    assert resp.text == 'OK'
    assert type(seen[0]) is int
    _, resp = app.test_client.get('/folder/asdf')
    assert resp.status == 404
def test_dynamic_add_route_number():
    """add_route with <weight:number> converts to float, 404s otherwise."""
    app = Sanic('test_dynamic_add_route_number')
    seen = []

    async def handler(request, weight):
        seen.append(weight)
        return text('OK')

    app.add_route(handler, '/weight/<weight:number>')
    _, resp = app.test_client.get('/weight/12345')
    assert resp.text == 'OK'
    assert type(seen[0]) is float
    _, resp = app.test_client.get('/weight/1234.56')
    assert resp.status == 200
    _, resp = app.test_client.get('/weight/1234-56')
    assert resp.status == 404
def test_dynamic_add_route_regex():
    """add_route with a custom regex parameter enforces the pattern."""
    app = Sanic('test_dynamic_route_int')

    async def handler(request, folder_id):
        return text('OK')

    app.add_route(handler, '/folder/<folder_id:[A-Za-z0-9]{0,4}>')
    _, resp = app.test_client.get('/folder/test')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test1')
    assert resp.status == 404
    _, resp = app.test_client.get('/folder/test-123')
    assert resp.status == 404
    _, resp = app.test_client.get('/folder/')
    assert resp.status == 200
def test_dynamic_add_route_unhashable():
    """add_route with a '/'-containing regex parameter matches correctly."""
    app = Sanic('test_dynamic_add_route_unhashable')

    async def handler(request, unhashable):
        return text('OK')

    app.add_route(handler, '/folder/<unhashable:[A-Za-z0-9/]+>/end/')
    _, resp = app.test_client.get('/folder/test/asdf/end/')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test///////end/')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test/end/')
    assert resp.status == 200
    _, resp = app.test_client.get('/folder/test/nope/')
    assert resp.status == 404
def test_add_route_duplicate():
    """add_route on an already-registered pattern raises RouteExists."""
    app = Sanic('test_add_route_duplicate')

    with pytest.raises(RouteExists):
        async def static_a(request):
            pass

        async def static_b(request):
            pass

        app.add_route(static_a, '/test')
        app.add_route(static_b, '/test')

    with pytest.raises(RouteExists):
        async def dynamic_a(request, dynamic):
            pass

        async def dynamic_b(request, dynamic):
            pass

        app.add_route(dynamic_a, '/test/<dynamic>/')
        app.add_route(dynamic_b, '/test/<dynamic>/')
def test_add_route_method_not_allowed():
    """add_route with methods=['GET'] answers POST with 405."""
    app = Sanic('test_add_route_method_not_allowed')

    async def handler(request):
        return text('OK')

    app.add_route(handler, '/test', methods=['GET'])
    _, resp = app.test_client.get('/test')
    assert resp.status == 200
    _, resp = app.test_client.post('/test')
    assert resp.status == 405
def test_remove_static_route():
    """remove_route makes previously reachable static routes 404."""
    app = Sanic('test_remove_static_route')

    async def first(request):
        return text('OK1')

    async def second(request):
        return text('OK2')

    app.add_route(first, '/test')
    app.add_route(second, '/test2')
    _, resp = app.test_client.get('/test')
    assert resp.status == 200
    _, resp = app.test_client.get('/test2')
    assert resp.status == 200

    app.remove_route('/test')
    app.remove_route('/test2')
    _, resp = app.test_client.get('/test')
    assert resp.status == 404
    _, resp = app.test_client.get('/test2')
    assert resp.status == 404
def test_remove_dynamic_route():
    """remove_route works with a parameterized pattern too."""
    app = Sanic('test_remove_dynamic_route')

    async def handler(request, name):
        return text('OK')

    app.add_route(handler, '/folder/<name>')
    _, resp = app.test_client.get('/folder/test123')
    assert resp.status == 200

    app.remove_route('/folder/<name>')
    _, resp = app.test_client.get('/folder/test123')
    assert resp.status == 404
def test_remove_inexistent_route():
    """Removing a route that was never registered raises RouteDoesNotExist."""
    app = Sanic('test_remove_inexistent_route')
    with pytest.raises(RouteDoesNotExist):
        app.remove_route('/test')
def test_removing_slash():
    """GET and POST on the same pattern register exactly two routes."""
    app = Sanic(__name__)

    @app.get('/rest/<resource>')
    def get(_):
        pass

    @app.post('/rest/<resource>')
    def post(_):
        pass

    assert len(app.router.routes_all.keys()) == 2
def test_remove_unhashable_route():
    """remove_route also works for '/'-containing (unhashable) patterns."""
    app = Sanic('test_remove_unhashable_route')

    async def handler(request, unhashable):
        return text('OK')

    app.add_route(handler, '/folder/<unhashable:[A-Za-z0-9/]+>/end/')
    for path in ('/folder/test/asdf/end/', '/folder/test///////end/',
                 '/folder/test/end/'):
        _, resp = app.test_client.get(path)
        assert resp.status == 200

    app.remove_route('/folder/<unhashable:[A-Za-z0-9/]+>/end/')
    for path in ('/folder/test/asdf/end/', '/folder/test///////end/',
                 '/folder/test/end/'):
        _, resp = app.test_client.get(path)
        assert resp.status == 404
def test_remove_route_without_clean_cache():
    """With clean_cache=False, a removed route stays served from the cache."""
    app = Sanic('test_remove_static_route')

    async def handler(request):
        return text('OK')

    app.add_route(handler, '/test')
    _, resp = app.test_client.get('/test')
    assert resp.status == 200

    app.remove_route('/test', clean_cache=True)
    _, resp = app.test_client.get('/test')
    assert resp.status == 404

    app.add_route(handler, '/test')
    _, resp = app.test_client.get('/test')
    assert resp.status == 200

    # Cache not cleared: the stale cached resolution still answers 200.
    app.remove_route('/test', clean_cache=False)
    _, resp = app.test_client.get('/test')
    assert resp.status == 200
def test_overload_routes():
    """Different methods may share a path; re-registering a method raises."""
    app = Sanic('test_dynamic_route')

    @app.route('/overload', methods=['GET'])
    async def get_handler(request):
        return text('OK1')

    @app.route('/overload', methods=['POST', 'PUT'])
    async def post_put_handler(request):
        return text('OK2')

    _, resp = app.test_client.get('/overload')
    assert resp.text == 'OK1'
    _, resp = app.test_client.post('/overload')
    assert resp.text == 'OK2'
    _, resp = app.test_client.put('/overload')
    assert resp.text == 'OK2'
    _, resp = app.test_client.delete('/overload')
    assert resp.status == 405

    # PUT is already claimed above, so this overload conflicts.
    with pytest.raises(RouteExists):
        @app.route('/overload', methods=['PUT', 'DELETE'])
        async def conflicting_handler(request):
            return text('Duplicated')
def test_unmergeable_overload_routes():
    """A methods=None (catch-all) route cannot be merged with method routes."""
    app = Sanic('test_dynamic_route')

    @app.route('/overload_whole', methods=None)
    async def catch_all(request):
        return text('OK1')

    with pytest.raises(RouteExists):
        @app.route('/overload_whole', methods=['POST', 'PUT'])
        async def conflicting(request):
            return text('Duplicated')

    _, resp = app.test_client.get('/overload_whole')
    assert resp.text == 'OK1'
    _, resp = app.test_client.post('/overload_whole')
    assert resp.text == 'OK1'

    @app.route('/overload_part', methods=['GET'])
    async def get_only(request):
        return text('OK1')

    # A bare @app.route defaults to a route that overlaps the GET one.
    with pytest.raises(RouteExists):
        @app.route('/overload_part')
        async def conflicting_part(request):
            return text('Duplicated')

    _, resp = app.test_client.get('/overload_part')
    assert resp.text == 'OK1'
    _, resp = app.test_client.post('/overload_part')
    assert resp.status == 405
| {
"content_hash": "7ee8e1904195a2c3b4be06147df8b76f",
"timestamp": "",
"source": "github",
"line_count": 697,
"max_line_length": 70,
"avg_line_length": 26.24390243902439,
"alnum_prop": 0.629400830964356,
"repo_name": "jrocketfingers/sanic",
"id": "b3e193550f73b24577b62bd99b157dbdf62fc63b",
"size": "18292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_routes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "482"
},
{
"name": "Makefile",
"bytes": "108"
},
{
"name": "Python",
"bytes": "231242"
}
],
"symlink_target": ""
} |
"""
A spider that generate light requests to meassure QPS troughput
usage:
scrapy runspider qpsclient.py --loglevel=INFO --set RANDOMIZE_DOWNLOAD_DELAY=0 --set CONCURRENT_REQUESTS=50 -a qps=10 -a latency=0.3
"""
from ants.spider import Spider
from ants.http import Request
class QPSSpider(Spider):
    """Spider that issues lightweight requests in a loop to measure QPS throughput.

    Configure with -a qps=N (requests per second) or -a download_delay=S,
    plus optional -a latency=S (server-side response delay, seconds) and
    -a slots=N to spread traffic over N loopback aliases.
    """

    name = 'qps'
    benchurl = 'http://localhost:8880/'

    # Max concurrency is limited by global CONCURRENT_REQUESTS setting
    max_concurrent_requests = 8

    # Requests per second goal
    qps = None  # same as: 1 / download_delay
    download_delay = None

    # time in seconds to delay server responses
    latency = None

    # number of slots to create
    slots = 1

    def __init__(self, *a, **kw):
        super(QPSSpider, self).__init__(*a, **kw)
        # qps takes precedence over an explicitly supplied download_delay.
        if self.qps is not None:
            self.qps = float(self.qps)
            self.download_delay = 1 / self.qps
        elif self.download_delay is not None:
            self.download_delay = float(self.download_delay)

    def start_requests(self):
        """Yield an endless round-robin stream of benchmark requests."""
        url = self.benchurl
        if self.latency is not None:
            url += '?latency={0}'.format(self.latency)
        slots = int(self.slots)
        if slots > 1:
            # Aliases 127.0.0.1 .. 127.0.0.N each get their own downloader slot.
            # Fix: was xrange(), which raises NameError on Python 3; range()
            # behaves identically here on both Python 2 and 3.
            urls = [url.replace('localhost', '127.0.0.%d' % (x + 1))
                    for x in range(slots)]
        else:
            urls = [url]

        idx = 0
        while True:
            yield Request(urls[idx % len(urls)], dont_filter=True)
            idx += 1

    def parse(self, response):
        """Responses are discarded; only request throughput matters."""
        pass
| {
"content_hash": "7fe27a92487ca0baeb52c45aa2f075fd",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 136,
"avg_line_length": 27.745454545454546,
"alnum_prop": 0.5943643512450852,
"repo_name": "wcong/ants",
"id": "f399f7aa38285299a94de567adbf9fa214f42306",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extras/qpsclient.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "721345"
},
{
"name": "Shell",
"bytes": "1644"
}
],
"symlink_target": ""
} |
from numbers import Number
import theano
from theano import tensor as TT
import numpy as np
from . import origin
class Input(object):
    """Inputs are objects that provide real-valued input to ensembles.

    Any callable can be used as an input function.
    """

    def __init__(self, name, values, zero_after_time=None):
        """
        :param string name: name of the function input
        :param values: defines the output decoded_output
        :type values: float, function, or dict of time:value pairs
        :param float zero_after_time:
            time after which to set function output = 0 (s)
        """
        self.name = name
        self.t = 0  # current simulation time (s); advanced externally
        self.function = None
        self.zero_after_time = zero_after_time
        self.zeroed = False  # True once output has been forced to zero
        self.change_time = None  # next scheduled change time (dict input only)
        self.origin = {}
        # if value parameter is a python function
        if callable(values):
            self.origin['X'] = origin.Origin(func=values)
        # if value is dict of time:value pairs
        elif isinstance(values, dict):
            # earliest scheduled change determines the initial output size
            self.change_time = sorted(values.keys())[0]
            # check for size of dict elements
            if isinstance(values[self.change_time], list):
                initial_value = np.zeros(len(values[self.change_time]))
            else: initial_value = np.zeros(1)
            self.origin['X'] = origin.Origin(func=None,
                initial_value=initial_value)
            self.values = values
        else:
            # constant input: hold the given value
            self.origin['X'] = origin.Origin(func=None, initial_value=values)

    def reset(self):
        """Resets the function output state values.
        """
        self.zeroed = False

    def theano_tick(self):
        """Move function input forward in time.

        Applies, in order: permanent zeroing after zero_after_time,
        scheduled value changes (dict input), then function evaluation.
        """
        # once zeroed, the output stays zero until reset()
        if self.zeroed:
            return

        # zero output
        if self.zero_after_time is not None and self.t > self.zero_after_time:
            self.origin['X'].decoded_output.set_value(
                np.float32(np.zeros(self.origin['X'].dimensions)))
            self.zeroed = True

        # change value, then advance change_time to the next scheduled key
        if self.change_time is not None and self.t > self.change_time:
            self.origin['X'].decoded_output.set_value(
                np.float32(np.array([self.values[self.change_time]])))
            index = sorted(self.values.keys()).index(self.change_time)
            if index < len(self.values) - 1:
                self.change_time = sorted(self.values.keys())[index+1]
            else: self.change_time = None

        # update output decoded_output by evaluating the input function at t
        if self.origin['X'].func is not None:
            values = self.origin['X'].func(self.t)

            # if value is a scalar output, make it a list
            if isinstance(values, Number):
                values = [values]

            # cast as float32 for consistency / speed,
            # but _after_ it's been made a list
            self.origin['X'].decoded_output.set_value(np.float32(values))
| {
"content_hash": "43e5bb740be775b446d40f97e21c12e0",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 34.13793103448276,
"alnum_prop": 0.5707070707070707,
"repo_name": "ctn-waterloo/nengo_theano",
"id": "9a98f7cd84aef1fa35658b0078c6b2c50c23d896",
"size": "2970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nengo_theano/input.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "233870"
},
{
"name": "Shell",
"bytes": "5096"
}
],
"symlink_target": ""
} |
''' Cardinal - An Open Source Cisco Wireless Access Point Controller
MIT License
Copyright © 2019 Cardinal Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import logging
import MySQLdb
import os
import subprocess
from configparser import ConfigParser
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import session
from flask import url_for
from werkzeug.security import check_password_hash
# SYSTEM VARIABLES
# Path to the Cardinal INI configuration file, injected via the environment.
cardinalConfig = os.environ['CARDINALCONFIG']
# NOTE(review): filemode='w' truncates the previous log on every start — confirm intended.
logging.basicConfig(filename='/var/log/cardinal/cardinal.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')

# FLASK APP INITIALIZATION
Cardinal = Flask(__name__)
# NOTE(review): hard-coded session secret key — should be loaded from config, not source.
Cardinal.secret_key = "SECRET_KEY_HERE"

# MySQL AUTHENTICATION & HANDLING
# Database credentials are read from the [cardinal] section of the config file.
mysqlConfig = ConfigParser()
mysqlConfig.read("{}".format(cardinalConfig))
mysqlHost = mysqlConfig.get('cardinal', 'dbserver')
mysqlUser = mysqlConfig.get('cardinal', 'username')
mysqlPass = mysqlConfig.get('cardinal', 'password')
mysqlDb = mysqlConfig.get('cardinal', 'dbname')
def cardinalSql():
    """Open and return a new MySQL connection using the configured credentials."""
    return MySQLdb.connect(host=mysqlHost, user=mysqlUser,
                           passwd=mysqlPass, db=mysqlDb)
# CARDINAL FLASK ROUTES
@Cardinal.route("/")
def index():
if session.get("username") is not None:
return redirect(url_for('dashboard'))
else:
return render_template("index.html")
@Cardinal.route("/dashboard")
def dashboard():
if session.get("username") is not None:
return render_template("dashboard.html")
else:
return redirect(url_for('index'))
@Cardinal.route("/login", methods=['POST'])
def login():
username = request.form['username']
password = request.form['password']
conn = cardinalSql()
loginCursor = conn.cursor()
loginSql = loginCursor.execute("SELECT username,password FROM users WHERE username = '{}'".format(username))
userInfo = loginCursor.fetchall()
loginCursor.close()
conn.close()
if loginSql > 0:
for info in userInfo:
dbUsername = info[0]
dbHash = info[1]
else:
logging.warning("Unauthorized access detected. Someone tried logging into Cardinal but was unsuccessful.")
return 'Authentication failed. Please check your credentials and try again by clicking <a href="/">here</a>.'
if check_password_hash(dbHash,password):
session['username'] = username
return redirect(url_for('dashboard'))
elif dbUsername is None:
logging.warning("Unauthorized access detected. Someone tried logging into Cardinal but was unsuccessful.")
return 'Authentication failed. Please check your credentials and try again by clicking <a href="/">here</a>.'
else:
logging.warning("Unauthorized access detected. Someone tried logging into Cardinal but was unsuccessful.")
return 'Authentication failed. Please check your credentials and try again by clicking <a href="/">here</a>.'
@Cardinal.route("/logout")
def logout():
session.pop('username', None)
return redirect(url_for('index'))
@Cardinal.route("/add-ap", methods=["GET"])
def addAp():
if session.get("username") is not None:
status = request.args.get('status')
conn = cardinalSql()
apGroupCursor = conn.cursor()
apGroupCursor.execute("SELECT ap_group_id,ap_group_name FROM access_point_groups")
apGroups = apGroupCursor.fetchall()
apGroupCursor.close()
conn.close()
return render_template("add-ap.html", status=status, apGroups=apGroups)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-add-ap", methods=["POST"])
def doAddAp():
if request.method == 'POST':
apName = request.form["ap_name"]
apIp = request.form["ap_ip"]
apSshUsername = request.form["ssh_username"]
apSshPassword = request.form["ssh_password"]
apGroupId = request.form["group_id"]
apSnmp = request.form["ap_snmp"]
status = "Success! {} was successfully registered!".format(apName)
conn = cardinalSql()
addApCursor = conn.cursor()
addApCursor.execute("INSERT INTO access_points (ap_name, ap_ip, ap_ssh_username, ap_ssh_password, ap_snmp, ap_group_id) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}')".format(apName, apIp, apSshUsername, apSshPassword, apSnmp, apGroupId))
addApCursor.close()
conn.commit()
conn.close()
return redirect(url_for('addAp', status=status))
@Cardinal.route("/delete-ap", methods=["GET"])
def deleteAp():
if session.get("username") is not None:
status = request.args.get('status')
conn = cardinalSql()
apCursor = conn.cursor()
apCursor.execute("SELECT ap_id,ap_name FROM access_points")
aps = apCursor.fetchall()
apCursor.close()
conn.close()
return render_template("delete-ap.html", aps=aps, status=status)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-delete-ap", methods=["POST"])
def doDeleteAp():
if request.method == 'POST':
apId = request.form["ap_id"]
conn = cardinalSql()
deleteApNameCursor = conn.cursor()
deleteApNameCursor.execute("SELECT ap_name FROM access_points WHERE ap_id = '{}'".format(apId))
apName = deleteApNameCursor.fetchone()[0]
deleteApNameCursor.close()
status = "Success! {} was successfully registered!".format(apName)
try:
deleteApCursor = conn.cursor()
deleteApCursor.execute("DELETE FROM access_points WHERE ap_id = '{}'".format(apId))
deleteApCursor.close()
except MySQLdb.Error as e:
status = e
finally:
conn.commit()
conn.close()
return redirect(url_for('deleteAp', status=status))
@Cardinal.route("/add-ap-group", methods=["GET"])
def addApGroup():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("add-ap-group.html", status=status)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-add-ap-group", methods=["POST"])
def doAddApGroup():
if request.method == 'POST':
apGroupName = request.form["ap_group_name"]
status = "Success! {} was successfully registered!".format(apGroupName)
conn = cardinalSql()
addApGroupCursor = conn.cursor()
addApGroupCursor.execute("INSERT INTO access_point_groups (ap_group_name) VALUES ('{}')".format(apGroupName))
addApGroupCursor.close()
conn.commit()
conn.close()
return render_template('add-ap-group.html', status=status)
@Cardinal.route("/delete-ap-group", methods=["GET"])
def deleteApGroup():
if session.get("username") is not None:
conn = cardinalSql()
status = request.args.get('status')
deleteApGroupCursor = conn.cursor()
deleteApGroupCursor.execute("SELECT ap_group_id,ap_group_name FROM access_point_groups")
apGroups = deleteApGroupCursor.fetchall()
deleteApGroupCursor.close()
conn.close()
return render_template("delete-ap-group.html", status=status, apGroups=apGroups)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-delete-ap-group", methods=["POST"])
def doDeleteApGroup():
if request.method == 'POST':
apGroupId = request.form["ap_group_id"]
conn = cardinalSql()
deleteApGroupNameCursor = conn.cursor()
deleteApGroupNameCursor.execute("SELECT ap_group_name FROM access_point_groups WHERE ap_group_id = '{}'".format(apGroupId))
apGroupName = deleteApGroupNameCursor.fetchone()[0]
status = "Success! {} was successfully deleted!".format(apGroupName)
deleteApGroupCursor = conn.cursor()
deleteApGroupCursor.execute("DELETE FROM access_point_groups WHERE ap_group_id = '{}'".format(apGroupId))
conn.commit()
conn.close()
return redirect(url_for('deleteApGroup', status=status))
@Cardinal.route("/network-tools", methods=["GET"])
def networkTools():
if session.get("username") is not None:
return render_template("network-tools.html")
else:
return redirect(url_for('index'))
@Cardinal.route("/tools-output", methods=["GET"])
def networkToolsOutput():
if session.get("username") is not None:
commandOutput = request.args.get("commandOutput")
return render_template("network-tools-output.html", commandOutput=commandOutput)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-nmap", methods=["POST"])
def doNmap():
if request.method == 'POST':
ip = request.form["network_ip"]
commandOutput = subprocess.check_output("nmap -v -A {}".format(ip), shell=True)
return redirect(url_for('networkToolsOutput', commandOutput=commandOutput))
@Cardinal.route("/do-ping", methods=["POST"])
def doPing():
if request.method == 'POST':
ip = request.form["network_ip"]
commandOutput = subprocess.check_output("ping -c 4 {}".format(ip), shell=True)
return redirect(url_for('networkToolsOutput', commandOutput=commandOutput))
@Cardinal.route("/do-tracert", methods=["POST"])
def doTracert():
if request.method == 'POST':
ip = request.form["network_ip"]
commandOutput = subprocess.check_output("traceroute {}".format(ip), shell=True)
return redirect(url_for('networkToolsOutput', commandOutput=commandOutput))
@Cardinal.route("/do-dig", methods=["POST"])
def doDig():
if request.method == 'POST':
ip = request.form["network_ip"]
commandOutput = subprocess.check_output("dig {}".format(ip), shell=True)
return redirect(url_for('networkToolsOutput', commandOutput=commandOutput))
@Cardinal.route("/do-curl", methods=["POST"])
def doCurl():
if request.method == 'POST':
ip = request.form["network_ip"]
commandOutput = subprocess.check_output("curl -I {}".format(ip), shell=True)
return redirect(url_for('networkToolsOutput', commandOutput=commandOutput))
@Cardinal.route("/choose-ap-dashboard", methods=["GET"])
def chooseApDashboard():
if session.get("username") is not None:
conn = cardinalSql()
apCursor = conn.cursor()
apCursor.execute("SELECT ap_id,ap_name FROM access_points")
aps = apCursor.fetchall()
apCursor.close()
conn.close()
return render_template("choose-ap-dashboard.html", aps=aps)
else:
return redirect(url_for('index'))
@Cardinal.route("/manage-ap-dashboard", methods=["POST"])
def manageApDashboard():
if request.method == 'POST':
apId = request.form["ap_id"]
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_total_clients,ap_bandwidth FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
conn.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apTotalClients = info[2]
apBandwidth = info[3]
session['apId'] = apId
session['apName'] = apName
session['apIp'] = apIp
session['apTotalClients'] = apTotalClients
session['apBandwidth'] = apBandwidth
return render_template("manage-ap-dashboard.html")
else:
return redirect(url_for('index'))
@Cardinal.route("/choose-ap-group-dashboard", methods=["GET"])
def chooseApGroupDashboard():
if session.get("username") is not None:
conn = cardinalSql()
apGroupCursor = conn.cursor()
apGroupCursor.execute("SELECT ap_group_id,ap_group_name FROM access_point_groups")
apGroups = apGroupCursor.fetchall()
apGroupCursor.close()
conn.close()
return render_template("choose-ap-group-dashboard.html", apGroups=apGroups)
else:
return redirect(url_for('index'))
@Cardinal.route("/manage-ap-group-dashboard", methods=["POST"])
def manageApGroupDashboard():
if request.method == 'POST':
apGroupId = request.form["ap_group_id"]
conn = cardinalSql()
apGroupInfoCursor = conn.cursor()
apGroupInfoCursor.execute("SELECT ap_group_name FROM access_point_groups WHERE ap_group_id = '{}'".format(apGroupId))
apGroupInfo = apGroupInfoCursor.fetchall()
apGroupInfoCursor.close()
for info in apGroupInfo:
apGroupName = info[0]
session['apGroupId'] = apGroupId
session['apGroupName'] = apGroupName
return render_template("manage-ap-group-dashboard.html")
else:
return redirect(url_for('index'))
@Cardinal.route("/config-ap-ip", methods=["GET"])
def configApIp():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("config-ap-ip.html", status=status)
@Cardinal.route("/do-config-ap-ip", methods=["POST"])
def doConfigApIp():
if request.method == 'POST':
apId = session.get('apId')
apNewIp = request.form["ap_new_ip"]
apSubnetMask = request.form["ap_subnetmask"]
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --change-ip {0} {1} {2} {3} {4}".format(apIp,apSshUsername,apSshPassword,apNewIp,apSubnetMask), shell=True)
status = "{}'s IP was successfully updated!".format(apName)
sqlChangeApIpCursor = conn.cursor()
sqlChangeApIpCursor.execute("UPDATE access_points SET ap_ip = '{0}' WHERE ap_id = '{1}'".format(apNewIp,apId))
sqlChangeApIpCursor.close()
conn.close()
return redirect(url_for('configApIp', status=status))
@Cardinal.route("/config-ap-name", methods=["GET"])
def configApName():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("config-ap-name.html", status=status)
@Cardinal.route("/do-config-ap-name", methods=["POST"])
def doConfigApName():
if request.method == 'POST':
apId = session.get('apId')
apNewName = request.form["ap_name"]
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --change-name {0} {1} {2} {3}".format(apIp,apSshUsername,apSshPassword,apNewName), shell=True)
status = "AP Name Changed from {0} to {1}".format(apName,apNewName)
sqlChangeApNameCursor = conn.cursor()
sqlChangeApNameCursor.execute("UPDATE access_points SET ap_name = '{0}' WHERE ap_id = '{1}'".format(apName,apId))
sqlChangeApNameCursor.close()
conn.close()
return redirect(url_for('configApName', status=status))
@Cardinal.route("/manage-ap-tftp-backup", methods=["GET"])
def manageApTftpBackup():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("manage-ap-tftp-backup.html", status=status)
@Cardinal.route("/manage-ap-tftp-group-backup", methods=["GET"])
def manageApTftpGroupBackup():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("manage-ap-tftp-group-backup.html", status=status)
@Cardinal.route("/do-ap-tftp-backup", methods=["POST"])
def doApTftpBackup():
if request.method == 'POST':
apId = session.get('apId')
tftpIp = request.form["tftp_ip"]
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --tftp-backup {0} {1} {2} {3}".format(apIp,apSshUsername,apSshPassword,tftpIp), shell=True)
status = "TFTP Config Backup for {} Successfully Initiated!".format(apName)
conn.close()
if request.form["group_backup"] == 'True':
apGroupId = session.get('apGroupId', None)
apGroupName = session.get('apGroupName', None)
tftpIp = request.form["tftp_ip"]
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_group_id = '{}'".format(apGroupId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apGroupName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --tftp-backup {0} {1} {2} {3}".format(apIp,apSshUsername,apSshPassword,tftpIp), shell=True)
status = "TFTP Config Backup for {} Successfully Initiated!".format(apGroupName)
conn.close()
return redirect(url_for('manageApTftpBackupGroup', status=status))
return redirect(url_for('manageApTftpBackup', status=status))
@Cardinal.route("/config-ap-http", methods=["GET"])
def configApHttp():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("config-ap-http.html", status=status)
@Cardinal.route("/do-enable-ap-http", methods=["POST"])
def doEnableApHttp():
if request.method == 'POST':
apId = session.get('apId')
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --enable-http {0} {1} {2}".format(apIp,apSshUsername,apSshPassword), shell=True)
status = "HTTP Server for {} Successfully Enabled".format(apName)
conn.close()
return redirect(url_for('configApHttp', status=status))
@Cardinal.route("/do-disable-ap-http", methods=["POST"])
def doDisableApHttp():
if request.method == 'POST':
apId = session.get('apId')
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --disable-http {0} {1} {2}".format(apIp,apSshUsername,apSshPassword), shell=True)
status = "HTTP Server for {} Successfully Disabled".format(apName)
conn.close()
return redirect(url_for('configApHttp', status=status))
@Cardinal.route("/config-ap-radius", methods=["GET"])
def configApRadius():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("config-ap-radius.html", status=status)
@Cardinal.route("/do-enable-ap-radius", methods=["POST"])
def doEnableApRadius():
if request.method == 'POST':
apId = session.get('apId')
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --enable-radius {0} {1} {2}".format(apIp,apSshUsername,apSshPassword), shell=True)
status = "RADIUS for {} Successfully Enabled".format(apName)
conn.close()
return redirect(url_for('configApRadius', status=status))
@Cardinal.route("/do-disable-ap-http", methods=["POST"])
def doDisableApRadius():
if request.method == 'POST':
apId = session.get('apId')
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --disable-radius {0} {1} {2}".format(apIp,apSshUsername,apSshPassword), shell=True)
status = "RADIUS Server for {} Successfully Disabled".format(apName)
conn.close()
return redirect(url_for('configApRadius', status=status))
@Cardinal.route("/config-ap-snmp", methods=["GET"])
def configApSnmp():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("config-ap-snmp.html", status=status)
@Cardinal.route("/do-enable-ap-snmp", methods=["POST"])
def doEnableApSnmp():
if request.method == 'POST':
apId = session.get('apId')
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --enable-snmp {0} {1} {2}".format(apIp,apSshUsername,apSshPassword), shell=True)
status = "SNMP for {} Successfully Enabled".format(apName)
conn.close()
return redirect(url_for('configApSnmp', status=status))
@Cardinal.route("/do-disable-ap-snmp", methods=["POST"])
def doDisableApSnmp():
if request.method == 'POST':
apId = session.get('apId')
conn = cardinalSql()
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --disable-snmp {0} {1} {2}".format(apIp,apSshUsername,apSshPassword), shell=True)
status = "SNMP Server for {} Successfully Disabled".format(apName)
conn.close()
return redirect(url_for('configApSnmp', status=status))
@Cardinal.route("/add-ssids", methods=["GET"])
def addSsids():
if session.get("username") is not None:
return render_template("add-ssids.html")
@Cardinal.route("/add-ssid-24ghz", methods=["GET"])
def addSsid24Ghz():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("add-ssid-24ghz.html", status=status)
@Cardinal.route("/do-add-ssid-24ghz", methods=["POST"])
def doAddSsid24Ghz():
if request.method == 'POST':
ssidName = request.form["ssid_name"]
vlan = request.form["vlan"]
wpa2Psk = request.form["wpa2_psk"]
bridgeGroup = request.form["bridge_group_id"]
radioId = request.form["radio_sub_id"]
gigaId = request.form["giga_sub_id"]
conn = cardinalSql()
addSsid24GhzCursor = conn.cursor()
addSsid24GhzCursor.execute("INSERT INTO ssids_24ghz (ap_ssid_name, ap_ssid_vlan, ap_ssid_wpa2, ap_ssid_bridge_id, ap_ssid_radio_id, ap_ssid_ethernet_id) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}')".format(ssidName,vlan,wpa2Psk,bridgeGroup,radioId,gigaId))
addSsid24GhzCursor.close()
conn.commit()
conn.close()
status = "Success! {} was successfully registered!".format(ssidName)
return redirect(url_for('addSsid24Ghz', status=status))
@Cardinal.route("/add-ssid-5ghz", methods=["GET"])
def addSsid5Ghz():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("add-ssid-5ghz.html", status=status)
@Cardinal.route("/do-add-ssid-5ghz", methods=["POST"])
def doAddSsid5Ghz():
if request.method == 'POST':
ssidName = request.form["ssid_name"]
vlan = request.form["vlan"]
wpa2Psk = request.form["wpa2_psk"]
bridgeGroup = request.form["bridge_group_id"]
radioId = request.form["radio_sub_id"]
gigaId = request.form["giga_sub_id"]
conn = cardinalSql()
addSsid5GhzCursor = conn.cursor()
addSsid5GhzCursor.execute("INSERT INTO ssids_5ghz (ap_ssid_name, ap_ssid_vlan, ap_ssid_wpa2, ap_ssid_bridge_id, ap_ssid_radio_id, ap_ssid_ethernet_id) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}')".format(ssidName,vlan,wpa2Psk,bridgeGroup,radioId,gigaId))
addSsid5GhzCursor.close()
conn.commit()
conn.close()
status = "Success! {} was successfully registered!".format(ssidName)
return redirect(url_for('addSsid5Ghz', status=status))
@Cardinal.route("/add-ssid-24ghz-radius", methods=["GET"])
def addSsid24GhzRadius():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("add-ssid-24ghz-radius.html", status=status)
@Cardinal.route("/do-add-ssid-24ghz-radius", methods=["POST"])
def doAddSsid24GhzRadius():
if request.method == 'POST':
ssidName = request.form["ssid_name"]
vlan = request.form["vlan"]
bridgeGroup = request.form["bridge_group_id"]
radioId = request.form["radio_sub_id"]
gigaId = request.form["giga_sub_id"]
radiusIp = request.form["radius_ip"]
sharedSecret = request.form["shared_secret"]
authPort = request.form["auth_port"]
acctPort = request.form["acct_port"]
radiusTimeout = request.form["radius_timeout"]
radiusGroup = request.form["radius_group"]
methodList = request.form["method_list"]
conn = cardinalSql()
addSsid24GhzRadiusCursor = conn.cursor()
addSsid24GhzRadiusCursor.execute("INSERT INTO ssids_24ghz_radius (ap_ssid_name, ap_ssid_vlan, ap_ssid_bridge_id, ap_ssid_radio_id, ap_ssid_ethernet_id, ap_ssid_radius_server, ap_ssid_radius_secret, ap_ssid_authorization_port, ap_ssid_accounting_port, ap_ssid_radius_timeout, ap_ssid_radius_group, ap_ssid_radius_method_list) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}', '{11}')".format(ssidName,vlan,bridgeGroup,radioId,gigaId,radiusIp,sharedSecret,authPort,acctPort,radiusTimeout,radiusGroup,methodList))
addSsid24GhzRadiusCursor.close()
conn.commit()
conn.close()
status = "Success! {} was successfully registered!".format(ssidName)
return redirect(url_for('addSsid24GhzRadius', status=status))
@Cardinal.route("/add-ssid-5ghz-radius", methods=["GET"])
def addSsid5GhzRadius():
if session.get("username") is not None:
status = request.args.get('status')
return render_template("add-ssid-5ghz-radius.html", status=status)
@Cardinal.route("/do-add-ssid-5ghz-radius", methods=["POST"])
def doAddSsid5GhzRadius():
if request.method == 'POST':
ssidName = request.form["ssid_name"]
vlan = request.form["vlan"]
bridgeGroup = request.form["bridge_group_id"]
radioId = request.form["radio_sub_id"]
gigaId = request.form["giga_sub_id"]
radiusIp = request.form["radius_ip"]
sharedSecret = request.form["shared_secret"]
authPort = request.form["auth_port"]
acctPort = request.form["acct_port"]
radiusTimeout = request.form["radius_timeout"]
radiusGroup = request.form["radius_group"]
methodList = request.form["method_list"]
conn = cardinalSql()
addSsid5GhzRadiusCursor = conn.cursor()
addSsid5GhzRadiusCursor.execute("INSERT INTO ssids_5ghz_radius (ap_ssid_name, ap_ssid_vlan, ap_ssid_bridge_id, ap_ssid_radio_id, ap_ssid_ethernet_id, ap_ssid_radius_server, ap_ssid_radius_secret, ap_ssid_authorization_port, ap_ssid_accounting_port, ap_ssid_radius_timeout, ap_ssid_radius_group, ap_ssid_radius_method_list) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}', '{11}')".format(ssidName,vlan,bridgeGroup,radioId,gigaId,radiusIp,sharedSecret,authPort,acctPort,radiusTimeout,radiusGroup,methodList))
addSsid5GhzRadiusCursor.close()
conn.commit()
conn.close()
status = "Success! {} was successfully registered!".format(ssidName)
return redirect(url_for('addSsid5GhzRadius', status=status))
@Cardinal.route("/deploy-ssids", methods=["GET"])
def deploySsids():
if session.get("username") is not None:
return render_template("deploy-ssids.html")
@Cardinal.route("/deploy-ssid-24ghz", methods=["GET"])
def deploySsid24Ghz():
if session.get("username") is not None:
conn = cardinalSql()
status = request.args.get('status')
deploySsidCursor = conn.cursor()
deploySsidCursor.execute("SELECT ap_ssid_id,ap_ssid_name FROM ssids_24ghz")
ssids = deploySsidCursor.fetchall()
deploySsidCursor.close()
conn.close()
return render_template("deploy-ssid-24ghz.html", status=status, ssids=ssids)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-deploy-ssid-24ghz", methods=["POST"])
def doDeploySsid24Ghz():
ssidId = request.form["ssid_id"]
apId = session.get('apId')
apName = session.get('apName')
apGroupId = session.get('apGroupId')
apGroupName = session.get('apGroupName')
conn = cardinalSql()
try:
checkSsidRelationship = conn.cursor()
checkSsidRelationship.execute("INSERT INTO ssids_24ghz_deployed (ap_id,ssid_id) VALUES ('{}', '{}')".format(apId,ssidId))
checkSsidRelationship.close()
except MySQLdb.Error as e:
status = "{0} already has the SSID deployed: {1}".format(apName,e)
logging.error("{0} already has the SSID deployed: {1}".format(apName,e))
return redirect(url_for('deploySsid24Ghz', status=status))
else:
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_name,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_id = '{}'".format(apId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
ssidInfoCursor = conn.cursor()
ssidInfoCursor.execute("SELECT ap_ssid_name, ap_ssid_vlan, ap_ssid_wpa2, ap_ssid_bridge_id, ap_ssid_radio_id, ap_ssid_ethernet_id FROM ssids_24ghz WHERE ap_ssid_id = '{}'".format(ssidId))
ssidInfo = ssidInfoCursor.fetchall()
for ssidData in ssidInfo:
ssid = ssidData[0]
vlan = ssidData[1]
wpa2Pass = ssidData[2]
bridgeGroup = ssidData[3]
radioSub = ssidData[4]
gigaSub = ssidData[5]
for info in apInfo:
apName = info[0]
apIp = info[1]
apSshUsername = info[2]
apSshPassword = info[3]
subprocess.check_output("scout --create-ssid-24 {0} {1} {2} {3} {4} {5} {6} {7} {8}".format(apIp,apSshUsername,apSshPassword,ssid,wpa2Pass,vlan,bridgeGroup,radioSub,gigaSub), shell=True)
status = "The Deployment of 2.4GHz SSID {0} for AP {1} Has Been Successfully Initiated!".format(ssid,apName)
return redirect(url_for('deploySsid24Ghz', status=status))
finally:
conn.commit()
conn.close()
if request.method == 'POST' and apGroupId is not None:
apInfoCursor = conn.cursor()
apInfoCursor.execute("SELECT ap_id,ap_ip,ap_ssh_username,ap_ssh_password FROM access_points WHERE ap_group_id = '{}'".format(apGroupId))
apInfo = apInfoCursor.fetchall()
apInfoCursor.close()
ssidInfoCursor = conn.cursor()
ssidInfoCursor.execute("SELECT ap_ssid_name, ap_ssid_vlan, ap_ssid_wpa2, ap_ssid_bridge_id, ap_ssid_radio_id, ap_ssid_ethernet_id FROM ssids_24ghz WHERE ap_ssid_id = '{}'".format(ssidId))
ssidInfo = ssidInfoCursor.fetchall()
ssidInfoCursor.close()
for ssidData in ssidInfo:
ssid = ssidData[0]
vlan = ssidData[1]
wpa2Pass = ssidData[2]
bridgeGroup = ssidData[3]
radioSub = ssidData[4]
gigaSub = ssidData[5]
for info in apInfo:
apId = info[0]
apGroupName = info[1]
apIp = info[2]
apSshUsername = info[3]
apSshPassword = info[4]
checkSsidRelationship = conn.cursor()
checkSsidRelationship.execute("SELECT ssid_id FROM ssids_24ghz_deployed WHERE ap_id = '{}'".format(apId))
checkSsidId = checkSsidRelationship.fetchone()[0]
if checkSsidId == apId:
status = "Sorry, this SSID is already deployed to {}".format(apName)
logging.error("{0} already has the SSID deployed".format(apName))
subprocess.check_output("scout --create-ssid-24 {0} {1} {2} {3} {4} {5} {6} {7} {8}".format(apIp,apSshUsername,apSshPassword,ssid,wpa2Pass,vlan,bridgeGroup,radioSub,gigaSub), shell=True)
deploySsidSql = conn.cursor()
deploySsidSql.execute("INSERT INTO ssids_24ghz_deployed (ap_id, ssid_id) VALUES ('{0}','{1}')".format(apId,ssid))
status = "The Deployment of 2.4GHz SSID {0} Has Been Successfully Initiated for AP Group {1}".format(ssid,apGroupName)
conn.close()
return redirect(url_for('deploySsid24Ghz', status=status))
@Cardinal.route("/delete-ssids", methods=["GET"])
def deleteSsids():
if session.get("username") is not None:
return render_template("delete-ssids.html")
@Cardinal.route("/delete-ssid-24ghz", methods=["GET"])
def deleteSsid24Ghz():
if session.get("username") is not None:
conn = cardinalSql()
status = request.args.get('status')
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("SELECT ap_ssid_id,ap_ssid_name FROM ssids_24ghz")
ssids = deleteSsidCursor.fetchall()
deleteSsidCursor.close()
conn.close()
return render_template("delete-ssid-24ghz.html", status=status, ssids=ssids)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-delete-ssid-24ghz", methods=["POST"])
def doDeleteSsid24Ghz():
if request.method == 'POST':
ssidId = request.form["ssid_id"]
conn = cardinalSql()
deleteSsidNameCursor = conn.cursor()
deleteSsidNameCursor.execute("SELECT ap_ssid_name FROM ssids_24ghz WHERE ap_ssid_id = '{}'".format(ssidId))
ssidName = deleteSsidNameCursor.fetchone()[0]
deleteSsidNameCursor.close()
status = "Success! {} was successfully deleted!".format(ssidName)
try:
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("DELETE FROM ssids_24ghz WHERE ap_ssid_id = '{}'".format(ssidId))
deleteSsidCursor.close()
except MySQLdb.Error as e:
status = e
finally:
conn.commit()
conn.close()
return redirect(url_for('deleteSsid24Ghz', status=status))
@Cardinal.route("/delete-ssid-5ghz", methods=["GET"])
def deleteSsid5Ghz():
if session.get("username") is not None:
conn = cardinalSql()
status = request.args.get('status')
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("SELECT ap_ssid_id,ap_ssid_name FROM ssids_5ghz")
ssids = deleteSsidCursor.fetchall()
deleteSsidCursor.close()
conn.close()
return render_template("delete-ssid-5ghz.html", status=status, ssids=ssids)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-delete-ssid-5ghz", methods=["POST"])
def doDeleteSsid5Ghz():
if request.method == 'POST':
ssidId = request.form["ssid_id"]
conn = cardinalSql()
deleteSsidNameCursor = conn.cursor()
deleteSsidNameCursor.execute("SELECT ap_ssid_name FROM ssids_5ghz WHERE ap_ssid_id = '{}'".format(ssidId))
ssidName = deleteSsidNameCursor.fetchone()[0]
deleteSsidNameCursor.close()
status = "Success! {} was successfully deleted!".format(ssidName)
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("DELETE FROM ssids_5ghz WHERE ap_ssid_id = '{}'".format(ssidId))
deleteSsidCursor.close()
conn.commit()
conn.close()
return redirect(url_for('deleteSsid5Ghz', status=status))
@Cardinal.route("/delete-ssid-24ghz-radius", methods=["GET"])
def deleteSsid24GhzRadius():
if session.get("username") is not None:
conn = cardinalSql()
status = request.args.get('status')
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("SELECT ap_ssid_id,ap_ssid_name FROM ssids_24ghz_radius")
ssids = deleteSsidCursor.fetchall()
deleteSsidCursor.close()
conn.close()
return render_template("delete-ssid-24ghz-radius.html", status=status, ssids=ssids)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-delete-ssid-24ghz-radius", methods=["POST"])
def doDeleteSsid24GhzRadius():
if request.method == 'POST':
ssidId = request.form["ssid_id"]
conn = cardinalSql()
deleteSsidNameCursor = conn.cursor()
deleteSsidNameCursor.execute("SELECT ap_ssid_name FROM ssids_24ghz_radius WHERE ap_ssid_id = '{}'".format(ssidId))
ssidName = deleteSsidNameCursor.fetchone()[0]
deleteSsidNameCursor.close()
status = "Success! {} was successfully deleted!".format(ssidName)
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("DELETE FROM ssids_24ghz_radius WHERE ap_ssid_id = '{}'".format(ssidId))
deleteSsidCursor.close()
conn.commit()
conn.close()
return redirect(url_for('deleteSsid24GhzRadius', status=status))
@Cardinal.route("/delete-ssid-5ghz-radius", methods=["GET"])
def deleteSsid5GhzRadius():
if session.get("username") is not None:
conn = cardinalSql()
status = request.args.get('status')
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("SELECT ap_ssid_id,ap_ssid_name FROM ssids_5ghz_radius")
ssids = deleteSsidCursor.fetchall()
deleteSsidCursor.close()
conn.close()
return render_template("delete-ssid-5ghz-radius.html", status=status, ssids=ssids)
else:
return redirect(url_for('index'))
@Cardinal.route("/do-delete-ssid-5ghz-radius", methods=["POST"])
def doDeleteSsid5GhzRadius():
if request.method == 'POST':
ssidId = request.form["ssid_id"]
conn = cardinalSql()
deleteSsidNameCursor = conn.cursor()
deleteSsidNameCursor.execute("SELECT ap_ssid_name FROM ssids_5ghz_radius WHERE ap_ssid_id = '{}'".format(ssidId))
ssidName = deleteSsidNameCursor.fetchone()[0]
deleteSsidNameCursor.close()
status = "Success! {} was successfully deleted!".format(ssidName)
deleteSsidCursor = conn.cursor()
deleteSsidCursor.execute("DELETE FROM ssids_5ghz_radius WHERE ap_ssid_id = '{}'".format(ssidId))
deleteSsidCursor.close()
conn.commit()
conn.close()
return redirect(url_for('deleteSsid5GhzRadius', status=status))
@Cardinal.route("/total-ap-clients", methods=["GET"])
def totalApClients():
if session.get("username") is not None:
return render_template("total-ap-clients.html")
@Cardinal.route("/total-ap-bandwidth", methods=["GET"])
def totalApBandwidth():
if session.get("username") is not None:
return render_template("total-ap-bandwidth.html")
@Cardinal.route("/ap-ip-address", methods=["GET"])
def apIpAddress():
if session.get("username") is not None:
return render_template("ap-ip-address.html")
@Cardinal.route("/total-aps", methods=["GET"])
def totalAps():
if session.get("username") is not None:
conn = cardinalSql()
totalApsCursor = conn.cursor()
totalApsCursor.execute("SELECT COUNT(*) FROM access_points")
totalAps = totalApsCursor.fetchone()[0]
totalApsCursor.close()
conn.close()
return render_template('total-aps.html', totalAps=totalAps)
else:
return redirect(url_for('index'))
@Cardinal.route("/total-clients", methods=["GET"])
def totalClients():
if session.get("username") is not None:
conn = cardinalSql()
totalClientsCursor = conn.cursor()
totalClientsCursor.execute("SELECT FORMAT(SUM(ap_total_clients),0) AS totalClients FROM access_points")
totalClients = totalClientsCursor.fetchone()[0]
totalClientsCursor.close()
conn.close()
return render_template('total-clients.html', totalClients=totalClients)
else:
return redirect(url_for('index'))
@Cardinal.route("/total-ap-groups", methods=["GET"])
def totalApGroups():
if session.get("username") is not None:
conn = cardinalSql()
totalApGroupsCursor = conn.cursor()
totalApGroupsCursor.execute("SELECT COUNT(*) FROM access_point_groups")
totalApGroups = totalApGroupsCursor.fetchone()[0]
totalApGroupsCursor.close()
conn.close()
return render_template('total-ap-groups.html', totalApGroups=totalApGroups)
else:
return redirect(url_for('index'))
@Cardinal.route("/total-ssids", methods=["GET"])
def totalSsids():
if session.get("username") is not None:
conn = cardinalSql()
ssids24Cursor = conn.cursor()
ssids5Cursor = conn.cursor()
ssids24RadiusCursor = conn.cursor()
ssids5RadiusCursor = conn.cursor()
ssids24Cursor.execute("SELECT COUNT(*) FROM ssids_24ghz")
ssids5Cursor.execute("SELECT COUNT(*) FROM ssids_5ghz")
ssids24RadiusCursor.execute("SELECT COUNT(*) FROM ssids_24ghz_radius")
ssids5RadiusCursor.execute("SELECT COUNT(*) FROM ssids_5ghz_radius")
ssids24 = ssids24Cursor.fetchone()[0]
ssids5 = ssids5Cursor.fetchone()[0]
ssids24Radius = ssids24RadiusCursor.fetchone()[0]
ssids5Radius = ssids5RadiusCursor.fetchone()[0]
totalSsids = ssids24 + ssids5 + ssids24Radius + ssids5Radius
ssids24Cursor.close()
ssids5Cursor.close()
ssids24RadiusCursor.close()
ssids5RadiusCursor.close()
conn.close()
return render_template('total-ssids.html', totalSsids=totalSsids)
else:
return redirect(url_for('index'))
if __name__ == "__main__":
Cardinal.run(debug=True, host='0.0.0.0')
| {
"content_hash": "0ff08f6fb9c99e6b6378de266f0262a1",
"timestamp": "",
"source": "github",
"line_count": 1009,
"max_line_length": 554,
"avg_line_length": 44.447968285431116,
"alnum_prop": 0.6476097038886907,
"repo_name": "falcon78921/Cardinal",
"id": "3c350937fa6cc9463f87dde798cb7ef575a7bd83",
"size": "44873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10860"
},
{
"name": "HTML",
"bytes": "55785"
},
{
"name": "JavaScript",
"bytes": "20260"
},
{
"name": "Python",
"bytes": "79589"
},
{
"name": "Shell",
"bytes": "6025"
},
{
"name": "TSQL",
"bytes": "8417"
}
],
"symlink_target": ""
} |
"""Module to apply mutants at bytecode level"""
import random
import subprocess
JUMP_OPCODES = ["je", "jne", "jl", "jle", "jg", "jge"]
SHORT_JUMPS = list(
map(bytes.fromhex, ["74", "75", "7C", "7D", "7E", "7F", "EB"]))
# no unconditional for near jumps, since changes opcode length, not worth it
NEAR_JUMPS = list(
map(
bytes.fromhex,
["0F 84", "0F 85", "0F 8C", "0F 8D", "0F 8E", "0F 8F", "90 E9"],
))
# known markers for fuzzer/compiler injected instrumentation/etc.
INST_SET = ["__afl", "__asan", "__ubsan", "__sanitizer", "__lsan", "__sancov"]
def get_jumps(filename):  # pylint: disable=too-many-locals
    """Return {file_offset: (mnemonic, encoded_bytes)} for mutable jumps.

    Disassembles *filename* with ``objdump -d --file-offsets`` and
    collects every conditional jump listed in JUMP_OPCODES, skipping
    lines that mention known instrumentation symbols (INST_SET).
    """
    jumps = {}
    proc = subprocess.Popen(
        ["objdump", "-d", "--file-offsets", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, _ = proc.communicate()
    output = str(out, encoding="utf-8")
    for line in output.split("\n"):
        try:
            # Section headers end in ":" and carry "(File Offset: 0x...)";
            # they let us convert virtual addresses to raw file offsets.
            if "File Offset" in line and line[-1] == ":":
                section_base = int(line.split()[0], 16)
                offset_hex = line.split("File Offset:")[1].split(")")[0]
                section_offset = int(offset_hex, 16) - section_base
                continue
            found_inst = False
            for i in INST_SET:
                if i in line:
                    found_inst = True
                    break
            if found_inst:
                continue  # Don't mutate these things
            # Instruction lines are tab-separated: addr, hex bytes, mnemonic.
            fields = line.split("\t")
            if len(fields) > 1:
                opcode = fields[2].split()[0]
                if opcode in JUMP_OPCODES:
                    loc_bytes = fields[0].split(":")[0]
                    loc = int(loc_bytes, 16) + section_offset
                    jumps[loc] = (opcode, bytes.fromhex(fields[1]))
        # pylint: disable=bare-except
        except:  # If we can't parse some line in the objdump, just skip it
            pass
    return jumps
def different_jump(hexdata):
    """Return a replacement encoding that differs from jump *hexdata*.

    Near jumps (leading 0x0F) are swapped within NEAR_JUMPS, short jumps
    within SHORT_JUMPS. JE/JNE usually become an unconditional JMP,
    which effectively removes the branch.
    """
    is_near = hexdata[0] == 0x0F
    table = NEAR_JUMPS if is_near else SHORT_JUMPS
    # The byte that distinguishes one jump from another: the second byte
    # of a 0F 8x near jump, the only byte of a short jump.
    pos = 1 if is_near else 0
    je_or_jne = hexdata[pos] in (table[0][pos], table[1][pos])
    if je_or_jne and random.random() <= 0.75:
        return table[-1]
    candidates = [jump for jump in table if jump[pos] != hexdata[pos]]
    return random.choice(candidates)
def pick_and_change(jumps):
    """Pick a random jump location and return it with a replacement encoding."""
    location = random.choice(list(jumps))
    original_bytes = jumps[location][1]
    return (location, different_jump(original_bytes))
def get_code(filename):
    """Return the file's contents as a mutable bytearray."""
    with open(filename, "rb") as binary_file:
        contents = binary_file.read()
    return bytearray(contents)
def mutant_from(code, jumps, order=1):
    """Return a mutated copy of *code* with *order* random jump rewrites.

    Args:
        code: original program bytes (anything bytearray() accepts).
        jumps: dict from get_jumps(), mapping file offset -> (opcode, bytes).
        order: number of mutations to apply; higher-order mutants are
            allowed, though a later mutation can undo an earlier one.

    Returns:
        A new bytearray; *code* itself is not modified.
    """
    new_code = bytearray(code)
    for _ in range(order):
        (loc, new_data) = pick_and_change(jumps)
        # Overwrite the jump's bytes in place; replacement encodings are the
        # same length as the originals, so the slice assignment is 1:1.
        new_code[loc:loc + len(new_data)] = new_data
    return new_code
def mutant(filename, order=1):
    """Build and return an order-*order* mutant of the binary at *filename*."""
    original = get_code(filename)
    jump_table = get_jumps(filename)
    return mutant_from(original, jump_table, order=order)
def mutate_from(code, jumps, new_filename, order=1):
    """Generate an order-*order* mutant of *code* and write it to *new_filename*."""
    mutated = mutant_from(code, jumps, order=order)
    with open(new_filename, "wb") as out_file:
        out_file.write(mutated)
def mutate(filename, new_filename, order=1):
    """Mutate the binary at *filename* and write the result to *new_filename*."""
    mutated = mutant(filename, order=order)
    with open(new_filename, "wb") as out_file:
        out_file.write(mutated)
| {
"content_hash": "4331ae94202b17f8db11f91035fb726c",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 90,
"avg_line_length": 35.56410256410256,
"alnum_prop": 0.5671713530401346,
"repo_name": "google/fuzzbench",
"id": "e5e388a90a6dcb4630fe198c5c1f267e777d694e",
"size": "4731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzers/aflplusplus_muttfuzz/mutate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17334"
},
{
"name": "C++",
"bytes": "37645"
},
{
"name": "Dockerfile",
"bytes": "337043"
},
{
"name": "HTML",
"bytes": "25840"
},
{
"name": "Jupyter Notebook",
"bytes": "578996"
},
{
"name": "Makefile",
"bytes": "2810"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1222236"
},
{
"name": "Shell",
"bytes": "86157"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    """Initial schema: a spatial ``Bin`` model (polygon + population) and an
    ``Incident`` model (UCPD case records with an optional point location and
    a foreign key to the bin containing it).

    Auto-generated by Django; the ``b'...'`` literals come from a Python 2
    makemigrations run (note the unicode_literals import at the top of the
    file). Do not hand-edit field definitions.
    """

    # First migration for the app, so nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Bin',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326, null=True)),
                ('population', models.FloatField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Incident',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('caseno', models.CharField(help_text=b'Case number', max_length=10, db_index=True)),
                ('address', models.CharField(max_length=200, null=True)),
                ('date', models.DateField(null=True, blank=True)),
                ('time', models.TimeField(null=True, blank=True)),
                ('offense', models.CharField(max_length=100)),
                ('description', models.CharField(max_length=100)),
                ('category', models.CharField(default=b'N', max_length=1, choices=[(b'V', b'Violent'), (b'P', b'Property'), (b'Q', b'Quality of life'), (b'N', b'Uncategorized')])),
                ('point', django.contrib.gis.db.models.fields.PointField(srid=4326, null=True)),
                ('hbin', models.ForeignKey(related_name='incidents', to='ucpd.Bin', null=True)),
            ],
            options={
                'ordering': ['-caseno'],
            },
        ),
    ]
| {
"content_hash": "d8bb17455c73677b18339105908ba40c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 180,
"avg_line_length": 43.94871794871795,
"alnum_prop": 0.5600933488914819,
"repo_name": "sahilchinoy/ucpd-crime",
"id": "c59509e063537f6a01602a2e72da243df9e6785f",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ucpd/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12395"
},
{
"name": "HTML",
"bytes": "43329"
},
{
"name": "JavaScript",
"bytes": "93937"
},
{
"name": "Python",
"bytes": "33421"
}
],
"symlink_target": ""
} |
# Generated regression-test driver: runs the shared skl_datasets harness for
# the "BinaryClass_500" dataset against the model configuration named
# "CalibratedClassifierCV_sigmoid_15".
from sklearn_explain.tests.skl_datasets import skl_datasets_test as skltest

skltest.test_class_dataset_and_model("BinaryClass_500" , "CalibratedClassifierCV_sigmoid_15")
| {
"content_hash": "3cccfecacfbef705c708a24fa879565c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 93,
"avg_line_length": 43,
"alnum_prop": 0.813953488372093,
"repo_name": "antoinecarme/sklearn_explain",
"id": "19e1f11b6e9b5ce4f1d6152d8416efa758316ab2",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/skl_datasets/BinaryClass_500/skl_dataset_BinaryClass_500_CalibratedClassifierCV_sigmoid_15_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "110343"
}
],
"symlink_target": ""
} |
"""Services for exploration-related statistics."""
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import stats_jobs_continuous
from core.platform import models
# Load the statistics storage models through the platform registry.
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])

# Improvement-flag types emitted by get_state_improvements() below.
IMPROVE_TYPE_DEFAULT = 'default'
IMPROVE_TYPE_INCOMPLETE = 'incomplete'

# TODO(bhenning): Everything is handler name submit; therefore, it is
# pointless and should be removed.
_OLD_SUBMIT_HANDLER_NAME = 'submit'
def get_top_unresolved_answers_for_default_rule(exploration_id, state_name):
    """Return a dict mapping each of the top 3 answers submitted to the
    state's default rule to its submission count.
    """
    answer_log = stats_domain.StateRuleAnswerLog.get(
        exploration_id, state_name, exp_domain.DEFAULT_RULESPEC_STR)
    # get_top_answers yields (answer, count) pairs, which dict() accepts
    # directly.
    return dict(answer_log.get_top_answers(3))
def get_state_rules_stats(exploration_id, state_name):
    """Gets statistics for the answer groups and rules of this state.

    Returns:
        A dict keyed by '{HANDLER_NAME}.{RULE_STR}', whose values are dicts
        with 'answers' (the rule's top 5 answers) and 'rule_hits' (the
        rule's total answer count).
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)
    state = exploration.states[state_name]

    # One (handler, rule string) pair per answer-group rule, plus the
    # default outcome when the interaction defines one.
    rule_keys = [
        (_OLD_SUBMIT_HANDLER_NAME, rule.stringify_classified_rule())
        for group in state.interaction.answer_groups
        for rule in group.rule_specs
    ]
    if state.interaction.default_outcome:
        rule_keys.append(
            (_OLD_SUBMIT_HANDLER_NAME, exp_domain.DEFAULT_RULESPEC_STR))

    answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id,
        [{'state_name': state_name, 'rule_str': rule_str}
         for (_, rule_str) in rule_keys])

    return {
        '.'.join(rule_keys[idx]): {
            'answers': answer_log.get_top_answers(5),
            'rule_hits': answer_log.total_answer_count
        }
        for idx, answer_log in enumerate(answer_logs)
    }
def get_top_state_rule_answers(exploration_id, state_name, rule_str_list):
    """Return the top answers (by submission frequency) submitted to the
    given state that matched any of the rule spec strings in
    *rule_str_list*. Thin single-state wrapper around
    get_top_state_rule_answers_multi.
    """
    single_pair = [(exploration_id, state_name)]
    return get_top_state_rule_answers_multi(single_pair, rule_str_list)[0]
def get_top_state_rule_answers_multi(exploration_state_list, rule_str_list):
    """For each (exploration_id, state_name) pair in
    *exploration_state_list*, return the answers (by submission frequency)
    that matched any rule spec string in *rule_str_list*.

    Returns:
        One list per input pair, each containing {'value', 'count'} dicts.
    """
    answer_logs = (
        stats_domain.StateRuleAnswerLog.get_multi_by_multi_explorations(
            exploration_state_list, rule_str_list))

    results = []
    for answer_log in answer_logs:
        results.append([
            {'value': answer, 'count': count}
            for (answer, count) in answer_log.get_all_top_answers()
        ])
    return results
def get_state_improvements(exploration_id, exploration_version):
    """Returns a list of dicts, each representing a suggestion for improvement
    to a particular state. Each dict has 'rank' (how many learners hit the
    problem), 'state_name' and 'type' (one of the IMPROVE_TYPE_* constants),
    sorted by descending rank.
    """
    ranked_states = []

    exploration = exp_services.get_exploration_by_id(exploration_id)
    state_names = exploration.states.keys()

    # One default-rule answer log per state, in state_names order (the
    # enumerate() below relies on this alignment).
    default_rule_answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id, [{
            'state_name': state_name,
            'rule_str': exp_domain.DEFAULT_RULESPEC_STR
        } for state_name in state_names])

    statistics = stats_jobs_continuous.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)
    state_hit_counts = statistics['state_hit_counts']

    for ind, state_name in enumerate(state_names):
        total_entry_count = 0
        no_answer_submitted_count = 0
        if state_name in state_hit_counts:
            total_entry_count = (
                state_hit_counts[state_name]['total_entry_count'])
            no_answer_submitted_count = state_hit_counts[state_name].get(
                'no_answer_count', 0)

        # States nobody has visited cannot be flagged.
        if total_entry_count == 0:
            continue

        # A problem is flagged when it affects more than 20% of visits.
        threshold = 0.2 * total_entry_count
        default_rule_answer_log = default_rule_answer_logs[ind]
        default_count = default_rule_answer_log.total_answer_count

        eligible_flags = []

        state = exploration.states[state_name]
        # Flag 1: many answers fall through to a default outcome that loops
        # back to the same state (learners are stuck).
        if (default_count > threshold and
                state.interaction.default_outcome is not None and
                state.interaction.default_outcome.dest == state_name):
            eligible_flags.append({
                'rank': default_count,
                'improve_type': IMPROVE_TYPE_DEFAULT})

        # Flag 2: many learners leave without submitting any answer.
        if no_answer_submitted_count > threshold:
            eligible_flags.append({
                'rank': no_answer_submitted_count,
                'improve_type': IMPROVE_TYPE_INCOMPLETE})

        # Keep only the highest-ranked flag for the state.
        if eligible_flags:
            eligible_flags = sorted(
                eligible_flags, key=lambda flag: flag['rank'], reverse=True)
            ranked_states.append({
                'rank': eligible_flags[0]['rank'],
                'state_name': state_name,
                'type': eligible_flags[0]['improve_type'],
            })

    return sorted([
        ranked_state for ranked_state in ranked_states
        if ranked_state['rank'] != 0
    ], key=lambda x: -x['rank'])
def get_versions_for_exploration_stats(exploration_id):
    """Returns list of versions for this exploration."""
    annotations_model = stats_models.ExplorationAnnotationsModel
    return annotations_model.get_versions(exploration_id)
def get_exploration_stats(exploration_id, exploration_version):
    """Returns a dict with state statistics for the given exploration id.

    Note that exploration_version should be a string.
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)

    exp_stats = stats_jobs_continuous.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)
    state_hit_counts = exp_stats['state_hit_counts']

    def _entry_counts(state_name):
        # States never visited get zero counts.
        if state_name in state_hit_counts:
            counts = state_hit_counts[state_name]
            return counts['first_entry_count'], counts['total_entry_count']
        return 0, 0

    state_stats = {}
    for state_name in exploration.states:
        first_entries, total_entries = _entry_counts(state_name)
        state_stats[state_name] = {
            'name': state_name,
            'firstEntryCount': first_entries,
            'totalEntryCount': total_entries,
        }

    return {
        'improvements': get_state_improvements(
            exploration_id, exploration_version),
        'last_updated': exp_stats['last_updated'],
        'num_completions': exp_stats['complete_exploration_count'],
        'num_starts': exp_stats['start_exploration_count'],
        'state_stats': state_stats,
    }
| {
"content_hash": "152888fc2420668e7b772d03d4666eee",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 79,
"avg_line_length": 37.60621761658031,
"alnum_prop": 0.6412234775420226,
"repo_name": "anggorodewanto/oppia",
"id": "4bfe86ae93f5c037508cb51d5ce73beba4b02b09",
"size": "7881",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/stats_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "79476"
},
{
"name": "HTML",
"bytes": "500455"
},
{
"name": "JavaScript",
"bytes": "2202728"
},
{
"name": "Python",
"bytes": "2372967"
},
{
"name": "Shell",
"bytes": "42367"
}
],
"symlink_target": ""
} |
import numpy as np
import sys
import csv
"""This file is not well tested at this time, good chance it has bugs"""
#Max number of iterations in the K-medoids algorithm
K_MEDIODS_ITERATIONS = 100
#About how many teams will be in each cluster. Used to calculate the k of k-means
APPROX_CLUSTER_SIZE = 2
#Number of places to round resuts to
RESULT_DECIMAL_NUM = 2
#The filename for the CSV file (stored in the C_Scores directory) with seed data
#(each row a team, with 16 PTPS scores followed by some number (currently 10)
#outgoing survey scores)
SEED_DATA_FILE = 'seedData.csv'

def readSeed(seedFile=SEED_DATA_FILE):
    """Parse the seed-data CSV into a list of teams.

    Each row must hold 16 PTPS scores followed by any number of outgoing
    survey scores. Blank rows are skipped.

    Returns:
        A list of [ptps_scores, success_scores] pairs, where ptps_scores is
        a 4x4 nested list (one 4-score sublist per team member) and
        success_scores is the list of remaining values as floats.
    """
    seed = []
    with open(seedFile, 'r') as csv_file:  # don't shadow the builtin `file`
        for raw_row in csv.reader(csv_file, delimiter=',', quotechar='|'):
            if not raw_row:
                continue  # tolerate blank lines in the CSV
            row = [float(value) for value in raw_row]
            # First 16 columns: four 4-score vectors, one per member.
            ptps_scores = [row[0:4], row[4:8], row[8:12], row[12:16]]
            # Remaining columns: the team's outgoing survey scores.
            seed.append([ptps_scores, row[16:]])
    return seed
#List of previous data tuples with team scores array and outgoing score
#NOTE: this runs at import time and reads seedData.csv from the current
#working directory; importing the module fails if the file is missing.
TEAMS_DATA = readSeed(SEED_DATA_FILE)
def euclideanDistance(v1, v2):
    '''calculates the classic euclidean distance between two vectors;
    nested sequences contribute their own euclidean distance recursively'''
    total = 0
    for index, component in enumerate(v1):
        if type(component) in [float, int]:
            total += (component - v2[index]) ** 2
        else:
            # Nested vector: recurse and square its distance.
            total += euclideanDistance(component, v2[index]) ** 2
    return np.sqrt(total)
def populatePermDict():
    '''assigns each integer 0-23 to a unique permutation in S4 used in the
    current implementation of permute(vector, perm).

    itertools.permutations(range(4)) yields S4 in lexicographic order, which
    matches the hand-written table this replaces entry for entry
    ([0,1,2,3] at key 0 through [3,2,1,0] at key 23).'''
    import itertools
    return {
        index: list(perm)
        for index, perm in enumerate(itertools.permutations(range(4)))
    }
def permute(vector, perm):
    '''takes a vector and permutation (number) and returns a new vector with
    its 4 elements shuffled according to that permutation.

    NOTE(review): rebuilds the permutation table on every call; callers in a
    hot loop (e.g. distance()) would benefit from caching it — confirm before
    optimizing.'''
    sigma = populatePermDict()[perm]
    # Element i of the result is vector[sigma[i]].
    return [vector[source_index] for source_index in sigma]
def distance(v1, v2):
    '''custom distance function required to calculate k-means computes the
    minimal euclidean distance between v1, v2 when the (4) elements are
    permuted in every possible way'''
    return min(
        euclideanDistance(permute(v1, perm_index), v2)
        for perm_index in range(24)
    )
def pairwiseDistances(vectors):
    '''Helper Function for kMedoids which increases efficiency by putting all
    distances in a 2D array so they only need to be calculated once.

    Bug fix: the original seeded the matrix with np.arange(n*n), an *integer*
    array, so every float distance was truncated on assignment. A float
    zeros matrix preserves the fractional parts.'''
    n = len(vectors)
    D = np.zeros((n, n), dtype=float)
    for i in range(n):
        for j in range(i+1):
            dist = distance(vectors[i], vectors[j])
            #Array is symetric, so we fill in on both sides of the diagonal together
            D[i,j] = dist
            D[j,i] = dist
    return D
'''
Description: Takes a list of 'team' vectors and the desired number of medoids and returns a list of final
    medoids, as well as a dictionary representing the clusters.
'''
def kMedoids(vectors, k, tmax = K_MEDIODS_ITERATIONS):
    """Note: this function heavily inspired by the paper located at
    https://www.researchgate.net/publication/272351873_NumPy_SciPy_Recipes_for_Data_Science_k-Medoids_Clustering

    Returns a dict mapping each medoid index (0..k-1) to a numpy array of
    the indices of the vectors assigned to that cluster.
    """
    #Precompute distance matrix for efficiency
    D = pairwiseDistances(vectors)
    #figure out how many teams there are
    n = len(D)
    if k > n:
        raise Exception('too many medoids')
    # randomly initialize an array of k medoid indices
    M = np.arange(n)
    np.random.shuffle(M)
    M = np.sort(M[:k])
    # create a copy of the array of medoid indices
    Mnew = np.copy(M)
    #Create a dictionary to represent clusters
    C = {}
    #Everything in here is somewhat black magic indexing found at the credited link above
    for t in range(tmax):
        # determine clusters, i. e. arrays of data indices
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]
        # update cluster medoids
        for kappa in range(k):
            J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)
            j = np.argmin(J)
            Mnew[kappa] = C[kappa][j]
        # NOTE(review): np.sort returns a sorted *copy*, so this line is a
        # no-op; likely intended Mnew.sort() so the convergence comparison
        # below is order-insensitive — confirm before changing.
        np.sort(Mnew)
        # check for convergence
        if np.array_equal(M, Mnew):
            break
        M = np.copy(Mnew)
    else:
        # for/else: runs only if the loop above never hit `break`,
        # i.e. no convergence within tmax iterations.
        # final update of cluster memberships
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]
    # return results
    return C
'''
Description: This function should be run once (at server start?). It uses past data
    (currently hardcoded into this file) in the form of a list of team, score
    tuples to calculate means and assign them scores. This is with the help of
    the kMedoids function above to generate medoids.
NOTE: The scores given in seed data and assigned to clusters are a collection of scores
    recieved on the 10 question of the Team Success Survey currently in use. However,
    this can be easily changed by making changes to the number of scores given in seed
    data, along with appropriate changes to the prediction veiws.
Output: A dictionary whose keys are the 'medoids' of kMedoids (as tuples of lists, so
    that they are hashable) and whose values are the outgoing score
    (float) associated with that medoid. This should be stored somehow in
    the application and used as a parameter of analyze(newTeam, medScores)
    every time it is called.
'''
def preAnalyze(seed = TEAMS_DATA):
    # NOTE: the default binds the snapshot of TEAMS_DATA taken at module
    # load; later rebinding of TEAMS_DATA will not be seen here.
    vectors = []
    for team in seed:
        vectors.append(team[0])
    C = kMedoids(vectors, int(len(vectors)/APPROX_CLUSTER_SIZE))
    #reconfigure the C dictionary to be more useful, as pairs {med_indx: list of cluster's vectors}
    Clusters = {}
    for medI in C:
        Clusters[medI] = C[medI].tolist()
    #will hold the assigned score to each med
    medScores = {}
    #keep track of the number of teams assigned to each med
    teamsInMeds = {}
    for medI in Clusters:
        #initialize teamsInMeds dictionary entries to 0
        teamsInMeds[medI] = 0
        #populate medScores with entries to fill in the score of each med
        medScores[medI] = [0 for x in range(len(seed[0][1]))]
        for team_indx in Clusters[medI]:
            #Add 1 to the number of teams in the cluster of the current team
            teamsInMeds[medI] += 1
            #add scores to the cluster's aggregate
            for i in range(len(seed[team_indx][1])):
                medScores[medI][i]+=(seed[team_indx][1][i])
        #get the AVERAGE score for teams in a cluster, and add the med vector to the value
        medScores[medI] = list(map(lambda x:x/teamsInMeds[medI], medScores[medI]))
        #Add the med vector to the information stored in the list
        medScores[medI] = (vectors[medI], medScores[medI])
    return medScores
def analyze(newTeam, medScores):
    '''
    Description: This function should be called when the user asks for a new
        prediction. It classifies a hypothetical team based on stored past
        data (the medoids from preAnalyze) and returns the outgoing scores
        associated with that classification.
    Output: A list of float values describing the predicted success of the
        input team, one value for each question on the current iteration of
        the team success survey, rounded to RESULT_DECIMAL_NUM places.
    Parameters:
        newTeam - a 16-vector (a list of 4 lists of length 4, each
            representing the scores of a particular team member).
        medScores - the dictionary computed by preAnalyze(), mapping medoid
            index -> (medoid_vector, average_scores).
    '''
    #placeholder for the closest medoid
    closeMed = 0
    #start dist off as inf to ensure we select a closer medoid
    dist = float("inf")
    #find the closest medoid; compute each distance once
    #(the original recomputed the same distance twice per iteration)
    for medI in medScores:
        candidate = distance(newTeam, medScores[medI][0])
        if candidate < dist:
            closeMed = medI
            dist = candidate
    #return the score associated with that medoid, rounded
    result = medScores[closeMed][1]
    return (np.around(result, RESULT_DECIMAL_NUM)).tolist()
#Below is machinery for allowing us to call the functions in this file from the app
def main(argv):
    med_scores = preAnalyze()
    import ast
    # argv carries the team vector as a Python literal, possibly split
    # across several arguments.
    team = ast.literal_eval(''.join(argv))
    print(analyze(team, med_scores))

if __name__ == "__main__":
    main(sys.argv[1:])
"content_hash": "95d619f8f3cd68a1dfe6b10253c6bb05",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 112,
"avg_line_length": 33.774809160305345,
"alnum_prop": 0.6789467736467397,
"repo_name": "Hmc-cs-tdubno/C_Scores",
"id": "d084ab034fa26de4fcf9d0f8676fb362c3d6d00a",
"size": "8849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TeamScoreAnalysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2169"
},
{
"name": "CoffeeScript",
"bytes": "844"
},
{
"name": "HTML",
"bytes": "31793"
},
{
"name": "JavaScript",
"bytes": "17319"
},
{
"name": "Python",
"bytes": "12713"
},
{
"name": "Ruby",
"bytes": "70488"
}
],
"symlink_target": ""
} |
"""Converts an old version's data base to the recent format.
The database of the rc-1 did not have a priority field. This script
just adds this to all issues."""
import os
import pickle
ISSUE_FILE = '.issues'
if __name__ == "__main__":
with open(ISSUE_FILE,'r') as issues:
DB = pickle.load(issues)
for issue in DB.values():
if not hasattr(issue, 'priority'):
issue.priority = 3
with open(ISSUE_FILE, 'w') as issues:
pickle.dump(DB, issues, -1)
| {
"content_hash": "bc0da7fc6ba6751c45fa7ba27d1d4009",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 27.210526315789473,
"alnum_prop": 0.6054158607350096,
"repo_name": "dhesse/Git-Track",
"id": "244982d0ad740400ad6877de63584fc9bfb8487b",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10133"
}
],
"symlink_target": ""
} |
"""Compute the diffusion process from atom counts in two regions.
The diffusion of atoms from one region to another is modelled as a first-order
process. The flow rate of atoms is considered to be proportional to the
difference of the number of atoms in the two regions. By using this script,
the logarithm of the difference in the number of atoms is fitted linearly with
time. The resulted slop can be a measure of the diffusion rate.
"""
import argparse
import math
import numpy as np
import numpy.linalg
import matplotlib.pyplot as plt
def main():
    """Drive the script."""
    # Parse the arguments.
    # The module docstring doubles as the CLI description.
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument(
        'input', type=argparse.FileType('r'), metavar='INPUT',
        help='The input file, which contains time and number of atoms '
        'in the two regions as columns'
    )
    parser.add_argument(
        '--begin', '-b', type=float, metavar='TIME',
        help='The beginning time for the fitting'
    )
    parser.add_argument(
        '--end', '-e', type=float, metavar='TIME',
        help='The end time for the fitting'
    )
    parser.add_argument(
        '--graph', '-g', type=str, metavar='FILE', default='diffusion.png',
        help='The graph file for the graphics of the fitting.'
    )
    args = parser.parse_args()

    # Parse and filter the trajectory to the requested [begin, end] window.
    traj = parse_traj(args.input)
    traj = filter_traj(traj, args.begin, args.end)

    # Perform the fit of log(concentration difference) against time.
    diff_coeff, log_d0 = fit_traj(traj)
    print('Diffusion coefficient: {}'.format(diff_coeff))

    # Plot the graphics.
    plot_fit(args.graph, traj, diff_coeff, log_d0)

    return 0
def parse_traj(input_fp):
    """Parse the trajectory file.

    The atoms diffuse from the second region to the first region.
    """
    traj = []
    for line in input_fp:
        traj.append([float(field) for field in line.split()])
    # Normalize column order: if the first sample shows region 1 holding
    # more atoms than region 2, swap the two count columns everywhere.
    if traj[0][1] > traj[0][2]:
        traj = [[row[0], row[2], row[1]] for row in traj]
    return traj
def filter_traj(traj, begin_time, end_time):
    """Filter the trajectory for only steps within the time region.

    Steps strictly after *begin_time* (or all, when it is None) and up to
    *end_time* (exclusive of later steps) are kept.
    """
    kept = []
    for step in traj:
        time = step[0]
        # Steps are time-ordered, so stop at the first one past the end.
        if end_time is not None and time > end_time:
            break
        if begin_time is None or time > begin_time:
            kept.append(step)
    return kept
def fit_traj(traj):
    """Fit the log of the atom-number difference linearly against time.

    Returns:
        The least-squares coefficients ``[slope, intercept]``, where slope
        is the (negative) diffusion rate and intercept is log(d0).
    """
    time = np.array([[i[0], 1.0] for i in traj], dtype=np.float64)
    diff = np.log(np.array(
        [(i[2] - i[1]) for i in traj], dtype=np.float64
    ))
    # rcond=None selects numpy's modern default cutoff and silences the
    # FutureWarning that lstsq emits when rcond is omitted.
    return np.linalg.lstsq(time, diff, rcond=None)[0]
def plot_fit(file_name, traj, diff_coeff, log_d0):
    """Plots the fitting curve.

    Saves a semi-log plot to *file_name* showing the simulated concentration
    difference (markers) and the fitted exponential exp(diff_coeff*t + log_d0)
    (line) against time.
    """
    time = [i[0] for i in traj]
    orig_diff = [i[2] - i[1] for i in traj]
    # Reconstruct the fitted curve from the linear fit in log space.
    fit_diff = [math.exp(diff_coeff * i + log_d0) for i in time]

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title('Fit of the diffusion coefficient: {}'.format(diff_coeff))
    ax.set_xlabel('Time')
    ax.set_ylabel('Concentration difference')
    ax.semilogy(
        time, orig_diff, marker='x', linestyle='', label='Simulation'
    )
    ax.semilogy(
        time, fit_diff, linestyle='-', label='fit'
    )
    ax.legend(loc='best', fancybox=True, shadow=True)
    fig.savefig(file_name)
if __name__ == '__main__':
    # Script entry point; main()'s return value is ignored here.
    main()
| {
"content_hash": "a3fb47cfb9df49cbab7ef4998ce84880",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 78,
"avg_line_length": 29.23931623931624,
"alnum_prop": 0.6220403390821397,
"repo_name": "tschijnmo/lammps_scripts",
"id": "2b356b682e4b629d97ef6160fb321adb324de6c0",
"size": "3421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diffuse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10579"
}
],
"symlink_target": ""
} |
'''
Instructions:
1. Make sure you have Flask, Flask-Rauth, and SQLAlchemy installed.
$ pip install Flask Flask-Rauth SQLAlchemy
2. Open a Python shell in this directory and execute the following:
$ python
>>> from tweet import init_db
>>> init_db()
>>> exit()
This will initialize the SQLite database.
3. Start the application.
$ python tweet.py
4. Navigate your web browser to where this app is being served (localhost,
by default).
'''
from flask import Flask, request, redirect, url_for, session, flash, g, render_template
from flask.ext.rauth import RauthOAuth1
from sqlalchemy import create_engine, Column, Integer, String, Text
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# setup flask
app = Flask(__name__)

# you can specify the consumer key and consumer secret in the application,
# like this (the TWITTER_* values below are placeholders — substitute the
# credentials of your registered Twitter application):
app.config.update(
    TWITTER_CONSUMER_KEY='your_consumer_key',
    TWITTER_CONSUMER_SECRET='your_consumer_secret',
    SECRET_KEY='just a secret key, to confound the bad guys',
    DEBUG = True
)

# setup the twitter endpoint (OAuth 1.0a URLs for the v1 Twitter API)
twitter = RauthOAuth1(
    name='twitter',
    base_url='https://api.twitter.com/1/',
    request_token_url='https://api.twitter.com/oauth/request_token',
    access_token_url='https://api.twitter.com/oauth/access_token',
    authorize_url='https://api.twitter.com/oauth/authorize'
)

# this call simply initializes a default empty consumer key and secret in the
# app config if none exist.
# I've included it to match the "look" of Flask extensions
twitter.init_app(app)

# setup sqlalchemy: SQLite file DB with a request-scoped session
engine = create_engine('sqlite:////tmp/tweet.db')
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
    """Create the database tables for all models registered on Base."""
    Base.metadata.create_all(bind=engine)
class User(Base):
    """A Twitter-authenticated user and their OAuth 1.0a credentials."""
    __tablename__ = 'users'
    id = Column('user_id', Integer, primary_key=True)
    # Twitter screen name (used as the lookup key in authorized()).
    name = Column(String(60))
    # OAuth 1.0a token pair stored after a successful sign-in; served back
    # to the API by the tokengetter below.
    oauth_token = Column(Text)
    oauth_secret = Column(Text)

    def __init__(self, name):
        self.name = name
@app.before_request
def before_request():
    """Load the signed-in User (if any) from the session into flask.g."""
    g.user = None
    if 'user_id' in session:
        g.user = User.query.get(session['user_id'])
@app.after_request
def after_request(response):
    """Remove the scoped SQLAlchemy session at the end of each request."""
    db_session.remove()
    return response
@twitter.tokengetter
def get_twitter_token():
    '''
    This is used by the API to look for the auth token and secret that are used
    for Twitter API calls. If you don't want to store this in the database,
    consider putting it into the session instead.

    Since the Twitter API is OAuth 1.0a, the `tokengetter` must return a
    2-tuple: (oauth_token, oauth_secret).

    Returns None implicitly when nobody is signed in, which tells the
    extension there are no credentials available.
    '''
    user = g.user
    if user is not None:
        return user.oauth_token, user.oauth_secret
@app.route('/')
def index():
    """Render the home page; includes the signed-in user's timeline if any."""
    tweets = None
    if g.user is not None:
        resp = twitter.get('statuses/home_timeline.json')
        if resp.status == 200:
            tweets = resp.content
        else:
            # Non-200: render the page anyway, with a warning instead of tweets.
            flash('Unable to load tweets from Twitter. Maybe out of '
                  'API calls or Twitter is overloaded.')
    return render_template('index.html', tweets=tweets)
@app.route('/tweet', methods=['POST'])
def tweet():
    '''
    Calls the remote twitter API to create a new status update.
    '''
    # Require sign-in; bounce through /login and come back here afterwards.
    if g.user is None:
        return redirect(url_for('login', next=request.url))
    status = request.form['tweet']
    # Empty form submissions are silently ignored.
    if not status:
        return redirect(url_for('index'))

    resp = twitter.post('statuses/update.json', data={
        'status': status
    })
    # 403: Twitter rejected the status (e.g. too long); 401: bad credentials.
    if resp.status == 403:
        flash('Your tweet was too long.')
    elif resp.status == 401:
        flash('Authorization error with Twitter.')
    else:
        flash('Successfully tweeted your tweet (ID: #%s)' % resp.content['id'])
    return redirect(url_for('index'))
@app.route('/login')
def login():
    '''
    Calling into `authorize` will cause the OAuth 1.0a machinery to kick
    in. If all has worked out as expected or if the user denied access to
    his/her information, the remote application will redirect back to the callback URL
    provided.

    In our case, the 'authorized/' route handles the interaction after the redirect.
    '''
    # The `next` query parameter is threaded through the callback URL so the
    # user lands back where they started after authorization.
    return twitter.authorize(callback=url_for('authorized',
        _external=True,
        next=request.args.get('next') or request.referrer or None))
@app.route('/logout')
def logout():
    """Clear the signed-in user from the session and redirect back."""
    session.pop('user_id', None)
    flash('You were signed out')
    target = request.referrer or url_for('index')
    return redirect(target)
@app.route('/authorized')
@twitter.authorized_handler()
def authorized(resp, oauth_token):
    '''
    Called after authorization. After this function finished handling,
    the tokengetter from above is used to retrieve the 2-tuple containing the
    oauth_token and oauth_token_secret.

    Because reauthorization often changes any previous
    oauth_token/oauth_token_secret values, then we must update them in the
    database.

    If the application redirected back after denying, the `resp` passed
    to the function will be `None`. Unfortunately, OAuth 1.0a (the version
    that Twitter, LinkedIn, etc use) does not specify exactly what should
    happen when the user denies access. In the case of Twitter, a query
    parameter `denied=(some hash)` is appended to the redirect URL.
    '''
    next_url = request.args.get('next') or url_for('index')

    # check for the Twitter-specific "access_denied" indicator
    if resp is None and 'denied' in request.args:
        flash(u'You denied the request to sign in.')
        return redirect(next_url)

    # pull out the nicely parsed response content.
    content = resp.content

    user = User.query.filter_by(name=content['screen_name']).first()

    # this is the first time signing in for this user
    if user is None:
        user = User(content['screen_name'])
        db_session.add(user)

    # we now update the oauth_token and oauth_token_secret
    # this involves destructuring the 2-tuple that is passed back from the
    # Twitter API, so it can be easily stored in the SQL database
    user.oauth_token = oauth_token[0]
    user.oauth_secret = oauth_token[1]

    db_session.commit()

    # Mark the user as signed in for subsequent requests (see before_request).
    session['user_id'] = user.id
    flash('You were signed in')

    return redirect(next_url)
if __name__ == '__main__':
    # Run the Flask development server (DEBUG=True set above; not for production).
    app.run()
| {
"content_hash": "882cf293e15f8724ec101fb6160ec910",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 89,
"avg_line_length": 30.52358490566038,
"alnum_prop": 0.6773296244784422,
"repo_name": "joelverhagen/flask-rauth",
"id": "4568f8e6f5498f1dafcbce739b5866ade9fb629b",
"size": "6471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/tweet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38883"
}
],
"symlink_target": ""
} |
from fabric import colors
from fabric.api import env, local, cd, prefix, run, hide, sudo, settings
from fabric.contrib.console import confirm
# Filled in by app() below before any remote task runs.
REMOTE_WORKDIR = ''
ACTIVATE = ''

# Map of host IP -> human-readable name, used for deploy status messages.
hostnames = {
    '52.78.100.214': 'oilboy',
}
def app():
    """Point fabric's env (and the module-level REMOTE_WORKDIR/ACTIVATE
    globals) at the oilboy production host. Run as `fab app <task>`."""
    global REMOTE_WORKDIR, ACTIVATE
    env.hosts = ['52.78.100.214']
    env.user = 'ubuntu'
    env.key_filename = '~/.ssh/id_rsa'
    VIRTUALENV_DIR = '/home/ubuntu/Oilboy'
    REMOTE_WORKDIR = '%s/oilboy' % VIRTUALENV_DIR
    ACTIVATE = 'source %s/bin/activate' % VIRTUALENV_DIR
def flake8():
    """Run flake8 locally over every .py file and print a pass/fail summary.

    E501 (line length) and F401 (unused import) are ignored.
    """
    with settings(hide('warnings'), warn_only=True):
        result = local('find . -name "*.py" | xargs flake8 --ignore=E501,F401', capture=True)
        if result.failed:
            print '\n' + result
            # One flake8 finding per output line, so errors = newlines + 1.
            print "\nCode check failed with %d errors.." % (result.count('\n') + 1)
        else:
            print "\nCode check passed!"
def deploy():
    """Check, push, and deploy the application to the remote host.

    Steps: run flake8 locally, push master to origin, then on the
    server pull the new code, install requirements, collect static
    files, and restart the supervised service. Prompts for
    confirmation (and aborts) when checks fail or nothing was pushed.
    """
    with settings(hide('warnings'), warn_only=True):
        result = local('find . -name "*.py" | xargs flake8 --ignore=E501,F401', capture=True)
        print result
        if result.failed and confirm("Tests failed. Cancel?", default=True):
            return
    with settings(hide('warnings'), warn_only=True):
        result = local('git push origin master', capture=True)
        print result.stderr
        print result.stdout
        # git prints "Everything up-to-date" on stderr when there is
        # nothing new to push.
        if result.stderr == 'Everything up-to-date' and confirm("Nothing to push. Cancel?", default=True):
            return
    print(colors.blue(u'\u2728 Deploy to: %s\t\t[ ' % hostnames[env.host]) + colors.white('Started') + colors.blue(' ]'))
    with cd(REMOTE_WORKDIR), prefix(ACTIVATE):
        # Discard any stray edits on the server so the pull is clean.
        run('git stash save --keep-index', quiet=True)
        run('git stash drop', quiet=True)
        run('git pull origin')
        with hide('output'):
            run('pip install -r requirements.txt')
            run('./manage.py collectstatic -i flags* -i cache* -i *.html -i *.htm --noinput')
        sudo('supervisorctl update oilboy')
        sudo('supervisorctl restart oilboy')
    print(colors.blue(u'\u2705 Deploy to: %s\t\t[ ' % hostnames[env.host]) + colors.green('OK') + colors.blue(' ]'))
| {
"content_hash": "ff450a218ffc115afde0963214e3a9dd",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 122,
"avg_line_length": 31.102941176470587,
"alnum_prop": 0.6198581560283688,
"repo_name": "oilboy/oilboy",
"id": "80a9ce856bc03c5b8920bc33b6fadd7f18826118",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1360"
},
{
"name": "HTML",
"bytes": "899"
},
{
"name": "JavaScript",
"bytes": "2796"
},
{
"name": "Python",
"bytes": "9860"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import operator
import logging
import numpy as np
import pandas as pd
from .contracts import contract
from .coordinates import Coordinates
from .visual import VisualAttributes
from .visual import COLORS
from .exceptions import IncompatibleAttribute
from .component_link import (ComponentLink, CoordinateComponentLink,
BinaryComponentLink)
from .subset import Subset, InequalitySubsetState, SubsetState
from .hub import Hub
from .util import split_component_view, row_lookup
from ..utils import unique, shape_to_string, view_shape, coerce_numeric, check_sorted
from .decorators import clear_cache
from .message import (DataUpdateMessage,
DataAddComponentMessage, NumericalDataChangedMessage,
SubsetCreateMessage, ComponentsChangedMessage,
ComponentReplacedMessage)
from ..compat.collections import OrderedDict
from ..external import six
# Public API of this module.
__all__ = ['Data', 'ComponentID', 'Component', 'DerivedComponent',
           'CategoricalComponent', 'CoordinateComponent']
class ComponentIDDict(object):

    """Dict-like helper giving label-based access to a Data object's
    ComponentIDs (used as ``data.id[name]``).

    Lookup raises KeyError when the label is unknown or ambiguous.
    """

    def __init__(self, data, **kwargs):
        self.data = data

    def __getitem__(self, key):
        cid = self.data.find_component_id(key)
        if cid is None:
            raise KeyError("ComponentID not found or not unique: %s"
                           % key)
        return cid
class ComponentID(object):

    """ References a :class:`Component` object within a :class:`Data` object.

    ComponentIDs behave as keys::

       component_id = data.id[name]
       data[component_id] -> numpy array

    Comparison and arithmetic operators are overloaded to build lazy
    subset-state / component-link expression objects rather than
    evaluating anything immediately.
    """

    def __init__(self, label, hidden=False):
        """:param label: Name for the ID
           :type label: str
           :param hidden: Whether to hide the component by default"""
        self._label = str(label)
        self._hidden = hidden

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, value):
        """Change label.

        .. warning::
            Label changes are not currently tracked by client
            classes. Labels should only be changed before creating other
            client objects
        """
        self._label = str(value)

    @property
    def hidden(self):
        """Whether to hide the component by default"""
        return self._hidden

    def __str__(self):
        return str(self._label)

    def __repr__(self):
        return str(self._label)

    def __eq__(self, other):
        # Comparing against a plain number produces a subset-state
        # expression (e.g. data.id['x'] == 3); everything else falls
        # back to identity, which keeps ComponentIDs usable as dict keys.
        if np.issubsctype(type(other), np.number):
            return InequalitySubsetState(self, other, operator.eq)
        return other is self

    # In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
    if six.PY3:
        __hash__ = object.__hash__

    def __ne__(self, other):
        if np.issubsctype(type(other), np.number):
            return InequalitySubsetState(self, other, operator.ne)
        return other is not self

    def __gt__(self, other):
        return InequalitySubsetState(self, other, operator.gt)

    def __ge__(self, other):
        return InequalitySubsetState(self, other, operator.ge)

    def __lt__(self, other):
        return InequalitySubsetState(self, other, operator.lt)

    def __le__(self, other):
        return InequalitySubsetState(self, other, operator.le)

    def __add__(self, other):
        return BinaryComponentLink(self, other, operator.add)

    def __radd__(self, other):
        return BinaryComponentLink(other, self, operator.add)

    def __sub__(self, other):
        return BinaryComponentLink(self, other, operator.sub)

    def __rsub__(self, other):
        return BinaryComponentLink(other, self, operator.sub)

    def __mul__(self, other):
        return BinaryComponentLink(self, other, operator.mul)

    def __rmul__(self, other):
        return BinaryComponentLink(other, self, operator.mul)

    # __div__/__rdiv__ are only invoked on Python 2 (operator.div does
    # not exist on Python 3, but these methods are never called there).
    def __div__(self, other):
        return BinaryComponentLink(self, other, operator.div)

    def __rdiv__(self, other):
        return BinaryComponentLink(other, self, operator.div)

    def __truediv__(self, other):
        return BinaryComponentLink(self, other, operator.truediv)

    def __rtruediv__(self, other):
        return BinaryComponentLink(other, self, operator.truediv)

    def __pow__(self, other):
        return BinaryComponentLink(self, other, operator.pow)

    def __rpow__(self, other):
        return BinaryComponentLink(other, self, operator.pow)
class Component(object):

    """ Stores the actual, numerical information for a particular quantity

    Data objects hold one or more components, accessed via
    ComponentIDs. All Components in a data set must have the same
    shape and number of dimensions

    Notes
    -----
    Instead of instantiating Components directly, consider using
    :meth:`Component.autotyped`, which chooses a subclass most appropriate
    for the data type.
    """

    def __init__(self, data, units=None):
        """
        :param data: The data to store
        :type data: :class:`numpy.ndarray`

        :param units: Optional unit label
        :type units: str
        """

        # The physical units of the data
        self.units = units

        # The actual data
        # subclasses may pass non-arrays here as placeholders.
        if isinstance(data, np.ndarray):
            data = coerce_numeric(data)
            data.setflags(write=False)  # data is read-only
        self._data = data

    @property
    def units(self):
        return self._units

    @units.setter
    def units(self, value):
        # NOTE: stores str(value), so passing None yields the string 'None'.
        self._units = str(value)

    @property
    def hidden(self):
        """Whether the Component is hidden by default"""
        return False

    @property
    def data(self):
        """ The underlying :class:`numpy.ndarray` """
        return self._data

    @property
    def shape(self):
        """ Tuple of array dimensions """
        return self._data.shape

    @property
    def ndim(self):
        """ The number of dimensions """
        return len(self._data.shape)

    def __getitem__(self, key):
        logging.debug("Using %s to index data of shape %s", key, self.shape)
        return self._data[key]

    @property
    def numeric(self):
        """
        Whether or not the datatype is numeric
        """
        # Tests whether the first element can be cast to complex.
        # NOTE(review): assumes non-empty data -- confirm callers.
        return np.can_cast(self.data[0], np.complex)

    @property
    def categorical(self):
        """
        Whether or not the datatype is categorical
        """
        return False

    def __str__(self):
        return "Component with shape %s" % shape_to_string(self.shape)

    def jitter(self, method=None):
        # Only meaningful for categorical data; see CategoricalComponent.
        raise NotImplementedError

    def to_series(self, **kwargs):
        """ Convert into a pandas.Series object.

        :param kwargs: All kwargs are passed to the Series constructor.
        :return: pandas.Series
        """
        return pd.Series(self.data.ravel(), **kwargs)

    @classmethod
    def autotyped(cls, data, units=None):
        """
        Automatically choose between Component and CategoricalComponent,
        based on the input data type.

        :param data: The data to pack into a Component (array-like)
        :param units: Optional units
        :type units: str

        :returns: A Component (or subclass)
        """
        data = np.asarray(data)

        # Object arrays are always treated as categorical.
        if np.issubdtype(data.dtype, np.object_):
            return CategoricalComponent(data, units=units)

        n = coerce_numeric(data)
        # Heuristic: string data whose coerced values are mostly
        # non-finite (<= 50% finite) is treated as categorical.
        thresh = 0.5
        try:
            use_categorical = np.issubdtype(data.dtype, np.character) and \
                np.isfinite(n).mean() <= thresh
        except TypeError:  # isfinite not supported. non-numeric dtype
            use_categorical = True

        if use_categorical:
            return CategoricalComponent(data, units=units)
        else:
            return Component(n, units=units)
class DerivedComponent(Component):

    """A component whose values are not stored, but computed on demand
    by evaluating a ComponentLink against a parent Data object."""

    def __init__(self, data, link, units=None):
        """
        :param data: The data object to use for calculation
        :type data: :class:`~glue.core.data.Data`
        :param link: The link that carries out the function
        :type link: :class:`~glue.core.component_link.ComponentLink`
        :param units: Optional unit description
        """
        super(DerivedComponent, self).__init__(data, units=units)
        self._link = link

    def set_parent(self, data):
        """Reassign the Data object that this DerivedComponent operates on."""
        self._data = data

    @property
    def hidden(self):
        # Visibility is delegated to the link.
        return self._link.hidden

    @property
    def link(self):
        """The :class:`ComponentLink` that performs the computation."""
        return self._link

    @property
    def data(self):
        """Evaluate the link on the parent data; returns a numpy array."""
        return self._link.compute(self._data)

    def __getitem__(self, key):
        # Indexed access also goes through the link, so only the
        # requested view is computed.
        return self._link.compute(self._data, key)
class CoordinateComponent(Component):

    """
    Components associated with pixel or world coordinates

    The numerical values are computed on the fly.
    """

    def __init__(self, data, axis, world=False):
        # data is the parent Data object, not an array, so bypass
        # Component's array coercion by passing None.
        super(CoordinateComponent, self).__init__(None, None)
        self.world = world
        self._data = data
        self.axis = axis

    @property
    def data(self):
        return self._calculate()

    def _calculate(self, view=None):
        # Build open index grids spanning the full data shape, broadcast
        # them to full arrays, then optionally restrict to the view.
        slices = [slice(0, s, 1) for s in self.shape]
        grids = np.broadcast_arrays(*np.ogrid[slices])
        if view is not None:
            grids = [g[view] for g in grids]

        if self.world:
            # pixel2world takes/returns axes in reversed (x-fastest)
            # order, hence the [::-1] on the way in and out.
            world = self._data.coords.pixel2world(*grids[::-1])[::-1]
            return world[self.axis]
        else:
            return grids[self.axis]

    @property
    def shape(self):
        """ Tuple of array dimensions. """
        # Shape comes from the parent Data object, not stored values.
        return self._data.shape

    @property
    def ndim(self):
        """ Number of dimensions """
        return len(self._data.shape)

    def __getitem__(self, key):
        return self._calculate(key)

    def __lt__(self, other):
        # Order: world coordinates before pixel, then by axis number.
        if self.world == other.world:
            return self.axis < other.axis
        return self.world

    def __gluestate__(self, context):
        # Serialization hook: the parent Data is re-attached on restore.
        return dict(axis=self.axis, world=self.world)

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(None, rec['axis'], rec['world'])
class CategoricalComponent(Component):

    """
    Container for categorical data.

    The raw values are kept in ``_categorical_data``; ``_data`` holds the
    numeric codes (index into the sorted category list), optionally
    jittered for plotting.
    """

    def __init__(self, categorical_data, categories=None, jitter=None, units=None):
        """
        :param categorical_data: The underlying :class:`numpy.ndarray`
        :param categories: List of unique values in the data
        :param jitter: Strategy for jittering the data (None | 'uniform')
        :param units: Optional unit label
        """
        super(CategoricalComponent, self).__init__(None, units)

        self._categorical_data = np.asarray(categorical_data)
        if self._categorical_data.ndim > 1:
            raise ValueError("Categorical Data must be 1-dimensional")

        # Disable changing of categories
        self._categorical_data.setflags(write=False)

        self._categories = categories
        self._jitter_method = jitter
        self._is_jittered = False
        self._data = None
        if self._categories is None:
            self._update_categories()
        else:
            self._update_data()

    @property
    def categorical(self):
        return True

    def _update_categories(self, categories=None):
        """
        :param categories: A sorted array of categories to find in the dataset.
        If None the categories are the unique items in the data.
        :return: None
        """
        if categories is None:
            categories, inv = unique(self._categorical_data)
            self._categories = categories
            # float (not np.float): the np.float alias was removed in
            # modern NumPy; both denote the same 64-bit float dtype.
            self._data = inv.astype(float)
            self._data.setflags(write=False)
            self.jitter(method=self._jitter_method)
        else:
            if check_sorted(categories):
                self._categories = categories
                self._update_data()
            else:
                raise ValueError("Provided categories must be Sorted")

    def _update_data(self):
        """
        Converts the categorical data into the numeric representations given
        self._categories
        """
        self._is_jittered = False
        self._data = row_lookup(self._categorical_data, self._categories)
        self.jitter(method=self._jitter_method)
        self._data.setflags(write=False)

    def jitter(self, method=None):
        """
        Jitter the data so the density of points can be easily seen in a
        scatter plot.

        :param method: None | 'uniform':

        * None: No jittering is done (or any jittering is undone).
        * uniform: A uniformly distributed random variable (-0.5, 0.5)
            is applied to each point.

        :raises ValueError: if method is not one of the above
        :return: None
        """
        if method not in set(['uniform', None]):
            raise ValueError('%s jitter not supported' % method)
        self._jitter_method = method
        # Fixed seed so repeated jitters are reproducible.
        seed = 1234567890
        rand_state = np.random.RandomState(seed)

        if (self._jitter_method is None) and self._is_jittered:
            self._update_data()
        # BUG FIX: was `self._jitter_method is 'uniform'`, an identity
        # comparison that relies on CPython string interning; use `==`.
        elif (self._jitter_method == 'uniform') and not self._is_jittered:
            iswrite = self._data.flags['WRITEABLE']
            self._data.setflags(write=True)
            self._data += rand_state.uniform(-0.5, 0.5, size=self._data.shape)
            self._is_jittered = True
            self._data.setflags(write=iswrite)

    def to_series(self, **kwargs):
        """ Convert into a pandas.Series object.

        This will be converted as a dtype=object Series (the raw
        categorical values, not the numeric codes).

        :param kwargs: All kwargs are passed to the Series constructor.
        :return: pandas.Series
        """
        # object (not np.object): the np.object alias was removed in
        # modern NumPy; both denote the builtin object dtype.
        return pd.Series(self._categorical_data.ravel(),
                         dtype=object, **kwargs)
class Data(object):

    """The basic data container in Glue.

    The data object stores data as a collection of
    :class:`~glue.core.data.Component` objects. Each component stored in a
    dataset must have the same shape.

    Catalog data sets are stored such that each column is a distinct
    1-dimensional :class:`~glue.core.data.Component`.

    There are several ways to extract the actual numerical data stored in a
    :class:`~glue.core.data.Data` object::

       data = Data(x=[1, 2, 3], label='data')
       xid = data.id['x']

       data[xid]
       data.get_component(xid).data
       data['x']  # if 'x' is a unique component name

    Likewise, datasets support :ref:`fancy indexing <numpy:basics.indexing>`::

        data[xid, 0:2]
        data[xid, [True, False, True]]

    See also: :ref:`data_tutorial`
    """

    def __init__(self, label="", **kwargs):
        """
        :param label: label for data
        :type label: str

        Extra array-like keywords are extracted into components
        """
        # Coordinate conversion object
        self.coords = Coordinates()
        self._shape = ()

        # Components
        self._components = OrderedDict()
        self._pixel_component_ids = []
        self._world_component_ids = []
        self.id = ComponentIDDict(self)

        # Subsets of the data
        self._subsets = []

        # Hub that the data is attached to
        self.hub = None

        self.style = VisualAttributes(parent=self)
        self._coordinate_links = None

        self.data = self
        self.label = label

        self.edit_subset = None

        # sorted() so component creation order is deterministic
        for lbl, data in sorted(kwargs.items()):
            self.add_component(data, lbl)

        self._key_joins = {}

    @property
    def subsets(self):
        """
        Tuple of subsets attached to this dataset
        """
        return tuple(self._subsets)

    @property
    def ndim(self):
        """
        Dimensionality of the dataset
        """
        return len(self.shape)

    @property
    def shape(self):
        """
        Tuple of array dimensions, like :attr:`numpy.ndarray.shape`
        """
        return self._shape

    @property
    def label(self):
        """ Convenience access to data set's label """
        return self._label

    @label.setter
    def label(self, value):
        """ Set the label to value
        """
        self._label = value
        self.broadcast(attribute='label')

    @property
    def size(self):
        """
        Total number of elements in the dataset.
        """
        # np.prod: np.product was a deprecated alias, removed in NumPy 2.0
        return np.prod(self.shape)

    @contract(component=Component)
    def _check_can_add(self, component):
        # DerivedComponents must already point at this data; plain
        # components must match our shape (any shape is fine when empty).
        if isinstance(component, DerivedComponent):
            return component._data is self
        else:
            if len(self._components) == 0:
                return True
            return component.shape == self.shape

    @contract(cid=ComponentID, returns=np.dtype)
    def dtype(self, cid):
        """Lookup the dtype for the data associated with a ComponentID"""

        # grab a small piece of data
        ind = tuple([slice(0, 1)] * self.ndim)
        arr = self[cid, ind]
        return arr.dtype

    @contract(component_id=ComponentID)
    def remove_component(self, component_id):
        """ Remove a component from a data set

        :param component_id: the component to remove
        :type component_id: :class:`~glue.core.data.ComponentID`
        """
        if component_id in self._components:
            self._components.pop(component_id)

    @contract(other='isinstance(Data)',
              cid='cid_like',
              cid_other='cid_like')
    def join_on_key(self, other, cid, cid_other):
        """
        Create an *element* mapping to another dataset, by
        joining on values of ComponentIDs in both datasets.

        This join allows any subsets defined on `other` to be
        propagated to self.

        :param other: :class:`Data` to join with
        :param cid: str or :class:`ComponentID` in this dataset to use as a key
        :param cid_other: ComponentID in the other dataset to use as a key

        :example:

            >>> d1 = Data(x=[1, 2, 3, 4, 5], k1=[0, 0, 1, 1, 2], label='d1')
            >>> d2 = Data(y=[2, 4, 5, 8, 4], k2=[1, 3, 1, 2, 3], label='d2')
            >>> d2.join_on_key(d1, 'k2', 'k1')

            >>> s = d1.new_subset()
            >>> s.subset_state = d1.id['x'] > 2
            >>> s.to_mask()
            array([False, False,  True,  True,  True], dtype=bool)

            >>> s = d2.new_subset()
            >>> s.subset_state = d1.id['x'] > 2
            >>> s.to_mask()
            array([ True, False,  True,  True, False], dtype=bool)

        The subset state selects the last 3 items in d1. These have
        key values k1 of 1 and 2. Thus, the selected items in d2
        are the elements where k2 = 1 or 2.
        """
        # Keep the raw inputs for error messages before resolving.
        _i1, _i2 = cid, cid_other
        cid = self.find_component_id(cid)
        cid_other = other.find_component_id(cid_other)
        if cid is None:
            raise ValueError("ComponentID not found in %s: %s" %
                             (self.label, _i1))
        if cid_other is None:
            raise ValueError("ComponentID not found in %s: %s" %
                             (other.label, _i2))

        # The join is symmetric: record it on both datasets.
        self._key_joins[other] = (cid, cid_other)
        other._key_joins[self] = (cid_other, cid)

    @contract(component='component_like', label='cid_like')
    def add_component(self, component, label, hidden=False):
        """ Add a new component to this data set.

        :param component: object to add. Can be a Component,
                          array-like object, or ComponentLink
        :param label:
              The label. If this is a string,
              a new :class:`ComponentID` with this label will be
              created and associated with the Component

        :type component: :class:`~glue.core.data.Component` or
                         array-like
        :type label: :class:`str` or :class:`~glue.core.data.ComponentID`

        :raises:

           TypeError, if label is invalid
           ValueError if the component has an incompatible shape

        :returns:

           The ComponentID associated with the newly-added component
        """
        if isinstance(component, ComponentLink):
            component = DerivedComponent(self, component)

        if not isinstance(component, Component):
            component = Component.autotyped(component)

        if isinstance(component, DerivedComponent):
            component.set_parent(self)

        if not self._check_can_add(component):
            raise ValueError("The dimensions of component %s are "
                             "incompatible with the dimensions of this data: "
                             "%r vs %r" % (label, component.shape, self.shape))

        if isinstance(label, ComponentID):
            component_id = label
        else:
            component_id = ComponentID(label, hidden=hidden)

        is_present = component_id in self._components
        self._components[component_id] = component

        # The first (non-derived) component fixes the data shape and
        # triggers creation of the pixel/world coordinate components.
        first_component = len(self._components) == 1
        if first_component:
            if isinstance(component, DerivedComponent):
                raise TypeError("Cannot add a derived component as "
                                "first component")
            self._shape = component.shape
            self._create_pixel_and_world_components()

        if self.hub and (not is_present):
            msg = DataAddComponentMessage(self, component_id)
            self.hub.broadcast(msg)
            msg = ComponentsChangedMessage(self)
            self.hub.broadcast(msg)

        return component_id

    @contract(link=ComponentLink,
              label='cid_like|None',
              returns=DerivedComponent)
    def add_component_link(self, link, label=None):
        """ Shortcut method for generating a new :class:`DerivedComponent`
        from a ComponentLink object, and adding it to a data set.

        :param link: :class:`~glue.core.component_link.ComponentLink`
        :param label: The ComponentID or label to attach to.
        :type label: :class:`~glue.core.data.ComponentID` or str

        :returns:
            The :class:`DerivedComponent` that was added
        """
        if label is not None:
            if not isinstance(label, ComponentID):
                label = ComponentID(label)
            link.set_to_id(label)

        if link.get_to_id() is None:
            raise TypeError("Cannot add component_link: "
                            "has no 'to' ComponentID")

        dc = DerivedComponent(self, link)
        to_ = link.get_to_id()
        self.add_component(dc, to_)
        return dc

    def _create_pixel_and_world_components(self):
        # One hidden pixel-coordinate component per axis; world
        # components only when a coordinate transform is present.
        for i in range(self.ndim):
            comp = CoordinateComponent(self, i)
            label = pixel_label(i, self.ndim)
            cid = self.add_component(comp, "Pixel %s" % label, hidden=True)
            self._pixel_component_ids.append(cid)
        if self.coords:
            for i in range(self.ndim):
                comp = CoordinateComponent(self, i, world=True)
                label = self.coords.axis_label(i)
                cid = self.add_component(comp, label, hidden=True)
                self._world_component_ids.append(cid)

    @property
    def components(self):
        """ All :class:`ComponentIDs <ComponentID>` in the Data

        :rtype: list
        """
        return sorted(self._components.keys(), key=lambda x: x.label)

    @property
    def visible_components(self):
        """ :class:`ComponentIDs <ComponentID>` for all non-hidden components.

        :rtype: list
        """
        return [cid for cid, comp in self._components.items()
                if not cid.hidden and not comp.hidden]

    @property
    def primary_components(self):
        """The ComponentIDs not associated with a :class:`DerivedComponent`

        :rtype: list
        """
        return [c for c in self.component_ids() if
                not isinstance(self._components[c], DerivedComponent)]

    @property
    def derived_components(self):
        """The ComponentIDs for each :class:`DerivedComponent`

        :rtype: list
        """
        return [c for c in self.component_ids() if
                isinstance(self._components[c], DerivedComponent)]

    @property
    def pixel_component_ids(self):
        """
        The :class:`ComponentIDs <ComponentID>` for each pixel coordinate.
        """
        return self._pixel_component_ids

    @property
    def world_component_ids(self):
        """
        The :class:`ComponentIDs <ComponentID>` for each world coordinate.
        """
        return self._world_component_ids

    @contract(label='cid_like', returns='inst($ComponentID)|None')
    def find_component_id(self, label):
        """ Retrieve component_ids associated by label name.

        :param label: ComponentID or string to search for

        :returns:
            The associated ComponentID if label is found and unique, else None
        """
        result = [cid for cid in self.component_ids() if
                  cid.label == label or cid is label]
        # Implicitly returns None when not found or ambiguous.
        if len(result) == 1:
            return result[0]

    @property
    def coordinate_links(self):
        """A list of the ComponentLinks that connect pixel and
        world. If no coordinate transformation object is present,
        return an empty list.
        """
        if self._coordinate_links:
            return self._coordinate_links

        if not self.coords:
            return []

        if self.ndim != len(self._pixel_component_ids) or \
                self.ndim != len(self._world_component_ids):
            # haven't populated pixel, world coordinates yet
            return []

        # (Removed two unused local closures, make_toworld_func and
        # make_topixel_func: they were never called or referenced.)
        result = []
        for i in range(self.ndim):
            link = CoordinateComponentLink(self._pixel_component_ids,
                                           self._world_component_ids[i],
                                           self.coords, i)
            result.append(link)
            link = CoordinateComponentLink(self._world_component_ids,
                                           self._pixel_component_ids[i],
                                           self.coords, i, pixel2world=False)
            result.append(link)

        self._coordinate_links = result
        return result

    @contract(axis=int, returns=ComponentID)
    def get_pixel_component_id(self, axis):
        """Return the pixel :class:`ComponentID` associated with a given axis
        """
        return self._pixel_component_ids[axis]

    @contract(axis=int, returns=ComponentID)
    def get_world_component_id(self, axis):
        """Return the world :class:`ComponentID` associated with a given axis
        """
        return self._world_component_ids[axis]

    @contract(returns='list(inst($ComponentID))')
    def component_ids(self):
        """
        Equivalent to :attr:`Data.components`
        """
        return list(self._components.keys())

    @contract(subset='isinstance(Subset)|None',
              color='color|None',
              label='string|None',
              returns=Subset)
    def new_subset(self, subset=None, color=None, label=None, **kwargs):
        """
        Create a new subset, and attach to self.

        .. note:: The preferred way for creating subsets is via
            :meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
            Manually-instantiated subsets will **not** be
            represented properly by the UI

        :param subset: optional, reference subset or subset state.
                       If provided, the new subset will copy the logic of
                       this subset.

        :returns: The new subset object
        """
        nsub = len(self.subsets)
        # Cycle colors; number labels sequentially per dataset.
        color = color or COLORS[nsub % len(COLORS)]
        label = label or "%s.%i" % (self.label, nsub + 1)
        new_subset = Subset(self, color=color, label=label, **kwargs)
        if subset is not None:
            new_subset.subset_state = subset.subset_state.copy()

        self.add_subset(new_subset)
        return new_subset

    @contract(subset='inst($Subset, $SubsetState)')
    def add_subset(self, subset):
        """Assign a pre-existing subset to this data object.

        :param subset: A :class:`~glue.core.subset.Subset` or
                       :class:`~glue.core.subset.SubsetState` object

        If input is a :class:`~glue.core.subset.SubsetState`,
        it will be wrapped in a new Subset automatically

        .. note:: The preferred way for creating subsets is via
            :meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
            Manually-instantiated subsets will **not** be
            represented properly by the UI
        """
        if subset in self.subsets:
            return  # prevents infinite recursion
        if isinstance(subset, SubsetState):
            # auto-wrap state in subset
            state = subset
            subset = Subset(None)
            subset.subset_state = state

        self._subsets.append(subset)

        if subset.data is not self:
            subset.do_broadcast(False)
            subset.data = self
            subset.label = subset.label  # hacky. disambiguates name if needed

        if self.hub is not None:
            msg = SubsetCreateMessage(subset)
            self.hub.broadcast(msg)

        subset.do_broadcast(True)

    @contract(hub=Hub)
    def register_to_hub(self, hub):
        """ Connect to a hub.

        This method usually doesn't have to be called directly, as
        DataCollections manage the registration of data objects
        """
        if not isinstance(hub, Hub):
            raise TypeError("input is not a Hub object: %s" % type(hub))
        self.hub = hub

    @contract(attribute='string')
    def broadcast(self, attribute):
        """
        Send a :class:`~glue.core.message.DataUpdateMessage` to the hub

        :param attribute: Name of an attribute that has changed (or None)
        :type attribute: string
        """
        if not self.hub:
            return
        msg = DataUpdateMessage(self, attribute=attribute)
        self.hub.broadcast(msg)

    @contract(old=ComponentID, new=ComponentID)
    def update_id(self, old, new):
        """Reassign a component to a different :class:`ComponentID`

        :param old: The old :class:`ComponentID`.
        :param new: The new :class:`ComponentID`.
        """
        if new is old:
            return

        changed = False
        if old in self._components:
            self._components[new] = self._components[old]
            changed = True
        try:
            index = self._pixel_component_ids.index(old)
            self._pixel_component_ids[index] = new
            changed = True
        except ValueError:
            pass
        try:
            index = self._world_component_ids.index(old)
            self._world_component_ids[index] = new
            changed = True
        except ValueError:
            pass

        if changed and self.hub is not None:

            # promote hidden status
            new._hidden = new.hidden and old.hidden

            # remove old component and broadcast the change
            # see #508 for discussion of this
            self._components.pop(old)
            msg = ComponentReplacedMessage(self, old, new)
            self.hub.broadcast(msg)

    def __str__(self):
        # BUG FIX: the label line previously lacked a trailing newline,
        # so it ran into the "Number of dimensions" line.
        s = "Data Set: %s\n" % self.label
        s += "Number of dimensions: %i\n" % self.ndim
        s += "Shape: %s\n" % ' x '.join([str(x) for x in self.shape])
        s += "Components:\n"
        for i, component in enumerate(self._components):
            s += " %i) %s\n" % (i, component)
        return s[:-1]

    def __repr__(self):
        return 'Data (label: %s)' % self.label

    def __setattr__(self, name, value):
        # Guard against re-parenting a dataset to a second hub.
        if name == "hub" and hasattr(self, 'hub') \
                and self.hub is not value and self.hub is not None:
            raise AttributeError("Data has already been assigned "
                                 "to a different hub")
        object.__setattr__(self, name, value)

    def __getitem__(self, key):
        """ Shortcut syntax to access the numerical data in a component.
        Equivalent to:

        ``component = data.get_component(component_id).data``

        :param key:
          The component to fetch data from

        :type key: :class:`~glue.core.data.ComponentID`

        :returns: :class:`~numpy.ndarray`
        """
        key, view = split_component_view(key)
        if isinstance(key, six.string_types):
            _k = key
            key = self.find_component_id(key)
            if key is None:
                raise IncompatibleAttribute(_k)

        if isinstance(key, ComponentLink):
            return key.compute(self, view)

        try:
            comp = self._components[key]
        except KeyError:
            raise IncompatibleAttribute(key)

        shp = view_shape(self.shape, view)
        if view is not None:
            result = comp[view]
        else:
            result = comp.data

        assert result.shape == shp, \
            "Component view returned bad shape: %s %s" % (result.shape, shp)
        return result

    def __setitem__(self, key, value):
        """
        Wrapper for data.add_component()
        """
        self.add_component(value, key)

    @contract(component_id='cid_like|None', returns=Component)
    def get_component(self, component_id):
        """Fetch the component corresponding to component_id.

        :param component_id: the component_id to retrieve
        """
        if component_id is None:
            raise IncompatibleAttribute()

        if isinstance(component_id, six.string_types):
            component_id = self.id[component_id]

        try:
            return self._components[component_id]
        except KeyError:
            raise IncompatibleAttribute(component_id)

    def to_dataframe(self, index=None):
        """ Convert the Data object into a pandas.DataFrame object

        :param index: Any 'index-like' object that can be passed to the
            pandas.Series constructor

        :return: pandas.DataFrame
        """

        h = lambda comp: self.get_component(comp).to_series(index=index)
        df = pd.DataFrame(dict((comp.label, h(comp)) for comp in self.components))
        order = [comp.label for comp in self.components]
        return df[order]

    @contract(mapping="dict(inst($Component, $ComponentID):array_like)")
    def update_components(self, mapping):
        """
        Change the numerical data associated with some of the Components
        in this Data object.

        All changes to component numerical data should use this method,
        which broadcasts the state change to the appropriate places.

        :param mapping: A dict mapping Components or ComponenIDs to arrays.

        This method has the following restrictions:

        - New compoments must have the same shape as old compoments
        - Component subclasses cannot be updated.
        """
        for comp, data in mapping.items():
            if isinstance(comp, ComponentID):
                comp = self.get_component(comp)
            data = np.asarray(data)
            if data.shape != self.shape:
                raise ValueError("Cannot change shape of data")

            comp._data = data

        # alert hub of the change
        if self.hub is not None:
            msg = NumericalDataChangedMessage(self)
            self.hub.broadcast(msg)

        # Subset masks may cache values derived from the old data.
        for subset in self.subsets:
            clear_cache(subset.subset_state.to_mask)
@contract(i=int, ndim=int)
def pixel_label(i, ndim):
    """Return a short name for pixel axis *i* of *ndim*-dimensional data.

    2-D and 3-D data use reversed cartesian names (slowest axis first);
    any other dimensionality gets a generic "Axis <i>" label.
    """
    if ndim in (2, 3):
        return ['z', 'y', 'x'][-ndim:][i]
    return "Axis %s" % i
| {
"content_hash": "34923ad613da18181238a41ed7f23cfb",
"timestamp": "",
"source": "github",
"line_count": 1154,
"max_line_length": 97,
"avg_line_length": 31.4367417677643,
"alnum_prop": 0.5859474061414631,
"repo_name": "JudoWill/glue",
"id": "93169d126b47eeb71243f854f77737607a717b6c",
"size": "36278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/core/data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "1387891"
},
{
"name": "Shell",
"bytes": "1968"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.