###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as pl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.dates as md
import numpy as np
import pandas as pd
import datetime as dt
from time import time
from numpy import array
from sklearn.metrics import f1_score, accuracy_score
def evaluateClassifiers(results, accuracy, f1):
"""
Visualization code to display results of various learners.
inputs:
- learners: a list of supervised learners
- stats: a list of dictionaries of the statistic results from 'train_predict()'
- accuracy: The score for the naive predictor
- f1: The score for the naive predictor
"""
# Create figure
fig, ax = pl.subplots(2, 3, figsize = (11,7))
# Constants
bar_width = 1
colors = ['#A00000','#00A0A0','#00A000']
# Super loop to plot four panels of data
for k, learner in enumerate(results.keys()):
for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
# Creative plot code
            ax[j//3, j%3].bar(k*bar_width, results[learner][metric], width = bar_width, color = colors[k])
            #ax[j//3, j%3].set_xticks([0.45, 1.45, 2.45])
            #ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"])
            #ax[j//3, j%3].set_xlabel("Training Set Size")
            #ax[j//3, j%3].set_xlim((-0.1, 3.0))
# Add unique y-labels
ax[0, 0].set_ylabel("Time (in seconds)")
ax[0, 1].set_ylabel("Accuracy Score")
ax[0, 2].set_ylabel("F-score")
ax[1, 0].set_ylabel("Time (in seconds)")
ax[1, 1].set_ylabel("Accuracy Score")
ax[1, 2].set_ylabel("F-score")
# Add titles
ax[0, 0].set_title("Model Training")
ax[0, 1].set_title("Accuracy Score on Training Subset")
ax[0, 2].set_title("F-score on Training Subset")
ax[1, 0].set_title("Model Predicting")
ax[1, 1].set_title("Accuracy Score on Testing Set")
ax[1, 2].set_title("F-score on Testing Set")
# Add horizontal lines for naive predictors
ax[0, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[0, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
# Set y-limits for score panels
ax[0, 1].set_ylim((0, 1))
ax[0, 2].set_ylim((0, 1))
ax[1, 1].set_ylim((0, 1))
ax[1, 2].set_ylim((0, 1))
# Create patches for the legend
patches = []
for i, learner in enumerate(results.keys()):
patches.append(mpatches.Patch(color = colors[i], label = learner))
pl.legend(handles = patches, bbox_to_anchor = (-.80, 2.53), \
loc = 'upper center', borderaxespad = 0., ncol = 3, fontsize = 'x-large')
# Aesthetics
pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.10)
pl.tight_layout()
pl.show()
def featureImportance(X_train, y_train):
from sklearn.ensemble import RandomForestClassifier
feat_labels = X_train.columns[:]
forest = RandomForestClassifier(n_estimators=1000, random_state=0, n_jobs=-1)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
print("%2d %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[[f]]]))
plt.title('Feature Importances')
plt.bar(range(X_train.shape[1]),
importances[indices],
color='lightblue',
align='center')
plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.show()
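# A minimal usage sketch (not part of the original module). The toy DataFrame,
# its column names and the target below are hypothetical; they only illustrate
# the expected inputs of featureImportance (a feature DataFrame and a label vector).
def _demo_feature_importance():
    X = pd.DataFrame(np.random.rand(200, 3), columns=["f1", "f2", "f3"])
    y = (X["f1"] + 0.1 * np.random.rand(200) > 0.5).astype(int)
    featureImportance(X, y)  # prints a ranking and shows a bar chart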
def evaluateClassifierPrediction(results, classifier_name, metric):
acc_test_values = []
acc_train_values = []
labels = []
for key in results:
        acc_test_values.append(array([results[key][n][classifier_name][metric + '_test'] for n in results[key]]).mean())
        acc_train_values.append(array([results[key][n][classifier_name][metric + '_train'] for n in results[key]]).mean())
labels.append(key)
    plt.plot(range(1, len(acc_test_values) + 1), acc_test_values)
    plt.plot(range(1, len(acc_train_values) + 1), acc_train_values)
    ax = plt.gca()
ax.set_ylim([0, 1])
ax.set_xticks(range(1, len(acc_test_values) + 1))
plt.xlabel('Token')
ax.grid()
plt.legend(['Test','Train'], loc=4)
plt.title('Accuracy')
plt.show()
print(labels)
def ResultOutput(dataframe):
df = dataframe.copy()
idx = df.groupby(['Ticker'])['R2 test'].transform(max) == df['R2 test']
df = df[idx]
print(df)
def PlotR2Score(df, title, score):
# data to plot
n_groups = 5
    score_AAPL = df[(df['Ticker'] == 'AAPL')]['R2 test'].to_numpy().tolist()
    score_MSFT = df[(df['Ticker'] == 'MSFT')]['R2 test'].to_numpy().tolist()
    score_ACN = df[(df['Ticker'] == 'ACN')]['R2 test'].to_numpy().tolist()
    score_GOOG = df[(df['Ticker'] == 'GOOG')]['R2 test'].to_numpy().tolist()
    score_CSCO = df[(df['Ticker'] == 'CSCO')]['R2 test'].to_numpy().tolist()
    score_EBAY = df[(df['Ticker'] == 'EBAY')]['R2 test'].to_numpy().tolist()
    score_EA = df[(df['Ticker'] == 'EA')]['R2 test'].to_numpy().tolist()
    score_HP = df[(df['Ticker'] == 'HP')]['R2 test'].to_numpy().tolist()
    score_IBM = df[(df['Ticker'] == 'IBM')]['R2 test'].to_numpy().tolist()
    score_INTC = df[(df['Ticker'] == 'INTC')]['R2 test'].to_numpy().tolist()
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.05
opacity = 0.8
rectsAAPL = plt.bar(index, score_AAPL, bar_width, alpha=opacity, label='AAPL')
rectsMSFT = plt.bar(index + bar_width, score_MSFT, bar_width, alpha=opacity, label='MSFT')
rectsACN = plt.bar(index + 2 * bar_width, score_ACN, bar_width, alpha=opacity, label='ACN')
rectsGOOG = plt.bar(index + 3 * bar_width, score_GOOG, bar_width, alpha=opacity, label='GOOG')
rectsCSCO = plt.bar(index + 4 * bar_width, score_CSCO, bar_width, alpha=opacity, label='CSCO')
rectsEBAY = plt.bar(index + 5 * bar_width, score_EBAY, bar_width, alpha=opacity, label='EBAY')
rectsEA = plt.bar(index + 6 * bar_width, score_EA, bar_width, alpha=opacity, label='EA')
rectsHP = plt.bar(index + 7 * bar_width, score_HP, bar_width, alpha=opacity, label='HP')
rectsIBM = plt.bar(index + 8 * bar_width, score_IBM, bar_width, alpha=opacity, label='IBM')
rectsINTC = plt.bar(index + 9 * bar_width, score_INTC, bar_width, alpha=opacity, label='INTC')
legend = ax.legend(loc='lower center', bbox_to_anchor=(1.1, 0.2), shadow=True)
plt.xlabel('Algorithm')
plt.ylabel('Score')
plt.title(title)
plt.xticks(index + bar_width, ('SVR', 'Ada', 'Lasso', 'Ridge', 'Linear'))
if score == 'mse':
        plt.ylim(-0.25, np.max(df['Score'].to_numpy()) + 1)
else:
plt.ylim(-0.25, 1)
plt.tight_layout()
plt.show()
def ResidualPlot(y_train, y_train_pred, y_test, y_test_pred):
plt.scatter(y_train_pred,
y_train_pred - y_train,
c='steelblue',
edgecolor='white',
marker='o',
s=35,
alpha=0.9,
label='training data')
plt.scatter(y_test_pred,
y_test_pred - y_test,
c='limegreen',
edgecolor='white',
marker='s',
s=35,
alpha=0.9,
label='test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='black')
plt.xlim([-10, 50])
plt.tight_layout()
# plt.savefig('images/10_14.png', dpi=300)
plt.show()
|
"""
Standard algorithms for graphs with networkx library
"""
from scipy.io import mmread # only for loading mtx files
import networkx as nx
def std_undir_graph_from_mm_file(path):
return nx.from_scipy_sparse_matrix(mmread(path))
def std_dir_graph_from_mm_file(path):
return nx.from_scipy_sparse_matrix(mmread(path), create_using=nx.DiGraph)
def std_bfs(graph, src_vertex):
"""
Computes traversal level for each vertex from source vertex.
graph: any graph
src_vertex: source vertex
return: Vector of distances to visited vertices
"""
# this sssp yields (node, level) in a breadth first search
res = nx.single_source_shortest_path_length(graph, src_vertex)
return [dist+1 for _, dist in sorted(res.items())]
def std_triangles_count(graph):
"""
Computes the number of triangles in the graph.
graph: undirected graph
return: (int) number of triangles
"""
if nx.is_directed(graph):
raise Exception("Graph is not undirected")
return sum(nx.triangles(graph).values()) // 3
def std_bellman_ford(graph, src_vertex):
"""
From a given start vertex, finds the shortest paths to every other
(reachable) vertex in the graph.
graph: weighted graph
src_vertex: source vertex
return: Vector of computed distances
"""
res = nx.single_source_bellman_ford_path_length(graph, src_vertex)
return [dist for _, dist in sorted(res.items())]
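# A minimal sketch of how the helpers above can be exercised on a toy graph
# (not part of the original module; the edge list below is made up).
def _demo_std_algorithms():
    g = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3)])
    print(std_bfs(g, 0))              # 1-based traversal levels from vertex 0
    print(std_triangles_count(g))     # the single triangle 0-1-2 -> 1
    print(std_bellman_ford(g, 0))     # shortest-path lengths from vertex 0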
|
# script was primarily written by Miles McCain, it should be in a notebook but I'm lazy
import numpy as np
import pandas as pd
from datetime import datetime
import os
import csv
import pickle
# Category deduplication
REWRITE_CATEGORIES = {
"Business Day": "Business",
"nan": "Unknown",
"New York and Region": "N.Y. / Region",
"false": "Unknown",
"Crosswords/Games": "Crosswords & Games",
"Multimedia/Photos": "Multimedia",
"Home and Garden": "Home & Garden",
"Autos": "Automobiles",
"Great Homes and Destinations": "Great Homes, Destinations",
"Style": "Fashion & Style",
"Dining and Wine": "Dining & Wine"
}
def _rewrite_category(category):
"""Standardize the category name by performing a
rewrite if necessary.
Arguments:
category {string} -- the name of the category
Returns:
string -- the standardized category
"""
if category in REWRITE_CATEGORIES:
return REWRITE_CATEGORIES[category]
return category
def _unpack_categories(reported_category):
"""Utility method to get all the subcategories,
separated by a semicolon.
Arguments:
reported_category {string} -- semicolon-separated supercategory
Returns:
[String] -- array of subcategory strings
"""
return [_rewrite_category(category.strip()) for category in reported_category.split(";")]
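# A small illustration (hypothetical input string) of the two helpers above:
# a raw NYT section string is split on ";" and each piece is standardized.
def _demo_unpack_categories():
    assert _unpack_categories("Business Day; Style") == ["Business", "Fashion & Style"]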
def _process_row(k):
k['section_name'] = _unpack_categories(str(k['section_name']))
k['pub_date'] = datetime.strptime(k['pub_date'], "%Y-%m-%d").date()
return k
def load_all_data():
"""Load all the CSVs in /data into a single
dataframe.
Returns:
dataframe -- all the data
"""
dataframes = []
for data_file in os.listdir("data_conserv/"):
if '.csv' in data_file:
data = pd.read_csv("data_conserv/" + data_file)
dataframes.append(data)
dataframe = pd.concat(dataframes)
    dataframe = dataframe.apply(_process_row, axis=1)
return dataframe
def get_percent_by_women(dataframe, fil):
    """Fraction of rows matching the filter `fil` that were written by women.
    Returns None when no row matches."""
    total = 0
matched = 0
for index, row in dataframe.iterrows():
if fil(row):
total += 1
if row["gender"] == "F":
matched += 1
if total == 0:
return None
return float(matched) / total
def get_total_num_women(dataframe, fil):
"""Counts total women in category
Returns:
[list] -- total number of women, total number in category
"""
total = 0
matched = 0
for index, row in dataframe.iterrows():
if fil(row):
total += 1
if row["gender"] == "F":
matched += 1
return matched
def _get_unique_categories(dataframe):
"""Utility method to get the unique categories in the dataframe, unpacked
and standardized.
Arguments:
dataframe {dataframe} -- the dataframe which contains the NYT data
Returns:
[String] -- array of the unique categories
"""
categories = set()
for reported_category in dataframe.section_name.unique():
for found_category in _unpack_categories(str(reported_category)):
categories.add(found_category)
return categories
def save_monthly_stats(data_dict):
with open('monthly_stats2018.pickle', 'wb') as handle:
pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
data = load_all_data()
# this will also dedup categories
# Example: how to get example articles for any given month
# get_percent_by_women(data, lambda k: k['pub_date'].month == 6 and k['pub_date'].year == 2013 and 'Sports' in k['section_name'])
# Get all the unique categories
all_unique_categories = set()
for categories in data['section_name']:
for subcategory in categories:
all_unique_categories.add(subcategory)
monthly_stats = {}
for year in range(2011, 2017):
monthly_stats[str(year)] = {}
for month in range(1, 13):
monthly_stats[str(year)][str(month)] = {}
for category in all_unique_categories:
monthly_stats[str(year)][str(month)][category] = {
"total": 0, # total number of articles
"women": 0 # number of those articles by women
}
for index, row in data.iterrows():
year = str(row['pub_date'].year)
month = str(row['pub_date'].month)
for category in row['section_name']:
monthly_stats[year][month][category]["total"] += 1
if row["gender"] == "F":
monthly_stats[year][month][category]["women"] += 1
    save_monthly_stats(monthly_stats)
# with open('monthly_stats_to_date.csv', mode='w') as csv_file:
# nyt_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# columns = ["Year", "Month"]
# columns.extend(all_unique_categories)
# nyt_writer.writerow(columns) # this is the line in question -- a bytes-like object is required, not 'str'
# for year in range(2011, 2017):
# for month in range(1, 13):
# row = [str(year), str(month)]
# for category in all_unique_categories:
# women = float(monthly_stats[str(year)][str(month)][category]["women"])
# total = float(monthly_stats[str(year)][str(month)][category]["total"])
# if total == 0:
# row.append(None)
# else:
# row.append(women/total*100.0)
# nyt_writer.writerow(row)
|
"""Strategy objects for creating ABINIT calculations."""
from __future__ import division, print_function
import abc
import collections
import copy
import numpy as np
from pprint import pprint, pformat
from pymatgen.util.string_utils import str_aligned, str_delimited, is_string, list_strings
from pymatgen.io.abinitio.abiobjects import SpinMode, Smearing, Electrons
from pymatgen.io.abinitio.pseudos import PseudoTable
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
def select_pseudos(pseudos, structure, ret_table=True):
"""
Given a list of pseudos and a pymatgen structure, extract the pseudopotentials
for the calculation (useful when we receive an entire periodic table).
Raises:
ValueError if no pseudo is found or multiple occurrences are found.
"""
table = PseudoTable.astable(pseudos)
pseudos = []
for symbol in structure.types_of_specie:
# Get the list of pseudopotentials in table from atom symbol.
pseudos_for_type = table.pseudos_with_symbol(symbol)
if not pseudos_for_type:
raise ValueError("Cannot find pseudo for symbol %s" % symbol)
if len(pseudos_for_type) > 1:
raise ValueError("Find multiple pseudos for symbol %s" % symbol)
pseudos.append(pseudos_for_type[0])
if ret_table:
return PseudoTable(pseudos)
else:
return pseudos
def order_pseudos(pseudos, structure):
return select_pseudos(pseudos, structure, ret_table=False)
class Strategy(object):
"""
A Strategy object generates the ABINIT input file used for a particular type of calculation
e.g. ground-state runs, structural relaxations, self-energy calculations ...
A Strategy can absorb data (e.g. data produced in the previous steps of a workflow) and
can use this piece of information to generate/optimize the input variables.
Strategy objects must provide the method make_input that builds and returns the abinit input file.
Attributes:
accuracy:
Accuracy of the calculation used to define basic parameters of the run.
such as tolerances, basis set truncation ...
pseudos:
List of pseudopotentials.
"""
__metaclass__ = abc.ABCMeta
# Mapping runlevel --> optdriver variable
_runl2optdriver = {
"scf" : 0 ,
"nscf" : 0 ,
"relax" : 0 ,
"dfpt" : 1 ,
"screening": 3 ,
"sigma" : 4 ,
"bse" : 99,
}
# Name of the (default) tolerance used by the runlevels.
_runl2tolname = {
"scf" : 'tolvrs',
"nscf" : 'tolwfr',
"dfpt" : 'toldfe', # ?
"screening": 'toldfe', # dummy
"sigma" : 'toldfe', # dummy
"bse" : 'toldfe', # ?
}
# Tolerances for the different levels of accuracy.
T = collections.namedtuple('Tolerance', "low normal high")
_tolerances = {
"toldfe": T(1.e-7, 1.e-8, 1.e-9),
"tolvrs": T(1.e-7, 1.e-8, 1.e-9),
"tolwfr": T(1.e-15, 1.e-17, 1.e-19),
"tolrdf": T(0.04, 0.02, 0.01),
}
del T
def __str__(self):
return "<%s at %s, accuracy = %s>" % (self.__class__.__name__, id(self), self.accuracy)
def copy(self):
"""Shallow copy of self."""
return copy.copy(self)
def deepcopy(self):
"""Deep copy of self."""
return copy.deepcopy(self)
@abc.abstractproperty
def runlevel(self):
"""String defining the Runlevel. See _runl2optdriver."""
@property
def optdriver(self):
"""The optdriver associated to the calculation."""
return self._runl2optdriver[self.runlevel]
def learn(self, **data):
"""Update the data stored in self."""
if not hasattr(self, "_data"):
self._data = dict(data)
else:
            if any(k in self._data for k in data):
                raise ValueError("Keys %s are already present in data" % str(list(data)))
self._data.update(data)
@property
def accuracy(self):
"""Accuracy used by the strategy."""
try:
return self._accuracy
except AttributeError:
self.set_accuracy("normal")
return self._accuracy
def set_accuracy(self, accuracy):
"""Accuracy setter."""
if hasattr(self, "_accuracy"):
raise RuntimeError("object already has accuracy %s " % self._accuracy)
assert accuracy in ["low", "normal", "high",]
self._accuracy = accuracy
@property
def data(self):
"""Data absorbed by the strategy during the workflow."""
try:
            return self._data
except AttributeError:
return {}
@property
def isnc(self):
"""True if norm-conserving calculation."""
return self.pseudos.allnc
@property
def ispaw(self):
"""True if PAW calculation."""
return self.pseudos.allpaw
@property
def ecut(self):
"""Cutoff energy in Hartree."""
try:
# User option.
return self.extra_abivars["ecut"]
except KeyError:
# Compute ecut from the Pseudo Hints.
hints = [p.hint_for_accuracy(self.accuracy) for p in self.pseudos]
return max(hint.ecut for hint in hints)
@property
def pawecutdg(self):
"""Cutoff energy in Hartree for the dense grid used in PAW calculations."""
if not self.ispaw:
return None
try:
return self.extra_abivars["pawecutdg"] # User option.
except KeyError:
raise NotImplementedError("")
#ratio = max(p.suggested_augratio(accuracy) for p in self.pseudos])
#ratio = augration_high if high else augratio_norm
#pawecutdg = ecut * ratio
@property
def tolerance(self):
"""Return a dict {varname: varvalue} with the tolerance used for the calculation."""
# Check user options first.
for tolname in self._tolerances:
try:
return {tolname: self.extra_abivars[tolname]}
except KeyError:
pass
# Use default values depending on the runlevel and the accuracy.
tolname = self._runl2tolname[self.runlevel]
return {tolname: getattr(self._tolerances[tolname], self.accuracy)}
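    # A worked example of the lookup above (values taken from the tables defined
    # earlier in this class): for runlevel "scf" with accuracy "normal",
    # _runl2tolname maps "scf" -> "tolvrs" and _tolerances["tolvrs"].normal is 1.e-8,
    # so `tolerance` returns {"tolvrs": 1.e-8} unless the user overrode it in
    # extra_abivars.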
@property
def need_forces(self):
"""True if forces are required at each SCF step (like the stresses)."""
return self.runlevel in ["relax",]
@property
def need_stress(self):
"""True if the computation of the stress is required."""
# TODO: here it's easier to check if optcell != 0
return self.runlevel in ["relax",]
def add_extra_abivars(self, abivars):
"""Add variables (dict) to extra_abivars."""
self.extra_abivars.update(abivars)
def remove_extra_abivars(self, keys):
for key in keys:
self.extra_abivars.pop(key)
@abc.abstractmethod
def make_input(self, *args, **kwargs):
"""Returns an Input instance."""
class ScfStrategy(Strategy):
"""
Strategy for ground-state SCF calculations.
"""
def __init__(self, structure, pseudos, ksampling, accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, use_symmetries=True, **extra_abivars):
"""
Args:
structure:
pymatgen structure
pseudos:
List of pseudopotentials.
ksampling:
Ksampling object defining the sampling of the BZ.
accuracy:
Accuracy of the calculation.
spin_mode:
Spin polarization mode.
smearing:
string or Smearing instance.
charge:
Total charge of the system. Default is 0.
scf_algorithm:
ElectronsAlgorithm instance.
use_symmetries:
False if point group symmetries should not be used.
extra_abivars:
Extra variables that will be directly added to the input file.
"""
super(ScfStrategy, self).__init__()
self.set_accuracy(accuracy)
self.structure = structure
self.pseudos = select_pseudos(pseudos, structure)
self.ksampling = ksampling
self.use_symmetries = use_symmetries
self.electrons = Electrons(spin_mode=spin_mode,
smearing=smearing,
algorithm=scf_algorithm,
nband=None,
fband=None,
charge=charge,
)
self.extra_abivars = extra_abivars
@property
def runlevel(self):
return "scf"
def make_input(self):
extra = dict(
optdriver=self.optdriver,
ecut=self.ecut,
pawecutdg=self.pawecutdg,
)
extra.update(self.tolerance)
extra.update({"nsym": 1 if not self.use_symmetries else None})
extra.update(self.extra_abivars)
input = InputWriter(self.structure, self.electrons, self.ksampling, **extra)
return input.get_string()
class NscfStrategy(Strategy):
"""
Strategy for non-self-consistent calculations.
"""
def __init__(self, scf_strategy, ksampling, nscf_nband, nscf_algorithm=None, **extra_abivars):
"""
Args:
scf_strategy:
ScfStrategy used for the GS run.
ksampling:
Ksampling object defining the sampling of the BZ.
nscf_nband:
Number of bands to compute.
nscf_algorithm
ElectronsAlgorithm instance.
extra_abivars:
Extra ABINIT variables that will be directly added to the input file
"""
super(NscfStrategy, self).__init__()
self.set_accuracy(scf_strategy.accuracy)
self.scf_strategy = scf_strategy
self.nscf_nband = nscf_nband
self.pseudos = scf_strategy.pseudos
self.ksampling = ksampling
if nscf_algorithm is None:
nscf_algorithm = {"iscf": -2}
# Electrons used in the GS run.
scf_electrons = scf_strategy.electrons
self.electrons = Electrons(spin_mode=scf_electrons.spin_mode,
smearing=scf_electrons.smearing,
algorithm=nscf_algorithm,
nband=nscf_nband,
fband=None,
charge=scf_electrons.charge,
comment=None,
#occupancies = None,
)
self.extra_abivars = extra_abivars
@property
def runlevel(self):
return "nscf"
def make_input(self):
# Initialize the system section from structure.
scf_strategy = self.scf_strategy
extra = dict(
optdriver=self.optdriver,
ecut=self.ecut,
pawecutdg=self.pawecutdg,
)
extra.update(self.tolerance)
extra.update(self.extra_abivars)
input = InputWriter(scf_strategy.structure, self.electrons, self.ksampling, **extra)
return input.get_string()
class RelaxStrategy(ScfStrategy):
"""Extends ScfStrategy by adding an algorithm for the structural relaxation."""
def __init__(self, structure, pseudos, ksampling, relax_algo, accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", charge=0.0, scf_solver=None, **extra_abivars):
"""
Args:
structure:
pymatgen structure
pseudos:
List of pseudopotentials.
ksampling:
`Ksampling` object defining the sampling of the BZ.
relax_algo:
Object defining the algorithm for the structural relaxation.
accuracy:
Accuracy of the calculation.
spin_mode:
Flag defining the spin polarization. Defaults to "polarized"
smearing:
String or `Smearing` instance.
charge:
Total charge of the system. Default is 0.
scf_algorithm:
`ElectronsAlgorithm` instance.
extra_abivars:
Extra ABINIT variables that will be directly added to the input file
"""
super(RelaxStrategy, self).__init__(structure, pseudos, ksampling,
accuracy=accuracy, spin_mode=spin_mode, smearing=smearing,
charge=charge, scf_algorithm=scf_algorithm, **extra_abivars)
self.relax_algo = relax_algo
@property
def runlevel(self):
return "scf"
def make_input(self):
# Input for the GS run
input_str = super(RelaxStrategy, self).make_input()
# Add the variables for the structural relaxation.
input = InputWriter(self.relax_algo)
input_str += input.get_string()
return input_str
class ScreeningStrategy(Strategy):
"""Strategy for Screening calculations."""
def __init__(self, scf_strategy, nscf_strategy, screening, **extra_abivars):
"""
Args:
scf_strategy:
Strategy used for the ground-state calculation
nscf_strategy:
Strategy used for the non-self consistent calculation
screening:
Screening instance
extra_abivars:
Extra ABINIT variables added directly to the input file
"""
super(ScreeningStrategy, self).__init__()
self.pseudos = scf_strategy.pseudos
self.scf_strategy = scf_strategy
self.nscf_strategy = nscf_strategy
self.screening = screening
scr_nband = screening.nband
scf_electrons = scf_strategy.electrons
nscf_electrons = nscf_strategy.electrons
if scr_nband > nscf_electrons.nband:
raise ValueError("Cannot use more that %d bands for the screening" % nscf_electrons.nband)
self.ksampling = nscf_strategy.ksampling
if not self.ksampling.is_homogeneous:
raise ValueError("The k-sampling used for the NSCF run mush be homogeneous")
self.electrons = Electrons(spin_mode=scf_electrons.spin_mode,
smearing =scf_electrons.smearing,
nband=scr_nband,
charge=scf_electrons.charge,
comment=None,
)
self.extra_abivars = extra_abivars
@property
def runlevel(self):
return "screening"
def make_input(self):
# FIXME
extra = dict(
optdriver=self.optdriver,
ecut=self.ecut,
ecutwfn=self.ecut,
#pawecutdg=self.pawecutdg,
)
extra.update(self.tolerance)
extra.update(self.extra_abivars)
input = InputWriter(self.scf_strategy.structure, self.electrons, self.ksampling, self.screening, **extra)
return input.get_string()
class SelfEnergyStrategy(Strategy):
"""Strategy for self-energy calculations."""
def __init__(self, scf_strategy, nscf_strategy, scr_strategy, sigma, **extra_abivars):
"""
Args:
scf_strategy:
Strategy used for the ground-state calculation
nscf_strategy:
Strategy used for the non-self consistent calculation
scr_strategy:
Strategy used for the screening calculation
sigma:
SelfEnergy instance.
extra_abivars:
Extra ABINIT variables added directly to the input file
"""
# TODO Add consistency check between SCR and SIGMA strategies
super(SelfEnergyStrategy, self).__init__()
self.pseudos = scf_strategy.pseudos
self.scf_strategy = scf_strategy
self.nscf_strategy = nscf_strategy
self.scr_strategy = scr_strategy
self.sigma = sigma
self.extra_abivars = extra_abivars
scf_electrons = scf_strategy.electrons
nscf_electrons = nscf_strategy.electrons
if sigma.nband > nscf_electrons.nband:
raise ValueError("Cannot use more that %d bands for the self-energy" % nscf_electrons.nband)
self.ksampling = nscf_strategy.ksampling
if not self.ksampling.is_homogeneous:
raise ValueError("The k-sampling used for the NSCF run mush be homogeneous")
self.electrons = Electrons(spin_mode = scf_electrons.spin_mode,
smearing = scf_electrons.smearing,
nband = sigma.nband,
charge = scf_electrons.charge,
comment = None,
)
@property
def runlevel(self):
return "sigma"
def make_input(self):
# FIXME
extra = dict(
optdriver=self.optdriver,
ecut=self.ecut,
ecutwfn=self.ecut,
# "pawecutdg": self.pawecutdg,
)
extra.update(self.tolerance)
extra.update(self.extra_abivars)
input = InputWriter(self.scf_strategy.structure, self.electrons, self.ksampling, self.sigma, **extra)
return input.get_string()
class MDFBSE_Strategy(Strategy):
"""
Strategy for Bethe-Salpeter calculation based on the
model dielectric function and the scissors operator
"""
def __init__(self, scf_strategy, nscf_strategy, exc_ham, **extra_abivars):
"""
Args:
scf_strategy:
Strategy used for the ground-state calculation.
nscf_strategy:
Strategy used for the non-self consistent calculation.
exc_ham:
`ExcitonicHamiltonian` instance.
extra_abivars:
Extra ABINIT variables added directly to the input file.
"""
super(MDFBSE_Strategy, self).__init__()
self.pseudos = scf_strategy.pseudos
self.scf_strategy = scf_strategy
self.nscf_strategy = nscf_strategy
self.exc_ham = exc_ham
self.extra_abivars = extra_abivars
scf_electrons = scf_strategy.electrons
nscf_electrons = nscf_strategy.electrons
if exc_ham.nband > nscf_electrons.nband:
raise ValueError("Cannot use more that %d bands for the EXC hamiltonian." % nscf_electrons.nband)
self.ksampling = nscf_strategy.ksampling
if not self.ksampling.is_homogeneous:
raise ValueError("The k-sampling used for the NSCF run mush be homogeneous")
self.electrons = Electrons(spin_mode=scf_electrons.spin_mode,
smearing=scf_electrons.smearing,
nband=exc_ham.nband,
charge= scf_electrons.charge,
comment=None,
)
@property
def runlevel(self):
return "bse"
def make_input(self):
# FIXME
extra = dict(
optdriver=self.optdriver,
ecut=self.ecut,
ecutwfn=self.ecut,
#pawecutdg=self.pawecutdg,
)
#extra.update(self.tolerance)
extra.update(self.extra_abivars)
input = InputWriter(self.scf_strategy.structure, self.electrons, self.ksampling, self.exc_ham, **extra)
return input.get_string()
class InputWriter(object):
"""
This object receives a list of `AbivarAble` objects, an optional
dictionary with extra ABINIT variables and produces a (nicely formatted?)
string with the input file.
"""
def __init__(self, *args, **kwargs):
self.abiobj_dict = collections.OrderedDict()
self.extra_abivars = collections.OrderedDict()
for arg in args:
if hasattr(arg, "to_abivars"):
self.add_abiobj(arg)
else:
self.add_extra_abivars(arg)
for (k,v) in kwargs.items():
self.add_extra_abivars({k: v})
def __str__(self):
"""String representation (the section of the abinit input file)."""
return self.get_string()
@property
def abiobjects(self):
"""List of objects stored in self."""
return self.abiobj_dict.values()
def add_abiobj(self, obj):
"""Add the object to self."""
if not hasattr(obj, "to_abivars"):
raise ValueError("%s does not define the method to_abivars" % str(obj))
cname = obj.__class__.__name__
if cname in self.abiobj_dict:
raise ValueError("%s is already stored" % cname)
self.abiobj_dict[cname] = obj
def add_extra_abivars(self, abivars):
"""Add variables (dict) to extra_abivars."""
self.extra_abivars.update(abivars)
def to_abivars(self):
"""Returns a dictionary with the abinit variables defined by the Card."""
abivars = {}
for obj in self.abiobjects:
abivars.update(obj.to_abivars())
abivars.update(self.extra_abivars)
return abivars
#def list_objects(self):
# "String comment (comment of self + comments of the objects, if any)"
# for obj in self.abiobjects:
# if hasattr(obj, "comment"):
# lines.append("%s: %s" % (obj.__class__.__name__, obj.comment))
# return "\n".join(lines)
@staticmethod
def _format_kv(key, value):
"""Formatter"""
if value is None:
return [] # Use ABINIT default.
if isinstance(value, collections.Iterable) and not is_string(value):
arr = np.array(value)
if len(arr.shape) in [0,1]: # scalar or vector.
token = [key, " ".join(str(i) for i in arr)]
else:
# array --> matrix
matrix = np.reshape(arr, (-1, arr.shape[-1]))
lines = []
for (idx, row) in enumerate(matrix):
lines.append(" ".join(str(i) for i in row))
token = [key +"\n", "\n".join(lines)]
else:
token = [key, str(value)]
return token
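    # A small illustration of _format_kv above (variable names and values are made up):
    #   _format_kv("ecut", 10)            -> ["ecut", "10"]
    #   _format_kv("ngkpt", [4, 4, 4])    -> ["ngkpt", "4 4 4"]
    #   _format_kv("rprim", [[1, 0, 0],
    #                        [0, 1, 0],
    #                        [0, 0, 1]])  -> ["rprim\n", "1 0 0\n0 1 0\n0 0 1"]
    #   _format_kv("istwfk", None)        -> []   (the ABINIT default is used)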
def get_string(self, pretty=False):
"""
Returns a string representation of self. The reason why this
method is different from the __str__ method is to provide options for pretty printing.
Args:
pretty:
Set to True for pretty aligned output.
"""
lines = []
app = lines.append
# Write the Abinit objects first.
for obj in self.abiobjects:
app([80*"#", ""])
app(["#", "%s" % obj.__class__.__name__])
app([80*"#", ""])
for (k, v) in obj.to_abivars().items():
app(self._format_kv(k, v))
# Extra variables.
if self.extra_abivars:
app([80*"#", ""])
app(["#", "Extra_Abivars"])
app([80*"#", ""])
for (k, v) in self.extra_abivars.items():
app(self._format_kv(k, v))
if pretty:
return str_aligned(lines, header=None)
else:
return str_delimited(lines, header=None, delimiter=5*" ")
class StrategyWithInput(object):
# TODO: Find a better way to do this. I will likely need to refactor the Strategy object
def __init__(self, abinit_input):
self.abinit_input = abinit_input
@property
def pseudos(self):
        # FIXME: pseudos must be ordered, but I need to define an ABC for the Strategies and Inputs.
# Order pseudos
pseudos = self.abinit_input.pseudos
return order_pseudos(pseudos, self.abinit_input.structure)
#print("pseudos", pseudos)
#print("ord_pseudos", ord_pseudos)
#return ord_pseudos
#return self.abinit_input.pseudos
def add_extra_abivars(self, abivars):
"""Add variables (dict) to extra_abivars."""
self.abinit_input.set_variables(**abivars)
def remove_extra_abivars(self, keys):
"""Remove variables from extra_abivars."""
self.abinit_input.remove_variables(keys)
def make_input(self):
return str(self.abinit_input)
class OpticVar(collections.namedtuple("OpticVar", "name value help")):
def __str__(self):
        sval = str(self.value)
        return (4*" ").join([sval, "! " + self.help])
class OpticInput(object):
"""
abo_1WF7 ! Name of the first d/dk response wavefunction file, produced by abinit
abo_1WF8 ! Name of the second d/dk response wavefunction file, produced by abinit
abo_1WF9 ! Name of the third d/dk response wavefunction file, produced by abinit
abo_WFK ! Name of the ground-state wavefunction file, produced by abinit
0.01 ! Value of the *smearing factor*, in Hartree
0.010 1 ! frequency *step* and *maximum* frequency (Ha)
0.000 ! *Scissor* shift if needed, in Hartree
0.001 ! *Tolerance* on closeness of singularities (in Hartree)
3 ! *Number of components* of linear optic tensor to be computed
11 33 23 ! Linear *coefficients* to be computed (x=1, y=2, z=3)
2 ! Number of components of nonlinear optic tensor to be computed
123 222 ! Non-linear coefficients to be computed
"""
# variable name --> default value.
_VARIABLES = [
OpticVar("ddkfile_x", None, "Name of the first d/dk response wavefunction file"),
OpticVar("ddkfile_y", None, "Name of the second d/dk response wavefunction file"),
OpticVar("ddkfile_z", None, "Name of the third d/dk response wavefunction file"),
OpticVar("wfkfile", None, "Name of the ground-state wavefunction file"),
OpticVar("zcut", 0.01, "Value of the *smearing factor*, in Hartree"),
OpticVar("wmesh",(0.010, 1), "Frequency *step* and *maximum* frequency (Ha)"),
OpticVar("scissor", 0.000, "*Scissor* shift if needed, in Hartree"),
OpticVar("sing_tol", 0.001, "*Tolerance* on closeness of singularities (in Hartree)"),
OpticVar("num_lin_comp", None, "*Number of components* of linear optic tensor to be computed"),
OpticVar("lin_comp", None, "Linear *coefficients* to be computed (x=1, y=2, z=3)"),
OpticVar("num_nonlin_comp", None, "Number of components of nonlinear optic tensor to be computed"),
OpticVar("nonlin_comp", None, "! Non-linear coefficients to be computed"),
]
_VARNAMES = [v.name for v in _VARIABLES]
def __init__(self, **kwargs):
# Default values
        self.vars = collections.OrderedDict((v.name, v.value) for v in self._VARIABLES)
# Update the variables with the values passed by the user
        for k, v in kwargs.items():
            if k not in self._VARNAMES:
                raise ValueError("varname %s not in %s" % (k, str(self._VARNAMES)))
self.vars[k] = v
def __init__(self, zcut, wstep, wmax, scissor, sing_tol, linear_components,
nonlinear_components=None, ddk_files=None, wfk=None):
        self.vars = vars = collections.OrderedDict.fromkeys(self._VARNAMES)
if ddk_files is not None:
assert len(ddk_files) == 3
assert wfk is not None
for dir, ddk in zip(["x", "y", "z"], ddk_files):
vars["ddkfile_" + dir] = os.path.abspath(ddk)
if wfk is not None:
vars["wfkfile"] = os.path.abspath(wfk)
vars["zcut"] = zcut
vars["wmesh"] = " ".join(map(str, (wstep, wmax)))
vars["sing_tol"] = sing_tol
vars["num_lin_comp"] = len(linear_components)
vars["lin_comp"] = " ".join(str(c) for c in linear_components)
vars["num_nonlin_comp"] = len(non_linear_components)
vars["nonlin_comp"] = " ".join(str(c) for c in nonlinear_components)
def __init__(self, string):
self.string = string
def __str__(self):
return self.string
def to_string(self):
lines = []
app = lines.append
        for name in self._VARNAMES:
var = self.vars[name]
app(str(var))
return "\n".join(lines)
def make_input(self):
return str(self)
def add_extra_abivars(self, abivars):
"""
Connection is explicit via the input file
since we can pass the paths of the output files
produced by the previous runs.
"""
class AnaddbInput(object):
def __init__(self, structure=None, ndtset=1, comment=""):
"""
Args:
structure:
Crystalline structure.
ndtset:
Number of datasets.
comment:
Optional string with a comment that will be placed at the beginning of the file.
"""
        self._structure = structure
        self.ndtset = ndtset
self.comment = comment
def __init__(self, string):
self.string = string
@property
def structure(self):
return self._structure
def to_string(self):
return self.string
def make_input(self):
return self.to_string()
def add_extra_abivars(self, abivars):
"""
Connection is explicit via the input file
since we can pass the paths of the output files
produced by the previous runs.
"""
#def set_qpath(self):
# """
# nqpath 7
# qpath
# 0.0 0.0 0.0
# 1/2 1/2 0.0
# 1 1 1
# 1/2 1/2 1/2
# 1/2 1/2 0.0
# 1/2 3/4 1/4
# 1/2 1/2 1/2
# """
#def split_datasets(self):
# """
# Split an input file with multiple datasets into a list of `ndtset` distinct input files.
# """
# # Propagate subclasses (if any)
# cls = self.__class__
# news = []
# for i in range(self.ndtset):
# my_vars = self[i+1].allvars
# my_vars.pop("ndtset", None)
# new = cls(pseudos=self.pseudos, ndtset=1)
# new.set_variables(**my_vars)
# news.append(new)
#
# return news
|
from models.ShoppingCart import *
from models.Store import *
from models.Item import *
from eshopee import *
from main import *
def test_getStoreItems():
store1 = Store()
assert len(store1.getStoreItems()) == len(Store.getStoreItems(Store()))
def test_getTotalPrice():
shop = ShoppingCart()
shop.items = []
assert sum(shop.items) == ShoppingCart.getTotalPrice(ShoppingCart())
def test_getCartItems():
cart1 = ShoppingCart()
assert len(cart1.getCartItems()) == len(ShoppingCart.getCartItems(ShoppingCart()))
|
"""
Fanciful names for integer indexes into lists - either a day of week,
a month, a planet, or a chemical element.
"""
import calendar
def to_index(name):
if isinstance(name, float):
raise KeyError('Indexes cannot be floating point')
    try:
        return int(name)
    except (TypeError, ValueError):
        pass
    try:
        return NAME_TO_INDEX[name.lower()]
    except (AttributeError, KeyError):
        raise KeyError('Can\'t understand index "%s"' % name)
def to_names(index):
return INDEX_TO_NAME[index]
def _combine(*name_lists):
name_to_index = {}
index_to_name = {}
def add(i, name):
nl = name.lower()
if nl not in name_to_index:
name_to_index[nl] = i
index_to_name.setdefault(i, []).append(name)
elif nl not in DUPES:
raise ValueError(name + ' duplicated')
for z in ZEROES:
add(0, z)
for name_list in name_lists:
for i, names in enumerate(name_list):
if isinstance(names, str):
names = names,
for name in names:
add(i + 1, name)
return name_to_index, index_to_name
DUPES = 'may', 'mercury'
ZEROES = 'none', 'nothing', 'zero', 'zip'
DAYS = tuple(zip(calendar.day_abbr, calendar.day_name))
MONTHS = tuple(zip(calendar.month_abbr, calendar.month_name))[1:]
COLORS = 'red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet'
PLANETS = ('Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus',
'Neptune', 'Pluto')
ELEMENTS = (
('H', 'hydrogen'),
('He', 'helium'),
('Li', 'lithium'),
('Be', 'beryllium'),
('B', 'boron'),
('C', 'carbon'),
('N', 'nitrogen'),
('O', 'oxygen'),
('F', 'fluorine'),
('Ne', 'neon'),
('Na', 'sodium'),
('Mg', 'magnesium'),
('Al', 'aluminum'),
('Si', 'silicon'),
('P', 'phosphorus'),
('S', 'sulfur'),
('Cl', 'chlorine'),
('Ar', 'argon'),
('K', 'potassium'),
('Ca', 'calcium'),
('Sc', 'scandium'),
('Ti', 'titanium'),
('V', 'vanadium'),
('Cr', 'chromium'),
('Mn', 'manganese'),
('Fe', 'iron'),
('Co', 'cobalt'),
('Ni', 'nickel'),
('Cu', 'copper'),
('Zn', 'zinc'),
('Ga', 'gallium'),
('Ge', 'germanium'),
('As', 'arsenic'),
('Se', 'selenium'),
('Br', 'bromine'),
('Kr', 'krypton'),
('Rb', 'rubidium'),
('Sr', 'strontium'),
('Y', 'yttrium'),
('Zr', 'zirconium'),
('Nb', 'niobium'),
('Mo', 'molybdenum'),
('Tc', 'technetium'),
('Ru', 'ruthenium'),
('Rh', 'rhodium'),
('Pd', 'palladium'),
('Ag', 'silver'),
('Cd', 'cadmium'),
('In', 'indium'),
('Sn', 'tin'),
('Sb', 'antimony'),
('Te', 'tellurium'),
('I', 'iodine'),
('Xe', 'xenon'),
('Cs', 'cesium'),
('Ba', 'barium'),
('La', 'lanthanum'),
('Ce', 'cerium'),
('Pr', 'praseodymium'),
('Nd', 'neodymium'),
('Pm', 'promethium'),
('Sm', 'samarium'),
('Eu', 'europium'),
('Gd', 'gadolinium'),
('Tb', 'terbium'),
('Dy', 'dysprosium'),
('Ho', 'holmium'),
('Er', 'erbium'),
('Tm', 'thulium'),
('Yb', 'ytterbium'),
('Lu', 'lutetium'),
('Hf', 'hafnium'),
('Ta', 'tantalum'),
('W', 'tungsten'),
('Re', 'rhenium'),
('Os', 'osmium'),
('Ir', 'iridium'),
('Pt', 'platinum'),
('Au', 'gold'),
('Hg', 'mercury'),
('Tl', 'thallium'),
('Pb', 'lead'),
('Bi', 'bismuth'),
('Po', 'polonium'),
('At', 'astatine'),
('Rn', 'radon'),
('Fr', 'francium'),
('Ra', 'radium'),
('Ac', 'actinium'),
('Th', 'thorium'),
('Pa', 'protactinium'),
('U', 'uranium'),
('Np', 'neptunium'),
('Pu', 'plutonium'),
('Am', 'americium'),
('Cm', 'curium'),
('Bk', 'berkelium'),
('Cf', 'californium'),
('Es', 'einsteinium'),
('Fm', 'fermium'),
('Md', 'mendelevium'),
('No', 'nobelium'),
('Lr', 'lawrencium'),
('Rf', 'rutherfordium'),
('Db', 'dubnium'),
('Sg', 'seaborgium'),
('Bh', 'bohrium'),
('Hs', 'hassium'),
('Mt', 'meitnerium'),
('Ds', 'darmstadtium'),
('Rg', 'roentgenium'),
('Cn', 'copernicium'),
('Nh', 'nihonium'),
('Fl', 'flerovium'),
('Mc', 'moscovium'),
('Lv', 'livermorium'),
('Ts', 'tennessine'),
('Og', 'oganesson'),
)
NAME_TO_INDEX, INDEX_TO_NAME = _combine(DAYS, MONTHS, COLORS, PLANETS, ELEMENTS)
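# A minimal sketch of the lookups this module provides (not part of the original
# file; the asserted values follow from the DAYS/COLORS/PLANETS/ELEMENTS tables above).
def _demo_fanciful_indexes():
    assert to_index('Fri') == 5        # fifth day of the week
    assert to_index('green') == 4      # fourth colour of the rainbow
    assert to_index('Mars') == 4       # fourth planet
    assert to_index('Fe') == 26        # iron, element 26
    assert to_index('0') == 0          # numeric strings pass straight through int()
    assert 'Earth' in to_names(3)      # every name registered for index 3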
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-16 18:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import simplesite.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('slug', models.SlugField(max_length=255, unique=True, verbose_name='Slug')),
('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation Date')),
('last_modification', models.DateTimeField(auto_now=True, verbose_name='Last Modification')),
('content', models.TextField(blank=True, null=True, verbose_name='Main Content')),
('sort_order', models.IntegerField(blank=True, default=1, null=True, verbose_name='Sort Order')),
('is_public', models.BooleanField(default=True, verbose_name='Public')),
('is_header', models.BooleanField(default=False, verbose_name='Belongs to Header')),
('is_footer', models.BooleanField(default=False, verbose_name='Belongs to Footer')),
('_related_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='_related_model', to='contenttypes.ContentType', verbose_name='Related Content')),
],
options={
'ordering': ['sort_order', 'creation_date', 'slug'],
'verbose_name': 'Page',
'verbose_name_plural': 'Pages',
},
),
migrations.CreateModel(
name='PageImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('img_type', models.CharField(blank=True, choices=[('thumbnail', 'Thumbnail Image'), ('detail', 'Detail Image'), ('gallery', 'Gallery Image')], max_length=255, null=True, verbose_name='Image Type')),
('image', models.ImageField(max_length=255, upload_to=simplesite.models.get_page_image_path)),
('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_set', to='simplesite.Page')),
],
options={
'verbose_name': 'Image',
'verbose_name_plural': 'Images',
},
),
migrations.CreateModel(
name='SocialNetwork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('slug', models.SlugField(max_length=255, unique=True, verbose_name='Slug')),
('url', models.URLField(max_length=255, verbose_name='URL')),
('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation Date')),
('sort_order', models.IntegerField(blank=True, default=1, null=True, verbose_name='Sort Order')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
('image', models.ImageField(max_length=255, upload_to=simplesite.models.get_socialnetwork_image_path, verbose_name='Image')),
],
options={
'ordering': ['sort_order', 'creation_date'],
'verbose_name': 'Social Network',
'verbose_name_plural': 'Social Networks',
},
),
]
|
from app import app
from neo4j import GraphDatabase, basic_auth
def setup_neo4j_driver(host, port, login, password):
try:
uri = f"bolt://{host}:{port}"
driver = GraphDatabase.driver(uri,
auth=basic_auth(login, password),
encrypted=False)
return driver
    except Exception:
        return None
driver = setup_neo4j_driver("localhost", "7687", "neo4j", "password")
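# A minimal usage sketch, assuming the Neo4j instance above is reachable
# (the query and variable names are illustrative, not part of the original file):
#
#     with driver.session() as session:
#         result = session.run("MATCH (n) RETURN count(n) AS node_count")
#         print(result.single()["node_count"])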
|
import csv
import copy
def is_valid(sequence, number):
""" Returns True if the number is a sum of two discrete numbers in
the preceding sequence.
"""
success = False
for i,num in enumerate(sequence):
        if (number - num) in sequence[i+1:]:
success = True
break
return success
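# A quick illustration of is_valid (the preambles below are illustrative values,
# not the puzzle input): 40 is the sum of two distinct entries, 127 is not.
def _demo_is_valid():
    assert is_valid([35, 20, 15, 25, 47], 40) is True    # 15 + 25
    assert is_valid([95, 102, 117, 150, 182], 127) is False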
if __name__ == "__main__":
# load data
data = []
with open("input1.txt", "r") as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if (len(row) != 0):
data.append(int(row[0]))
# part #1: find first invalid number
for i,num in enumerate(data):
# skip first N numbers
if i < 25:
continue
if not is_valid(data[i-25:i],num):
print("First invalid number: {}".format(num))
invalid = num
break
# part #2: iterate through the list and find the first
# contiguous set that add up to the invalid number
success = False
for i,num in enumerate(data):
sum_ = num
j = i + 1
range_ = [num]
while True:
# add the next number and check
sum_ += data[j]
range_.append(data[j])
if sum_ > invalid:
break
elif sum_ == invalid:
success = True
print("Found sequence: ({}:{}), ({}:{})".format(i,j,num,range_[-1]))
print("Answer: {}".format(min(range_) + max(range_)))
# otherwise continue
j += 1
# check stop conditions
if success:
break
|
#!/usr/bin/env python3
# CALIBRATION FUNCTION
# SET THE COLORS USED ON THE TRACK
# IF THE TRACK HAS NO YELLOW, REMOVE 'Yellow'
# IF YOU NEED TO CALIBRATE AN EXTRA COLOR SO THE ROBOT DOES NOT FALL OFF,
# CREATE THAT COLOR WITH ANY NAME AND CALIBRATE IT NORMALLY;
# IN THE MAIN PROGRAM, REFER TO THAT COLOR BY THE NAME GIVEN HERE
import ev3dev2.fonts as fonts
from ev3dev2.button import Button
from time import sleep
from ev3dev2.sensor import INPUT_4, INPUT_2
from ev3dev.ev3 import *
from ev3dev2.sensor.lego import ColorSensor
import pickle
import os
os.system('setfont Lat15-TerminusBold32x16')
btn = Button()
cores = {
'Red': (),
'Green': (),
# 'Yellow': (),
'White': (),
'Black': (),
'Blue':(),
'DarkBlue':(),
'LightGreen':(),
}
Sensor_direita = ColorSensor(INPUT_4)
Sensor_esquerda = ColorSensor(INPUT_2)
Sensor_direita.mode= Sensor_direita.MODE_RGB_RAW
Sensor_esquerda.mode = Sensor_esquerda.MODE_RGB_RAW
def mostre(frase):
print(frase,end = ' \r')
def media(leitura1, leitura2):  # averages the readings of both sensors; do not use during alignment
media=[]
for x in range(3):
media.append((leitura1[x]+leitura2[x])/2)
return tuple(media)
for cor in cores.keys():
frase = "Coloque na cor: {}".format(cor)
mostre(frase)
print("Coloque na cor:",cor)
Sound.speak(cor)
while not btn.any():pass
cores[cor]=media(Sensor_direita.rgb,Sensor_esquerda.rgb)
sleep(1)
mostre('All colors registered')
mostre('Saving file Cores.p...')
pickle.dump(cores,open('Cores.p','wb'))
mostre('Exiting')
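# A minimal sketch (not part of this script) of how the main program could read
# the calibration back; 'Cores.p' is the file written just above:
#
#     import pickle
#     cores = pickle.load(open('Cores.p', 'rb'))
#     print(cores['Red'])   # calibrated RGB tuple for Red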
|
"""
Obtain a Zaim access token
"""
from zaim_client import ZaimClient
if __name__ == '__main__':
client = ZaimClient()
client.print_access_token()
|
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
class MutlInfo(nn.Module):
def __init__(self, num_classes=4):
super(MutlInfo, self).__init__()
self.convnet_1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.convnet_2 = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(256, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fcnet = nn.Sequential(
nn.Linear(2 * 512 * 30 + num_classes, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 512),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(512, 1)
)
# initialize weight
self._initialize_weights()
def forward(self, x, z, u):
out1 = self.convnet_1(x)
out1 = self.convnet_2(out1)
out1 = out1.view(out1.size(0), -1)
out2 = self.convnet_2(z)
out2 = out2.view(out2.size(0), -1)
out = torch.cat((out1, out2, u), dim=1)
out = self.fcnet(out)
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def info_loss(MI, x, z, u, x_prime):
Ej = -F.softplus(-MI(x, z, u)).mean()
Em = F.softplus(MI(x_prime, z, u)).mean()
return Ej - Em
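# A minimal sketch of how the estimator above can be exercised (not part of the
# original file). The 160x192 spatial size is an assumption chosen so that the
# flattened convnet output matches fcnet's expected 2*512*30 + num_classes inputs;
# x_prime is a batch-shuffled copy of x acting as the "marginal" samples.
def _demo_info_loss():
    mi = MutlInfo(num_classes=4)
    x = torch.randn(2, 3, 160, 192)           # images
    z = mi.convnet_1(x)                       # intermediate 128-channel feature map
    u = F.one_hot(torch.tensor([0, 1]), num_classes=4).float()
    x_prime = x[torch.randperm(x.size(0))]    # shuffled negatives
    print(info_loss(mi, x, z, u, x_prime))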
|
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout, GRU
from keras.optimizers import Adam, SGD, RMSprop
import numpy as np
import random
import cv2
import display as dp
from datetime import datetime
def arrange_data(x_train, y_train):
y_train = np.nan_to_num(y_train)
print("y_train:", np.shape(y_train))
#x_train = np.reshape(x_train,(np.shape(x_train)[0],np.shape(x_train)[1]),int(np.shape(x_train)[2]/60))
x_train = np.nan_to_num(x_train)
print("x_train:", np.shape(x_train))
return x_train, y_train
train_data = ["D1/20151110175712-16km-D1-NORMAL1-SECONDARY/SEMANTIC_ONLINE.txt","D1/20151110180824-16km-D1-NORMAL2-SECONDARY/SEMANTIC_ONLINE.txt","D1/20151111123124-25km-D1-NORMAL-MOTORWAY/SEMANTIC_ONLINE.txt","D1/20151111125233-24km-D1-AGGRESSIVE-MOTORWAY/SEMANTIC_ONLINE.txt","D1/20151111132348-25km-D1-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt","D1/20151111134545-16km-D1-AGGRESSIVE-SECONDARY/SEMANTIC_ONLINE.txt","D1/20151111135612-13km-D1-DROWSY-SECONDARY/SEMANTIC_ONLINE.txt",
"D2/20151120131714-26km-D2-NORMAL-MOTORWAY/SEMANTIC_ONLINE.txt","D2/20151120133502-26km-D2-AGGRESSIVE-MOTORWAY/SEMANTIC_ONLINE.txt","D2/20151120135152-25km-D2-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt","D2/20151120160904-16km-D2-NORMAL1-SECONDARY/SEMANTIC_ONLINE.txt","D2/20151120162105-17km-D2-NORMAL2-SECONDARY/SEMANTIC_ONLINE.txt","D2/20151120163350-16km-D2-AGGRESSIVE-SECONDARY/SEMANTIC_ONLINE.txt","D2/20151120164606-16km-D2-DROWSY-SECONDARY/SEMANTIC_ONLINE.txt",
"D3/20151126110502-26km-D3-NORMAL-MOTORWAY/SEMANTIC_ONLINE.txt","D3/20151126113754-26km-D3-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt","D3/20151126124208-16km-D3-NORMAL1-SECONDARY/SEMANTIC_ONLINE.txt","D3/20151126125458-16km-D3-NORMAL2-SECONDARY/SEMANTIC_ONLINE.txt","D3/20151126130707-16km-D3-AGGRESSIVE-SECONDARY/SEMANTIC_ONLINE.txt","D3/20151126132013-17km-D3-DROWSY-SECONDARY/SEMANTIC_ONLINE.txt","D3/20151126134736-26km-D3-AGGRESSIVE-MOTORWAY/SEMANTIC_ONLINE.txt",
"D4/20151203171800-16km-D4-NORMAL1-SECONDARY/SEMANTIC_ONLINE.txt","D4/20151203173103-17km-D4-NORMAL2-SECONDARY/SEMANTIC_ONLINE.txt","D4/20151203174324-16km-D4-AGGRESSIVE-SECONDARY/SEMANTIC_ONLINE.txt","D4/20151203175637-17km-D4-DROWSY-SECONDARY/SEMANTIC_ONLINE.txt","D4/20151204152848-25km-D4-NORMAL-MOTORWAY/SEMANTIC_ONLINE.txt","D4/20151204154908-25km-D4-AGGRESSIVE-MOTORWAY/SEMANTIC_ONLINE.txt","D4/20151204160823-25km-D4-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt",
"D5/20151209151242-25km-D5-NORMAL-MOTORWAY/SEMANTIC_ONLINE.txt","D5/20151209153137-25km-D5-AGGRESSIVE-MOTORWAY/SEMANTIC_ONLINE.txt","D5/20151211160213-25km-D5-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt","D5/20151211162829-16km-D5-NORMAL1-SECONDARY/SEMANTIC_ONLINE.txt","D5/20151211164124-17km-D5-NORMAL2-SECONDARY/SEMANTIC_ONLINE.txt","D5/20151211165606-12km-D5-AGGRESSIVE-SECONDARY/SEMANTIC_ONLINE.txt","D5/20151211170502-16km-D5-DROWSY-SECONDARY/SEMANTIC_ONLINE.txt",
"D6/20151217162714-26km-D6-NORMAL-MOTORWAY/SEMANTIC_ONLINE.txt","D6/20151217164730-25km-D6-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt","D6/20151221112434-17km-D6-NORMAL-SECONDARY/SEMANTIC_ONLINE.txt","D6/20151221113846-16km-D6-DROWSY-SECONDARY/SEMANTIC_ONLINE.txt","D6/20151221120051-26km-D6-AGGRESSIVE-MOTORWAY/SEMANTIC_ONLINE.txt"]
# ,"D3/20151126113754-26km-D3-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt"
random.shuffle(train_data)
#test_data = ["D6/20151217162714-26km-D6-NORMAL-MOTORWAY/SEMANTIC_ONLINE.txt","D6/20151217164730-25km-D6-DROWSY-MOTORWAY/SEMANTIC_ONLINE.txt","D6/20151221112434-17km-D6-NORMAL-SECONDARY/SEMANTIC_ONLINE.txt","D6/20151221113846-16km-D6-DROWSY-SECONDARY/SEMANTIC_ONLINE.txt","D6/20151221120051-26km-D6-AGGRESSIVE-MOTORWAY/SEMANTIC_ONLINE.txt"]
# test_file = ["D1/20151110175712-16km-D1-NORMAL1-SECONDARY/"]
# videoName = ["20151110175712-16km-D1-NORMAL1-SECONDARY.mp4"]
test_file = ["D3/20151126113754-26km-D3-DROWSY-MOTORWAY/"]
videoName = ["20151126113753-26km-D3-DROWSY-MOTORWAY.mp4"]
data_dim = 7
timesteps = 60 #29150
num_classes = 3
'''
model = Sequential()
model.add(GRU(128, dropout = 0.25, recurrent_dropout = 0.2, return_sequences=True, input_shape=(timesteps,data_dim))) # returns a sequence of vectors of dimension 32
model.add(GRU(134, dropout = 0.25,recurrent_dropout = 0.2, return_sequences=True)) # returns a sequence of vectors of dimension 3
model.add(GRU(3, dropout = 0.25, recurrent_dropout = 0.2))
opt = SGD(lr=0.001, decay=5e-4, momentum=0.9, nesterov=True)
model.compile(loss='mse', optimizer=opt, metrics=['mae','accuracy'])
y_train_buf = np.loadtxt(train_data[0],usecols=(11,12,13))
y_train = y_train_buf[59:,:]
for j in range(len(train_data)):
if(j == 0):
continue
y_temp_buf = np.loadtxt(train_data[j],usecols=(11,12,13))
y_temp = y_temp_buf[59:,:]
y_train = np.append(y_train,y_temp, axis = 0)
x_train_buf = np.loadtxt(train_data[0],usecols=(4,5,6,7,8,9,10))
x_train = np.zeros((np.shape(x_train_buf)[0]-59,60,np.shape(x_train_buf)[1]))
print(np.shape(x_train))
for k in range(np.shape(x_train_buf)[0]-59):
x_train[k] = x_train_buf[k:k+60]
for j in range(len(train_data)):
if(j == 0):
continue
x_temp_buf = np.loadtxt(train_data[j],usecols=(4,5,6,7,8,9,10))
x_temp = np.zeros((np.shape(x_temp_buf)[0]-59,60,np.shape(x_temp_buf)[1]))
for k in range(np.shape(x_temp_buf)[0]-59):
x_temp[k] = x_temp_buf[k:k+60]
x_train = np.append(x_train,x_temp, axis = 0)
x_train = x_train/100
x_train, y_train = arrange_data(x_train, y_train)
model.fit(x_train,y_train,batch_size=1024,epochs=150, verbose = 2)
model.save("save_RNN.h5")
'''
model = load_model("save_RNN_decent.h5")
'''
print("Evaluating:")
y_test_buf = np.loadtxt(test_data[0],usecols=(11,12,13))
y_test = y_test_buf[59:,:]
for j in range(len(test_data)):
if(j == 0):
continue
y_temp_buf = np.loadtxt(test_data[j],usecols=(11,12,13))
y_temp = y_temp_buf[59:,:]
y_test = np.append(y_test,y_temp, axis = 0)
print(np.shape(y_test))
x_test_buf = np.loadtxt(test_data[0],usecols=(4,5,6,7,8,9,10))
x_test = np.zeros((np.shape(x_test_buf)[0]-59,60,np.shape(x_test_buf)[1]))
for k in range(np.shape(x_test_buf)[0]-59):
x_test[k] = x_test_buf[k:k+60]
for j in range(len(test_data)):
if(j == 0):
continue
x_temp_buf = np.loadtxt(test_data[j],usecols=(4,5,6,7,8,9,10))
x_temp = np.zeros((np.shape(x_temp_buf)[0]-59,60,np.shape(x_temp_buf)[1]))
for k in range(np.shape(x_temp_buf)[0]-59):
x_temp[k] = x_temp_buf[k:k+60]
x_test = np.append(x_test,x_temp, axis = 0)
x_test = x_test/100
x_test, y_test = arrange_data(x_test, y_test)
y_test_predict = model.predict(x_test)
for i in range(np.shape(y_test_predict)[0]):
print(y_test_predict[i],y_test[i])
score = model.evaluate(x_test,y_test,batch_size=1500)
print("Score:",score)'''
print("Evaluating:")
for j in range(len(test_file)):
test_data = test_file[j] + "SEMANTIC_ONLINE.txt"
y_temp_buf = np.loadtxt(test_data,usecols=(11,12,13))
y_temp = y_temp_buf[59:,:]
x_temp_buf = np.loadtxt(test_data,usecols=(4,5,6,7,8,9,10))
videoName1 = test_file[j] + videoName[j]
videoDateString = videoName1.split('/')[-1][0:14]
dataDateString = test_file[j].split('/')[-2][0:14]
videoDate = datetime.strptime(videoDateString, "%Y%m%d%H%M%S")
dataDate = datetime.strptime(dataDateString, "%Y%m%d%H%M%S")
delayVideoToData = (dataDate - videoDate).total_seconds()
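    # Offset in seconds between the video start and the data-log start; used below to keep video frames
    # and sensor rows aligned (the code assumes 30 video frames per second).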
cap = cv2.VideoCapture(videoName1)
if(delayVideoToData<=0):
print("Initial one minute")
for i in range(1800):
_,frame = cap.read()
dp.display(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
for k in range(np.shape(x_temp_buf)[0]-59):
p = k-int(delayVideoToData)
x_temp = x_temp_buf[p:p+60]
x_temp = np.reshape(x_temp,(1,60,7))
x_temp = x_temp/100
            y_pred = model.predict(x_temp) # predict on the current 60-row window
sen = x_temp[:,59,:]
print(y_pred,y_temp[k])
y_pred = 10*y_pred
for i in range(30):
ret, frame = cap.read()
dp.display(frame,np.reshape(y_pred,(1,3)).tolist()[0],np.reshape(sen,(1,7)).tolist()[0])
if cv2.waitKey(1) & 0xFF == ord('q'):
break
elif(delayVideoToData>0):
print('Initial one minute')
for i in range(1800+int(delayVideoToData)*30):
_,frame = cap.read()
dp.display(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
for k in range(np.shape(x_temp_buf)[0]-59):
x_temp = x_temp_buf[k:k+60]
x_temp = np.reshape(x_temp,(1,60,7))
x_temp = x_temp/100
y_pred = model.predict(x_temp) #for every minute
sen = x_temp[:,59,:]
print(y_pred,y_temp[k])
y_pred = 10*y_pred
for i in range(30):
ret, frame = cap.read()
dp.display(frame,np.reshape(y_pred,(1,3)).tolist()[0],np.reshape(sen,(1,7)).tolist()[0])
if cv2.waitKey(1) & 0xFF == ord('q'):
break
|
import time
a = int(input())
b= {}
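# m(n): maximum value obtainable from n by either keeping it or splitting it into n//2 + n//3 + n//4 (results memoized in b).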
def m(a):
if a<=4:
return a
    if a in b:
        return b[a]
c1 = max(a//2, m(a//2))
c3 = max(a//3, m(a//3))
c4 = max(a//4, m(a//4))
    b[a] = max(a, c1+c3+c4)
return b[a]
t1 = time.perf_counter()
print(m(a))
t2 = -t1+time.perf_counter()
print(t2)
print(b)
|
import os
import re
import sys
from irc.client import NickMask
here = lambda x: os.path.join(os.path.dirname(__file__), x)
conf = lambda x: os.path.join(os.path.dirname(__file__), "conf/", x)
BT24_ENABLED = False
try:
from bitcoin24.bitcoin24 import Bitcoin24
BT24_ENABLED = True
except ImportError:
# happens, e.g., when the requests-library is not installed
pass
class InvalidCurrencyError(Exception):
pass
class ConversionNotPossibleError(Exception):
pass
def resolve_alias(source):
try:
f = open(conf("aliases"))
for line in f:
frm, to = line.strip().split()
if frm == source:
return to
except IOError:
pass
return "%s" % source
def btc2eur(amount):
if not BT24_ENABLED:
return None
bt24 = Bitcoin24()
last = bt24.ticker()['last']
return amount * float(last)
def eur2btc(amount):
if not BT24_ENABLED:
return None
bt24 = Bitcoin24()
last = bt24.ticker()['last']
return amount / float(last)
def any2btc(amount):
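    # Parse a string like "0.5BTC" or "10EUR": the last three characters are the currency code,
    # the rest is the amount; EUR amounts are converted to BTC.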
currency = amount[-3:]
amount = float(amount[:-3])
if currency not in ("EUR", "BTC"):
raise InvalidCurrencyError(currency)
if currency != "BTC" and not BT24_ENABLED:
raise ConversionNotPossibleError
if currency == "EUR":
amount = eur2btc(amount)
return amount, "BTC"
###########################
# Wrappers around bot._rpc
# Resolve aliases at the last possible moment.
def has_account(bot, host):
accounts = bot._rpc("listaccounts")
return resolve_alias(host) in accounts
def create_account(bot, host):
bot._rpc("getnewaddress", "%s" % resolve_alias(host))
def get_balance(bot, host):
return bot._rpc("getbalance", "%s" % resolve_alias(host))
def get_account_address(bot, host):
return bot._rpc("getaccountaddress", "%s" % resolve_alias(host))
def move(bot, src, dst, amount):
bot._rpc("move", "%s" % resolve_alias(src),
"%s" % resolve_alias(dst), amount)
def get_txfee():
try:
return float(open(conf("txfee")).read().strip())
except IOError:
return 0.001 # fall back
def sendfrom(bot, host, address, amount):
host = "%s" % resolve_alias(host)
return bot._rpc("sendfrom", "%s" % host, "%s" % address, amount)
######################
# Actual IRC handlers
waiting_on_host = {} # nickname => (fun, list(partialargs))
USERHOST_RE = re.compile("([a-zA-Z0-9_\\\[\]{}^`|-]+)(\*?)=[+-](.+)")
def on_userhost(bot, e):
m = USERHOST_RE.search(e.arguments[0].strip())
if m:
nick = m.group(1)
ircop = m.group(2)
host = m.group(3)
if nick in waiting_on_host:
fun, partialargs = waiting_on_host[nick]
partialargs.append("%s!%s" % (nick, host))
fun(*partialargs)
del waiting_on_host[nick]
def on_privmsg(bot, e):
args = e.arguments[0].split()
    # privmsgs don't need to be prefixed with +, but they may be.
handle_command(bot, e, args)
def on_pubmsg(bot, e):
args = e.arguments[0].split()
if args[0].startswith("+"):
handle_command(bot, e, args)
def handle_command(bot, e, args):
thismodule = sys.modules[__name__]
if args[0].startswith("+"):
args[0] = args[0][1:]
if not args[0].strip():
return
if not hasattr(thismodule, "do_%s" % args[0]):
return
getattr(thismodule, "do_%s" % args[0])(bot, e, args[1:])
###################
# Command handlers
def do_reload(bot, e, args):
bot.reload_handlers()
def do_help(bot, e, args):
target = e.target
if e.type == "privmsg":
target = e.source.nick
msg = lambda m: bot.connection.privmsg(target, m)
thismodule = sys.modules[__name__]
for elem in dir(thismodule):
if not elem.startswith("do_"):
continue
fun = getattr(thismodule, elem)
if not fun.__doc__:
continue
msg(fun.__doc__)
def do_ls(bot, e, args):
# small test-function.
if e.type == "privmsg":
bot.connection.privmsg(e.source.nick, "Can only use +ls in channels")
return
users = bot.channels[e.target].users()
for user in users:
bot.connection.send_raw("USERHOST :%s" % user)
def send_tip(bot, target, amount, currency, source, dest):
print "Sending a tip from %s to %s" % (source, dest)
source = NickMask(source)
dest = NickMask(dest)
bot.connection.privmsg(target, "Sending %s%s from %s to %s" % (amount, currency, source.nick, dest.nick))
bot.connection.privmsg(dest.nick,
"%s just sent you a tip of %s%s" % (source.nick, amount, currency))
if not has_account(bot, dest):
create_account(bot, dest)
bot.connection.privmsg(dest.nick,
"I created a wallet for you. For more information on your wallet, please send me the +wallet command.")
move(bot, source, dest, amount)
def do_tip(bot, e, args):
"+tip <nick> <amount><BTC|EUR> (give someone a bit of money)"
if len(args) != 2:
bot.connection.privmsg(e.target, "Usage: +tip <nick> <amount><BTC|EUR>")
return
user = args[0]
amount = args[1]
try:
amount, currency = any2btc(amount)
except (InvalidCurrencyError, ConversionNotPossibleError):
print "Invalid amount:", amount
return bot.connection.privmsg(e.target,
"Usage: +tip <nick> <amount><BTC|EUR>")
if user not in bot.channels[e.target].users():
bot.connection.privmsg(e.target, "%s is not on this channel." % user)
return
if not has_account(bot, e.source):
return bot.connection.privmsg(e.target,
"You don't have a wallet. Use +wallet to get more information.")
balance = get_balance(bot, e.source)
if amount > balance:
return bot.connection.privmsg(e.source.nick,
"You don't have enough BTC in your wallet.")
# We need a full userhost to be able to determine the account name for the
# user. Therefore, we need to send a USERHOST command, which is handled by
# the on_userhost handler above. This means that we have to split
# do_tip into two functions, and wait for the userhost to arrive.
waiting_on_host[user] = (send_tip,
[bot, e.target, amount, currency, e.source])
# unfortunately, the bot.connection.userhost() command seems to be broken.
bot.connection.send_raw("USERHOST :%s" % user)
def do_balance(bot, e, args):
"+balance (shows your balance)"
if e.source != resolve_alias(e.source):
bot.connection.privmsg(e.source.nick,
"I know you by: %s." % resolve_alias(e.source))
if not has_account(bot, e.source):
bot.connection.privmsg(e.source.nick, "You have no wallet yet.")
else:
balance = get_balance(bot, e.source)
msg = "Balance: %sBTC" % balance
if BT24_ENABLED:
msg += " (%sEUR)" % btc2eur(balance)
bot.connection.privmsg(e.source.nick, msg)
def do_wallet(bot, e, args):
"+wallet (shows the address for receiving payments to your account)"
if e.source != resolve_alias(e.source):
bot.connection.privmsg(e.source.nick,
"I know you by: %s." % resolve_alias(e.source))
if not has_account(bot, e.source):
create_account(bot, e.source)
bot.connection.privmsg(e.source.nick, "I created a wallet for you")
address = get_account_address(bot, e.source)
bot.connection.privmsg(e.source.nick,
"Your address for receiving payments is: %s" % address)
def do_xchange(bot, e, args):
"+xchange [<amount><BTC|EUR>] (gets the current exchange rate from bitcoin-24.com)"
target = e.target
if e.type == "privmsg":
target = e.source.nick
amount = 1
currency = "BTC"
if len(args):
try:
amount, currency = any2btc(args[0])
except InvalidCurrencyError:
print "Invalid amount:", amount
return bot.connection.privmsg(e.target,
"Usage: +xchange [<amount><BTC|EUR>]")
except ConversionNotPossibleError:
return bot.connection.privmsg(target,
"Bitcoin-24 integration not enabled.")
rate = btc2eur(amount)
bot.connection.privmsg(target, "%sBTC = %sEUR (bitcoin-24.com)" % (amount, rate))
def do_txfee(bot, e, args):
"+txfee (gets the current transfer fee)"
target = e.target
if e.type == "privmsg":
target = e.source.nick
txfee = get_txfee()
msg = "The current transfer fee is %sBTC" % txfee
if BT24_ENABLED:
msg += " (%sEUR)" % btc2eur(txfee)
bot.connection.privmsg(target, msg)
def do_transfer(bot, e, args):
"+transfer <amount><BTC|EUR> <bitcoinaddress> (transfer money to another account)"
target = e.target
if e.type == "privmsg":
target = e.source.nick
if len(args) != 2:
return bot.connection.privmsg(target,
"Usage: +transfer <amount><BTC|EUR> <bitcoinaddress>")
amount = args[0]
address = args[1]
try:
amount, currency = any2btc(amount)
except (InvalidCurrencyError, ConversionNotPossibleError):
print "Invalid amount:", amount
return bot.connection.privmsg(target,
"Usage: +transfer <amount><BTC|EUR> <bitcoinaddress>")
address_info = bot._rpc("validateaddress", address)
if 'isvalid' not in address_info or not address_info['isvalid']:
return bot.connection.privmsg(target,
"%s is not a valid address" % address)
if not has_account(bot, e.source):
return bot.connection.privmsg(target,
"You don't have a wallet. Use +wallet to get more information.")
txfee = get_txfee()
balance = get_balance(bot, e.source)
if amount + txfee > balance:
return bot.connection.privmsg(e.source.nick,
"You don't have enough BTC in your wallet. (Current transfer fee is %sBTC)" % txfee)
txid = None
try:
txid = sendfrom(bot, e.source, address, amount)
    except Exception:
bot.connection.privmsg(e.source.nick, "An error occurred while trying to sendfrom.")
if not txid:
return
bot.connection.privmsg(target, "Follow your transaction: http://blockchain.info/tx/%s" % txid)
|
from collections import deque
import time
from threading import RLock
from functools import wraps
from warnings import warn
import logging
import threading
import numpy as np
logger = logging.getLogger(__name__)
class UseNewProperty(RuntimeError):
...
# This is used below by StatusBase.
def _locked(func):
"an decorator for running a method with the instance's lock"
@wraps(func)
def f(self, *args, **kwargs):
with self._lock:
return func(self, *args, **kwargs)
return f
class StatusBase:
"""
This is a base class that provides a single-slot callback for when the
specific operation has finished.
Parameters
----------
timeout : float, optional
The default timeout to use for a blocking wait, and the amount of time
to wait to mark the operation as failed
settle_time : float, optional
The amount of time to wait between the caller specifying that the
status has completed to running callbacks
"""
def __init__(self, *, timeout=None, settle_time=None, done=False,
success=False):
super().__init__()
self._tname = None
self._lock = RLock()
self._callbacks = deque()
self.done = done
self.success = success
self.timeout = None
if settle_time is None:
settle_time = 0.0
self.settle_time = float(settle_time)
if timeout is not None:
self.timeout = float(timeout)
if self.done:
# in the case of a pre-completed status object,
# don't handle timeout
return
if self.timeout is not None and self.timeout > 0.0:
thread = threading.Thread(target=self._wait_and_cleanup,
daemon=True, name=self._tname)
self._timeout_thread = thread
self._timeout_thread.start()
def _wait_and_cleanup(self):
"""Handle timeout"""
try:
if self.timeout is not None:
timeout = self.timeout + self.settle_time
else:
timeout = None
wait(self, timeout=timeout, poll_rate=0.2)
except TimeoutError:
with self._lock:
if self.done:
# Avoid race condition with settling.
return
logger.debug('Status object %s timed out', str(self))
try:
self._handle_failure()
finally:
self._finished(success=False)
except RuntimeError:
pass
finally:
self._timeout_thread = None
def _handle_failure(self):
pass
def _settled(self):
"""Hook for when status has completed and settled"""
pass
def _settle_then_run_callbacks(self, success=True):
# wait until the settling time is done to mark completion
if self.settle_time > 0.0:
time.sleep(self.settle_time)
with self._lock:
if self.done:
# We timed out while waiting for the settle time.
return
self.success = success
self.done = True
self._settled()
for cb in self._callbacks:
cb()
self._callbacks.clear()
def _finished(self, success=True, **kwargs):
"""Inform the status object that it is done and if it succeeded
.. warning::
            kwargs are not used, but are accepted because pyepics passes
            in a bunch of kwargs that we don't care about. This allows
            the status object to be handed directly to pyepics (but
            this is probably a bad idea for other reasons).
            This may be deprecated in the future.
Parameters
----------
success : bool, optional
if the action succeeded.
"""
if self.done:
return
if success and self.settle_time > 0:
# delay gratification until the settle time is up
self._settle_thread = threading.Thread(
target=self._settle_then_run_callbacks, daemon=True,
kwargs=dict(success=success),
)
self._settle_thread.start()
else:
self._settle_then_run_callbacks(success=success)
@property
def callbacks(self):
"""
Callbacks to be run when the status is marked as finished
The callback has no arguments ::
def cb() -> None:
"""
return self._callbacks
@property
@_locked
def finished_cb(self):
if len(self.callbacks) == 1:
warn("The property `finished_cb` is deprecated, and must raise "
"an error if a status object has multiple callbacks. Use "
"the `callbacks` property instead.", stacklevel=2)
cb, = self.callbacks
assert cb is not None
return cb
else:
raise UseNewProperty("The deprecated `finished_cb` property "
"cannot be used for status objects that have "
"multiple callbacks. Use the `callbacks` "
"property instead.")
@_locked
def add_callback(self, cb):
if self.done:
cb()
else:
self._callbacks.append(cb)
@finished_cb.setter
@_locked
def finished_cb(self, cb):
if not self.callbacks:
warn("The setter `finished_cb` is deprecated, and must raise "
"an error if a status object already has one callback. Use "
"the `add_callback` method instead.", stacklevel=2)
self.add_callback(cb)
else:
raise UseNewProperty("The deprecated `finished_cb` setter cannot "
"be used for status objects that already "
"have one callback. Use the `add_callbacks` "
"method instead.")
def __and__(self, other):
"""
Returns a new 'composite' status object, AndStatus,
with the same base API.
        It will finish when both `self` and `other` finish.
"""
return AndStatus(self, other)
class AndStatus(StatusBase):
"a Status that has composes two other Status objects using logical and"
def __init__(self, left, right, **kwargs):
super().__init__(**kwargs)
self.left = left
self.right = right
def inner():
with self._lock:
with self.left._lock:
with self.right._lock:
l_success = self.left.success
r_success = self.right.success
l_done = self.left.done
r_done = self.right.done
# At least one is done.
# If it failed, do not wait for the second one.
if (not l_success) and l_done:
self._finished(success=False)
elif (not r_success) and r_done:
self._finished(success=False)
elif l_success and r_success and l_done and r_done:
# Both are done, successfully.
self._finished(success=True)
# Else one is done, successfully, and we wait for #2,
# when this function will be called again.
self.left.add_callback(inner)
self.right.add_callback(inner)
def __repr__(self):
return "({self.left!r} & {self.right!r})".format(self=self)
def __str__(self):
return ('{0}(done={1.done}, '
'success={1.success})'
''.format(self.__class__.__name__, self)
)
class Status(StatusBase):
"""A basic status object
Has an optional associated object instance
Attributes
----------
obj : any or None
The object
"""
def __init__(self, obj=None, **kwargs):
self.obj = obj
super().__init__(**kwargs)
def __str__(self):
return ('{0}(obj={1.obj}, '
'done={1.done}, '
'success={1.success})'
''.format(self.__class__.__name__, self)
)
__repr__ = __str__
class DeviceStatus(StatusBase):
"""Device status
Parameters
----------
device : obj
done : bool, optional
Whether or not the motion has already completed
success : bool, optional
If motion has already completed, the status of that motion
timeout : float, optional
The default timeout to use for a blocking wait, and the amount of time
to wait to mark the motion as failed
settle_time : float, optional
The amount of time to wait between motion completion and running
callbacks
"""
def __init__(self, device, **kwargs):
self.device = device
self._watchers = []
super().__init__(**kwargs)
def _handle_failure(self):
super()._handle_failure()
logger.debug('Trying to stop %s', repr(self.device))
self.device.stop()
def __str__(self):
return ('{0}(device={1.device.name}, done={1.done}, '
'success={1.success})'
''.format(self.__class__.__name__, self)
)
def watch(self, func):
# See MoveStatus.watch for a richer implementation and more info.
if self.device is not None:
self._watchers.append(func)
func(name=self.device.name)
def _settled(self):
'''Hook for when status has completed and settled'''
for watcher in self._watchers:
watcher(name=self.device.name, fraction=1)
__repr__ = __str__
class SubscriptionStatus(DeviceStatus):
"""
Status updated via `ophyd` events
Parameters
----------
device : obj
callback : callable
Callback that takes event information and returns a boolean. Signature
should be `f(*args, **kwargs)`
event_type : str, optional
        Name of event type to check whether the device has finished successfully
timeout : float, optional
Maximum timeout to wait to mark the request as a failure
settle_time : float, optional
Time to wait after completion until running callbacks
run: bool, optional
Run the callback now
"""
def __init__(self, device, callback, event_type=None,
timeout=None, settle_time=None, run=True):
# Store device and attribute information
self.device = device
self.callback = callback
# Start timeout thread in the background
super().__init__(device, timeout=timeout, settle_time=settle_time)
# Subscribe callback and run initial check
self.device.subscribe(self.check_value,
event_type=event_type,
run=run)
def check_value(self, *args, **kwargs):
"""
Update the status object
"""
# Get attribute from device
try:
success = self.callback(*args, **kwargs)
# Do not fail silently
except Exception as e:
logger.error(e)
raise
        # If successful, indicate completion
if success:
self._finished(success=True)
def _finished(self, *args, **kwargs):
"""
Reimplemented finished command to cleanup callback subscription
"""
# Clear callback
self.device.clear_sub(self.check_value)
# Run completion
super()._finished(**kwargs)
class MoveStatus(DeviceStatus):
"""Asynchronous movement status
Parameters
----------
positioner : Positioner
target : float or array-like
Target position
done : bool, optional
Whether or not the motion has already completed
success : bool, optional
If motion has already completed, the status of that motion
start_ts : float, optional
The motion start timestamp
timeout : float, optional
The default timeout to use for a blocking wait, and the amount of time
to wait to mark the motion as failed
settle_time : float, optional
The amount of time to wait between motion completion and running
callbacks
Attributes
----------
pos : Positioner
target : float or array-like
Target position
done : bool
Whether or not the motion has already completed
start_ts : float
The motion start timestamp
finish_ts : float
        The motion completed timestamp
finish_pos : float or ndarray
The final position
success : bool
Motion successfully completed
"""
def __init__(self, positioner, target, *, start_ts=None,
**kwargs):
self._tname = 'timeout for {}'.format(positioner.name)
if start_ts is None:
start_ts = time.time()
self.pos = positioner
self.target = target
self.start_ts = start_ts
self.start_pos = self.pos.position
self.finish_ts = None
self.finish_pos = None
self._unit = getattr(self.pos, 'egu', None)
self._precision = getattr(self.pos, 'precision', None)
self._name = self.pos.name
# call the base class
super().__init__(positioner, **kwargs)
# Notify watchers (things like progress bars) of new values
# at the device's natural update rate.
if not self.done:
self.pos.subscribe(self._notify_watchers,
event_type=self.pos.SUB_READBACK)
def watch(self, func):
"""
Subscribe to notifications about progress. Useful for progress bars.
Parameters
----------
func : callable
            Expected to accept the keyword arguments:
* ``name``
* ``current``
* ``initial``
* ``target``
* ``unit``
* ``precision``
* ``fraction``
* ``time_elapsed``
* ``time_remaining``
"""
self._watchers.append(func)
def _notify_watchers(self, value, *args, **kwargs):
# *args and **kwargs catch extra inputs from pyepics, not needed here
if not self._watchers:
return
current = value
target = self.target
initial = self.start_pos
time_elapsed = time.time() - self.start_ts
try:
fraction = abs(target - current) / abs(initial - target)
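            # fraction of the distance still remaining: 1 at the start position, 0 once the target is reached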
# maybe we can't do math?
except (TypeError, ZeroDivisionError):
fraction = None
        if fraction is not None and np.isnan(fraction):
fraction = None
for watcher in self._watchers:
watcher(name=self._name,
current=current,
initial=initial,
target=target,
unit=self._unit,
precision=self._precision,
time_elapsed=time_elapsed,
fraction=fraction)
@property
def error(self):
"""Error between target position and current* position
* If motion is already complete, the final position is used
"""
if self.finish_pos is not None:
finish_pos = self.finish_pos
else:
finish_pos = self.pos.position
try:
return np.array(finish_pos) - np.array(self.target)
except Exception:
return None
def _settled(self):
"""Hook for when motion has completed and settled"""
super()._settled()
self.pos.clear_sub(self._notify_watchers)
self._watchers.clear()
self.finish_ts = time.time()
self.finish_pos = self.pos.position
@property
def elapsed(self):
"""Elapsed time"""
if self.finish_ts is None:
return time.time() - self.start_ts
else:
return self.finish_ts - self.start_ts
def __str__(self):
return ('{0}(done={1.done}, pos={1.pos.name}, '
'elapsed={1.elapsed:.1f}, '
'success={1.success}, settle_time={1.settle_time})'
''.format(self.__class__.__name__, self)
)
__repr__ = __str__
def wait(status, timeout=None, *, poll_rate=0.05):
"""(Blocking) wait for the status object to complete
Parameters
----------
timeout : float, optional
        Amount of time in seconds to wait. None disables the timeout, so that
        wait() will only return when the status completes or when it is
        interrupted by the user.
poll_rate : float, optional
Polling rate used to check the status
Raises
------
TimeoutError
If time waited exceeds specified timeout
RuntimeError
If the status failed to complete successfully
"""
t0 = time.time()
def time_exceeded():
return timeout is not None and (time.time() - t0) > timeout
while not status.done and not time_exceeded():
time.sleep(poll_rate)
if status.done:
if status.success is not None and not status.success:
raise RuntimeError('Operation completed but reported an error: {}'
''.format(status))
elif time_exceeded():
elapsed = time.time() - t0
raise TimeoutError('Operation failed to complete within {} seconds '
'(elapsed {:.2f} sec)'.format(timeout, elapsed))
|
"""
Copyright 2022 Objectiv B.V.
"""
import pytest
from bach.series import SeriesList
from tests.functional.bach.test_data_and_utils import get_df_with_test_data, assert_equals_data
pytestmark = [pytest.mark.skip_postgres] # SeriesList is not (yet) supported on Postgres.
def test_basic_value_to_expression(engine):
df = get_df_with_test_data(engine)[['skating_order']]
df = df.sort_index()[:1].materialize()
df['int_list'] = SeriesList.from_value(base=df, value=[1, 2, 3], name='int_list', dtype=['int64'])
df['str_list'] = SeriesList.from_value(base=df, value=['a', 'b', 'c'], name='str_list', dtype=['string'])
assert_equals_data(
df,
expected_columns=['_index_skating_order', 'skating_order', 'int_list', 'str_list'],
expected_data=[[1, 1, [1, 2, 3], ['a', 'b', 'c']]]
)
def test_series_to_list(engine):
df = get_df_with_test_data(engine)[['skating_order']]
df['int_list'] = SeriesList.from_value(
base=df,
value=[
df.skating_order,
df.skating_order * 2,
df.skating_order * 3],
name='int_list',
dtype=['int64']
)
df['str_list'] = SeriesList.from_value(
base=df,
value=['a', 'b', 'c'],
name='str_list',
dtype=['string']
)
assert_equals_data(
df,
expected_columns=['_index_skating_order', 'skating_order', 'int_list', 'str_list'],
expected_data=[
[1, 1, [1, 2, 3], ['a', 'b', 'c']],
[2, 2, [2, 4, 6], ['a', 'b', 'c']],
[3, 3, [3, 6, 9], ['a', 'b', 'c']]
]
)
def test_getitem(engine):
df = get_df_with_test_data(engine)[['skating_order']]
df = df.sort_index()[:1].materialize()
df['int_list'] = SeriesList.from_value(base=df, value=[1, 2, 3], name='int_list', dtype=['int64'])
df['str_list'] = SeriesList.from_value(base=df, value=['a', 'b', 'c'], name='str_list', dtype=['string'])
df['a'] = df['int_list'].elements[0]
df['b'] = df['int_list'].elements[1]
df['c'] = df['int_list'].elements[2]
df['d'] = df['str_list'].elements[1]
assert_equals_data(
df,
expected_columns=[
'_index_skating_order', 'skating_order', 'int_list', 'str_list', 'a', 'b', 'c', 'd'
],
expected_data=[[1, 1, [1, 2, 3], ['a', 'b', 'c'], 1, 2, 3, 'b']]
)
assert df.dtypes['a'] == 'int64'
assert df.dtypes['b'] == 'int64'
assert df.dtypes['c'] == 'int64'
assert df.dtypes['d'] == 'string'
def test_len(engine):
df = get_df_with_test_data(engine)[['skating_order']]
df = df.sort_index()[:1].materialize()
df['empty_list'] = SeriesList.from_value(base=df, value=[], name='empty_list', dtype=['int64'])
df['int_list'] = SeriesList.from_value(base=df, value=[1, 2, 3, 4, 5, 6], name='int_list', dtype=['int64'])
df['str_list'] = SeriesList.from_value(base=df, value=['a', 'b', 'c'], name='str_list', dtype=['string'])
df['a'] = df['empty_list'].elements.len()
df['b'] = df['int_list'].elements.len()
df['c'] = df['str_list'].elements.len()
print(df.dtypes)
assert_equals_data(
df,
expected_columns=[
'_index_skating_order', 'skating_order', 'empty_list', 'int_list', 'str_list', 'a', 'b', 'c'
],
expected_data=[
[1, 1, [], [1, 2, 3, 4, 5, 6], ['a', 'b', 'c'], 0, 6, 3]
]
)
|
import os
from nltk.corpus import stopwords
import unidecode
import re
import string
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
#Config
agg_stopword = ['s', '2018','31','diciembre','financieros','000','2019','nota','grupo','valor','2017','resultados','compania','1',
'total','consolidados','consolidado','razonable','gerencia','ciento','c','activos','cuentas','neto','us','efectivo','fecha','peru',
'inretail','2','3','importe', 'aproximadamente','b','respectivamente','ver','ano','si','vida','anos','4','d','5','i','www','com',
'aa', 'aaa', 'aaahipotecario', 'aaatat', 'aamnto', 'ab','ir','email','mes','niif','fmiv','bbb','ok','mzo','inc']
stopwords_espaniol = stopwords.words('spanish')
stopwords_espaniol.extend(agg_stopword)
def remove_stopword(x, lista_stopwords):
return [y for y in x if y not in lista_stopwords]
def clean_text(text):
    '''Make text lowercase, remove text in square brackets, remove links, remove punctuation,
    and remove words containing numbers. unidecode is applied to strip accent marks.'''
text = str(text).lower()
    text = re.sub('\[.*?\]', '', text)  # remove text in square brackets
text = re.sub('https?://\S+|www\.\S+', '', text)
text = re.sub('<.*?>+', '', text)
text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
text = re.sub('\n', '', text)
text = re.sub('\w*\d\w*', '', text)
text = unidecode.unidecode(text)
return text
def make_clean_dataframe(stopwords_espaniol, path_textos):
    # Access the path and read every document into a dict
dicc={}
for nombre_doc in os.listdir(path_textos):
text_string = open(path_textos+'/'+nombre_doc).read()
dicc[nombre_doc[:-4]] = text_string
    # Clean and transform the text.
dataframe = pd.DataFrame(dicc,index=[0]).T.rename(columns={0:'texto'}).reset_index()
dataframe['temp_list'] = dataframe['texto'].apply(lambda x: clean_text(x))
dataframe['temp_list'] = dataframe['temp_list'].apply(lambda x: str(x).split())
dataframe['texto_limpio'] = dataframe['temp_list'].apply(lambda x: remove_stopword(x, stopwords_espaniol))
dataframe = dataframe.rename(columns={'index':'nombre_doc'})
for k,v in dataframe['texto_limpio'].items():
dataframe.loc[k,'raw_clean_text'] = ' '.join(dataframe.loc[k,'texto_limpio'])
return dataframe
def analisis_sentimientos(path_excel, dataframe,method='count'):
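    # method='count' scores each document by counting hits from the positive/negative lexicons;
    # method='tfidf' instead returns a TF-IDF term matrix for the corpus.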
positive_words = pd.read_excel(path_excel, sheet_name='Positive',header=None,names=['positivas','pos_traducidas'])
negative_words = pd.read_excel(path_excel, sheet_name='Negative',header=None,names=['negativas','neg_traducidas'])
    # Clean up the word lists
positive_words['pos_traducidas'] = positive_words['pos_traducidas'].str.lower().apply(lambda x: clean_text(x))
negative_words['neg_traducidas'] = negative_words['neg_traducidas'].str.lower().apply(lambda x: clean_text(x))
    # Build lists of negative and positive words and join each into one large string separated by '|'
bad_words = negative_words['neg_traducidas'].dropna().tolist()
good_words = positive_words['pos_traducidas'].dropna(axis=0).tolist()
bad_words_str_one = '|'.join(bad_words[:1750])
bad_words_str_two = '|'.join(bad_words[1750:])
good_words_str = '|'.join(good_words)
if method == 'count':
dicc_empresas = {}
for nombre_doc in dataframe['nombre_doc'].values:
lista_nombre = nombre_doc.split('_')
dicc_empresas[lista_nombre[1]+'_'+lista_nombre[2]+ '_'+lista_nombre[3]] = {'good':dataframe.loc[dataframe['nombre_doc'] == nombre_doc, 'raw_clean_text'].str.count(good_words_str).values[0],
'bad':dataframe.loc[dataframe['nombre_doc'] == nombre_doc, 'raw_clean_text'].str.count(bad_words_str_one).values[0] +
dataframe.loc[dataframe['nombre_doc'] == nombre_doc, 'raw_clean_text'].str.count(bad_words_str_two).values[0]}
return pd.DataFrame(dicc_empresas)
elif method == 'tfidf':
vectorizer = TfidfVectorizer(ngram_range=(1,3))
tfidf = vectorizer.fit_transform(dataframe['raw_clean_text']) #
index_value = {i[1]:i[0] for i in vectorizer.vocabulary_.items()}
        # fully_indexed holds, for each document, every vocabulary index with its tf-idf value
fully_indexed = []
for row in tfidf:
fully_indexed.append({index_value[column]:value for (column,value) in zip(row.indices, row.data)})
tfidf_df = pd.DataFrame(tfidf.toarray(), columns=vectorizer.get_feature_names()).T
tfidf_df = tfidf_df.reset_index().rename(columns={'index':'palabras'})
return tfidf_df
else:
raise NotImplementedError
|
"""
Finds the sets that are special sum sets and sums the elements of all special sets
Author: Juan Rios
"""
import math
from itertools import combinations
def read_sets(filename):
"""
Reads sets file
"""
with open(filename) as sets_file:
sets = sets_file.read()
sets = sets.split('\n')
for idx in range(len(sets)):
coordinates = sets[idx].split(',')
points = []
for i in range(len(coordinates)):
points.append(int(coordinates[i]))
sets[idx] = sorted(points)
return sets
def test_special_sets(filename):
sets = read_sets(filename)
total = 0
for number_set in sets:
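        # A set is "special" when all elements are distinct, no two disjoint subsets have equal sums,
        # and a subset with more elements always has a strictly larger sum.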
is_special = True
for idx in range(len(number_set)-1):
if number_set[idx]==number_set[idx+1]:
is_special = False
break
if not is_special:
continue
for i in range(1,(len(number_set)//2)+1):
options = [j for j in range(len(number_set))]
for comb in combinations(options,i):
a = 0
complement = options.copy()
for idx in comb:
a += number_set[idx]
complement.remove(idx)
for j in range(max(i,2),len(complement)+1):
for comb2 in combinations(complement,j):
b = 0
for idx in comb2:
b += number_set[idx]
if a==b:
is_special=False
break
if i>j:
if a<b:
is_special = False
break
elif i<j:
if a>b:
is_special = False
break
if not is_special:
break
if not is_special:
break
if not is_special:
break
print(number_set,len(number_set), is_special)
if is_special:
total += sum(number_set)
return total
if __name__ == "__main__":
filename = 'files/sets.txt'
print('The sum of elements of all special sets is {0}'.format(test_special_sets(filename)))
|
#!/usr/bin/env mayapy
#
# Copyright 2021 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
import fixturesUtils
from maya import cmds
from maya import standalone
from pxr import Gf
from pxr import Sdf
from pxr import Usd
from pxr import UsdShade
from pxr import UsdUtils
class testUsdExportUsdPreviewSurface(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.input_dir = fixturesUtils.setUpClass(__file__)
cls.temp_dir = os.path.abspath(".")
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def _GetUsdMaterial(self, stage, materialName):
modelPrimPath = Sdf.Path.absoluteRootPath.AppendChild(
"UsdPreviewSurfaceExportTest"
)
materialsRootPrimPath = modelPrimPath.AppendChild(
UsdUtils.GetMaterialsScopeName()
)
materialPrimPath = materialsRootPrimPath.AppendChild(materialName)
materialPrim = stage.GetPrimAtPath(materialPrimPath)
self.assertTrue(materialPrim)
materialSchema = UsdShade.Material(materialPrim)
self.assertTrue(materialSchema)
return materialSchema
def _GetSourceShader(self, inputOrOutput):
(connectableAPI, _, _) = inputOrOutput.GetConnectedSource()
self.assertTrue(connectableAPI.GetPrim().IsA(UsdShade.Shader))
shaderPrim = connectableAPI.GetPrim()
self.assertTrue(shaderPrim)
shader = UsdShade.Shader(shaderPrim)
self.assertTrue(shader)
return shader
def _ValidateUsdShader(self, shader, expectedInputTuples, expectedOutputs):
for expectedInputTuple in expectedInputTuples:
(inputName, expectedValue) = expectedInputTuple
shaderInput = shader.GetInput(inputName)
self.assertTrue(shaderInput)
if expectedValue is None:
self.assertFalse(shaderInput.GetAttr().HasAuthoredValueOpinion())
continue
# Validate the input value
value = shaderInput.Get()
if isinstance(value, float) or isinstance(value, Gf.Vec3f):
self.assertTrue(Gf.IsClose(value, expectedValue, 1e-6))
else:
self.assertEqual(value, expectedValue)
outputs = {
output.GetBaseName(): output.GetTypeName() for output in shader.GetOutputs()
}
self.assertEqual(outputs, expectedOutputs)
def _ValidateUsdShaderOuputs(self, shader, expectedOutputs):
# Validate shader only has one output
self.assertEqual(len(shader.GetOutputs()), 1)
# Validate output's value
shaderOuput = shader.GetOutputs()[0]
self.assertEqual(shaderOuput.GetBaseName(), expectedOutputs)
def generateStandaloneTestScene(self, attrTuples):
"""
        Generate a test scene containing a usdPreviewSurface with attribute
        values authored but no connections.
"""
maya_file = os.path.join(
self.temp_dir, "UsdExportStandaloneUsdPreviewSurfaceTest.ma"
)
cmds.file(force=True, new=True)
mesh = "StandaloneMaterialSphere"
cmds.polySphere(name=mesh, subdivisionsX=20, subdivisionsY=20, radius=1)
cmds.group(mesh, name="Geom")
cmds.group("Geom", name="UsdPreviewSurfaceExportTest")
shading_node = "usdPreviewSurface_Standalone"
cmds.shadingNode("usdPreviewSurface", name=shading_node, asShader=True)
for attr in attrTuples:
if isinstance(attr[1], Gf.Vec3f):
cmds.setAttr(
"%s.%s" % (shading_node, attr[0]),
attr[1][0],
attr[1][1],
attr[1][2],
)
else:
cmds.setAttr("%s.%s" % (shading_node, attr[0]), attr[1])
shading_engine = "%sSG" % shading_node
cmds.sets(
renderable=True, noSurfaceShader=True, empty=True, name=shading_engine
)
cmds.connectAttr(
"%s.outColor" % shading_node,
"%s.surfaceShader" % shading_engine,
force=True,
)
cmds.sets(mesh, edit=True, forceElement=shading_engine)
cmds.file(rename=maya_file)
cmds.file(save=True, type="mayaAscii")
self.assertTrue(os.path.exists(maya_file))
return maya_file
def generateConnectedTestScene(self, attrTuples):
"""
        Generate a test scene containing a UsdPreviewSurface with bindings to
        other shading nodes.
"""
maya_file = os.path.join(
self.temp_dir, "UsdExportConnectedUsdPreviewSurfaceTest.ma"
)
cmds.file(force=True, new=True)
mesh = "ConnectedMaterialSphere"
cmds.polySphere(name=mesh, subdivisionsX=20, subdivisionsY=20, radius=1)
cmds.group(mesh, name="Geom")
cmds.group("Geom", name="UsdPreviewSurfaceExportTest")
shading_node = "usdPreviewSurface_Connected"
cmds.shadingNode("usdPreviewSurface", name=shading_node, asShader=True)
for attr in attrTuples:
if isinstance(attr[1], Gf.Vec3f):
cmds.setAttr(
"%s.%s" % (shading_node, attr[0]),
attr[1][0],
attr[1][1],
attr[1][2],
)
else:
cmds.setAttr("%s.%s" % (shading_node, attr[0]), attr[1])
texture_dir = os.path.join(self.input_dir, "UsdExportUsdPreviewSurfaceTest")
cmds.defaultNavigation(
createNew=True, destination="%s.diffuseColor" % shading_node
)
file_node = cmds.shadingNode(
"file", asTexture=True, name="Brazilian_Rosewood_Texture"
)
cmds.setAttr(
file_node + ".fileTextureName",
os.path.join(texture_dir, "Brazilian_rosewood_pxr128.png"),
type="string",
)
cmds.connectAttr(
"%s.outColor" % file_node, "%s.diffuseColor" % shading_node, force=True
)
# This file node should have stayed "sRGB":
self.assertEqual(cmds.getAttr(file_node + ".colorSpace"), "sRGB")
cmds.defaultNavigation(
createNew=True, destination="%s.roughness" % shading_node
)
file_node = cmds.shadingNode(
"file", asTexture=True, name="Brazilian_Rosewood_Bump_Texture"
)
cmds.setAttr(
file_node + ".fileTextureName",
os.path.join(texture_dir, "Brazilian_rosewood_pxr128_bmp.png"),
type="string",
)
cmds.connectAttr(
"%s.outColorR" % file_node, "%s.roughness" % shading_node, force=True
)
# The monochrome file node should have been set to "Raw" automatically:
self.assertEqual(cmds.getAttr(file_node + ".colorSpace"), "Raw")
cmds.defaultNavigation(
createNew=True, destination="%s.clearcoatRoughness" % shading_node
)
cmds.connectAttr(
"%s.outColorR" % file_node,
"%s.clearcoatRoughness" % shading_node,
force=True,
)
cmds.defaultNavigation(createNew=True, destination="%s.normal" % shading_node)
file_node = cmds.shadingNode(
"file", asTexture=True, name="Brazilian_Rosewood_Normal_Texture"
)
cmds.setAttr(
file_node + ".fileTextureName",
os.path.join(texture_dir, "Brazilian_rosewood_pxr128_n.png"),
type="string",
)
cmds.connectAttr(
"%s.outColor" % file_node, "%s.normal" % shading_node, force=True
)
        # The file node should have been configured for normal maps automatically (Raw color space with remapped gain/offset):
self.assertEqual(cmds.getAttr(file_node + ".colorSpace"), "Raw")
self.assertEqual(cmds.getAttr(file_node + ".colorGainR"), 2)
self.assertEqual(cmds.getAttr(file_node + ".colorGainG"), 2)
self.assertEqual(cmds.getAttr(file_node + ".colorGainB"), 2)
self.assertEqual(cmds.getAttr(file_node + ".colorOffsetR"), -1)
self.assertEqual(cmds.getAttr(file_node + ".colorOffsetG"), -1)
self.assertEqual(cmds.getAttr(file_node + ".colorOffsetB"), -1)
self.assertEqual(cmds.getAttr(file_node + ".alphaGain"), 1)
self.assertEqual(cmds.getAttr(file_node + ".alphaOffset"), 0)
shading_engine = "%sSG" % shading_node
cmds.sets(
renderable=True, noSurfaceShader=True, empty=True, name=shading_engine
)
cmds.connectAttr(
"%s.outColor" % shading_node,
"%s.surfaceShader" % shading_engine,
force=True,
)
cmds.sets(mesh, edit=True, forceElement=shading_engine)
cmds.file(rename=maya_file)
cmds.file(save=True, type="mayaAscii")
self.assertTrue(os.path.exists(maya_file))
return maya_file
def testExportStandaloneUsdPreviewSurface(self):
"""
Tests that a usdPreviewSurface with attribute values but no
connections authored exports correctly.
"""
expectedInputTuples = [
("clearcoat", 0.1),
("clearcoatRoughness", 0.2),
("diffuseColor", Gf.Vec3f(0.3, 0.4, 0.5)),
("displacement", 0.6),
("emissiveColor", Gf.Vec3f(0.07, 0.08, 0.09)),
("ior", 1.1),
("metallic", 0.11),
("normal", Gf.Vec3f(0.12, 0.13, 0.14)),
("occlusion", 0.9),
("opacity", 0.8),
("roughness", 0.7),
("specularColor", Gf.Vec3f(0.3, 0.2, 0.1)),
("useSpecularWorkflow", 1),
]
maya_file = self.generateStandaloneTestScene(expectedInputTuples)
cmds.file(maya_file, force=True, open=True)
usd_file_path = os.path.join(self.temp_dir, "UsdPreviewSurfaceExportTest.usda")
cmds.mayaUSDExport(
mergeTransformAndShape=True, file=usd_file_path, shadingMode="useRegistry"
)
stage = Usd.Stage.Open(usd_file_path)
standaloneMaterial = self._GetUsdMaterial(
stage, "usdPreviewSurface_StandaloneSG"
)
surfaceOutput = standaloneMaterial.GetOutput(UsdShade.Tokens.surface)
previewSurfaceShader = self._GetSourceShader(surfaceOutput)
expectedShaderPrimPath = standaloneMaterial.GetPath().AppendChild(
"usdPreviewSurface_Standalone"
)
self.assertEqual(previewSurfaceShader.GetPath(), expectedShaderPrimPath)
self.assertEqual(previewSurfaceShader.GetShaderId(), "UsdPreviewSurface")
expectedOutputs = {
"surface": Sdf.ValueTypeNames.Token,
"displacement": Sdf.ValueTypeNames.Token,
}
self._ValidateUsdShader(
previewSurfaceShader, expectedInputTuples, expectedOutputs
)
# There should not be any additional inputs.
self.assertEqual(
len(previewSurfaceShader.GetInputs()), len(expectedInputTuples)
)
def testExportConnectedUsdPreviewSurface(self):
"""
Tests that a UsdPreviewSurface with bindings to other shading nodes
exports correctly.
"""
expectedInputTuples = [
("clearcoat", 0.1),
("specularColor", Gf.Vec3f(0.2, 0.2, 0.2)),
("useSpecularWorkflow", 1),
]
maya_file = self.generateConnectedTestScene(expectedInputTuples)
cmds.file(maya_file, force=True, open=True)
usd_file_path = os.path.join(self.temp_dir, "UsdPreviewSurfaceExportTest.usda")
cmds.mayaUSDExport(
mergeTransformAndShape=True, file=usd_file_path, shadingMode="useRegistry"
)
stage = Usd.Stage.Open(usd_file_path)
connectedMaterial = self._GetUsdMaterial(stage, "usdPreviewSurface_ConnectedSG")
surfaceOutput = connectedMaterial.GetOutput(UsdShade.Tokens.surface)
previewSurfaceShader = self._GetSourceShader(surfaceOutput)
expectedShaderPrimPath = connectedMaterial.GetPath().AppendChild(
"usdPreviewSurface_Connected"
)
self.assertEqual(previewSurfaceShader.GetPath(), expectedShaderPrimPath)
self.assertEqual(previewSurfaceShader.GetShaderId(), "UsdPreviewSurface")
expectedOutputs = {
"surface": Sdf.ValueTypeNames.Token,
"displacement": Sdf.ValueTypeNames.Token,
}
self._ValidateUsdShader(
previewSurfaceShader, expectedInputTuples, expectedOutputs
)
# There should be four more connected inputs in addition to the inputs
# with authored values.
self.assertEqual(
len(previewSurfaceShader.GetInputs()), len(expectedInputTuples) + 4
)
# Validate the UsdUvTexture prim connected to the UsdPreviewSurface's
# diffuseColor input.
diffuseColorInput = previewSurfaceShader.GetInput("diffuseColor")
difTexShader = self._GetSourceShader(diffuseColorInput)
self._ValidateUsdShaderOuputs(difTexShader, "rgb")
expectedShaderPrimPath = connectedMaterial.GetPath().AppendChild(
"Brazilian_Rosewood_Texture"
)
self.assertEqual(difTexShader.GetPath(), expectedShaderPrimPath)
self.assertEqual(difTexShader.GetShaderId(), "UsdUVTexture")
# Validate the UsdUvTexture prim connected to the UsdPreviewSurface's
# clearcoatRoughness and roughness inputs. They should both be fed by
# the same shader prim.
clearcoatRougnessInput = previewSurfaceShader.GetInput("clearcoatRoughness")
bmpTexShader = self._GetSourceShader(clearcoatRougnessInput)
rougnessInput = previewSurfaceShader.GetInput("roughness")
roughnessShader = self._GetSourceShader(rougnessInput)
self._ValidateUsdShaderOuputs(bmpTexShader, "r")
self._ValidateUsdShaderOuputs(roughnessShader, "r")
self.assertEqual(bmpTexShader.GetPrim(), roughnessShader.GetPrim())
expectedShaderPrimPath = connectedMaterial.GetPath().AppendChild(
"Brazilian_Rosewood_Bump_Texture"
)
self.assertEqual(bmpTexShader.GetPath(), expectedShaderPrimPath)
self.assertEqual(bmpTexShader.GetShaderId(), "UsdUVTexture")
# Validate the UsdUvTexture prim connected to the UsdPreviewSurface's
# normal input.
normalColorInput = previewSurfaceShader.GetInput("normal")
normalTexShader = self._GetSourceShader(normalColorInput)
self._ValidateUsdShaderOuputs(normalTexShader, "rgb")
expectedShaderPrimPath = connectedMaterial.GetPath().AppendChild(
"Brazilian_Rosewood_Normal_Texture"
)
self.assertEqual(normalTexShader.GetPath(), expectedShaderPrimPath)
self.assertEqual(normalTexShader.GetShaderId(), "UsdUVTexture")
if __name__ == "__main__":
unittest.main(verbosity=2)
|
def func1():
pass
def func2():
pass
d = {
"a": {
"b": func1
}
}
d["a"]["b"] = func2
d["a"]["b"]()
|
# Generated by Django 2.2.5 on 2019-10-18 19:11
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('defaults', '0008_auto_20191012_2339'),
]
operations = [
migrations.AddField(
model_name='blog',
name='featured',
            field=models.IntegerField(choices=[(0, 'No'), (1, 'Everywhere'), (2, 'Category-only')], default=0),
),
migrations.AlterField(
model_name='blog',
name='date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 10, 19, 0, 41, 8, 938040)),
),
]
|
import argparse
import requests
from bs4 import BeautifulSoup
import pandas as pd
from gamestonk_terminal.helper_funcs import (
check_positive,
get_user_agent,
parse_known_args_and_warn,
)
def earnings_release_dates(l_args):
parser = argparse.ArgumentParser(
prog="up_earnings",
description="""Upcoming earnings release dates. [Source: Seeking Alpha]""",
)
parser.add_argument(
"-p",
"--pages",
action="store",
dest="n_pages",
type=check_positive,
default=10,
help="Number of pages to read upcoming earnings from in Seeking Alpha website.",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=3,
help="Number of upcoming earnings release dates to print",
)
ns_parser = parse_known_args_and_warn(parser, l_args)
l_earnings = list()
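    # Scrape each calendar page: page 1 has no numeric suffix, later pages are /earnings-calendar/<page>.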
for idx in range(0, ns_parser.n_pages):
if idx == 0:
url_next_earnings = "https://seekingalpha.com/earnings/earnings-calendar"
else:
url_next_earnings = (
f"https://seekingalpha.com/earnings/earnings-calendar/{idx+1}"
)
text_soup_earnings = BeautifulSoup(
requests.get(
url_next_earnings, headers={"User-Agent": get_user_agent()}
).text,
"lxml",
)
for bs_stock in text_soup_earnings.findAll("tr", {"data-exchange": "NASDAQ"}):
l_stock = list()
for stock in bs_stock.contents[:3]:
l_stock.append(stock.text)
l_earnings.append(l_stock)
df_earnings = pd.DataFrame(l_earnings, columns=["Ticker", "Name", "Date"])
df_earnings["Date"] = pd.to_datetime(df_earnings["Date"])
df_earnings = df_earnings.set_index("Date")
    pd.set_option("display.max_colwidth", None)
for n_days, earning_date in enumerate(df_earnings.index.unique()):
if n_days > (ns_parser.n_num - 1):
break
print(f"Earning Release on {earning_date.date()}")
print("----------------------------------------------")
print(
df_earnings[earning_date == df_earnings.index][
["Ticker", "Name"]
].to_string(index=False, header=False)
)
print("")
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
*.py: Description of what * does.
Last Modified:
"""
__author__ = "Sathappan Muthiah"
__email__ = "sathap1@vt.edu"
__version__ = "0.0.1"
import unicodecsv
import os
from lxml import etree
import pdb
from parse import parse as textparse
def safe_gettext(tree_node):
if tree_node is not None and tree_node.text:
return tree_node.text
return ""
def safe_delete(mydict, key):
"""
Delete only if key present in dictionary
"""
if key in mydict:
del(mydict[key])
return mydict
class XMLTAG(dict):
def __init__(self, tag_src="./acqmedia_xmltags"):
flist = os.listdir(tag_src)
data = {}
for fl in flist:
with open(os.path.join(tag_src, fl)) as infile:
reader = unicodecsv.reader(infile, delimiter=",")
reader.next()
data.update(dict([l for l in reader]))
super(XMLTAG, self).__init__(data)
def __getitem__(self, key):
if not self.has_key(key):
return key
return super(XMLTAG, self).__getitem__(key)
class ACQUIRE_MEDIA(object):
def __init__(self, tag_src):
self.__xmltags__ = XMLTAG(tag_src=tag_src)
def parse(self, fileObj):
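        # The feed root holds two children: an NITF document (head + body) and a newsedge resource/metadata block.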
tree = etree.fromstring(fileObj.read())
nitf, resources = tree.getchildren()
head, body = nitf.getchildren()
doc = {'head': {'title': head.getchildren()[0].text},
'body': {}
}
bhead, bcontent = body.getchildren()
        doc['body']['head'] = {ch.tag: ch.text for ch in bhead.getiterator()
                               if ch.text and ch.text.strip()}
doc['body']['content'] = self._getcontent(bcontent)
doc['resources'] = self._getmeta(resources)
return doc
def _figure(self, fig):
fig_msg = {'caption': " ".join([l.strip() for l in
fig.itertext() if 'figcaption' not in l]),
'img': [img.attrib for img in fig.findall('img')],
'type': 'figure'
}
fig.getparent().remove(fig)
return fig_msg
def _href(self, a):
htext = " ".join([l.strip() for l in a.xpath('descendant-or-self::text()')])
return {'type': 'a',
htext: a.attrib.get('href', '')}
def _entities(self, ent):
return {"expr": ent.text.strip(),
"value": self.__xmltags__[ent.attrib['value']],
"neType": ent.tag}
def _div(self, div):
while (len(div) == 1 and (div.text is None or
div.text.strip() == "")):
div = div.getchildren()[0]
divcontent = self._getcontent(div, recurse_div=False)
div.getparent().remove(div)
return divcontent
def _getcontent(self, ctree, recurse_div=True):
div = map(self._div, ctree.findall('div'))
if not recurse_div and div:
div = map(self._combine, div)
def gettext(it):
return " ".join([l for l in it.xpath('descendant-or-self::text()')
if l.strip()])
figures = map(self._figure, ctree.xpath("descendant::figure|img"))
content = gettext(ctree)
#" ".join([gettext(l).strip() for l in
# ctree.xpath("descendant-or-self::*[not(ancestor-or-self::figure)]")
# if l.text]).strip()
entities = map(self._entities, ctree.xpath("descendant::*[@value]"))
href = map(self._href, ctree.xpath("descendant::a[@href]"))
res = {"content": content.strip(), "figures": figures,
"entities": entities, "href": href,
"parts": div}
if not recurse_div and len(res.get('parts', [])) == 1:
res = self._update(res, res['parts'][0])
safe_delete(res, 'parts')
return res
def _update(self, orig_div, newd):
orig_div['content'] += ("\n" + newd['content'])
orig_div['figures'].extend(newd['figures'])
orig_div['entities'].extend(newd['entities'])
orig_div['href'].extend(newd['href'])
return orig_div
def _combine(self, divs):
div = divs
for d in divs.get("parts", []):
d = self._combine(d)
self._update(div, d)
safe_delete(div, 'parts')
return div
def _getmeta(self, mtree):
"""
parse through the newsedge resource tree
"""
ns = "{http://www.xmlnews.org/namespaces/meta#}"
req_keys = ("publicationTime", "receivedTime", "expiryTime",
"releaseTime", "publishReason", "releaseStatus", "revision",
"type", "dateline", "bylineOrg", "providerSlug", "role",
"language", "companyCode", "keyword", "providerSymbol",
"providerCode")
req_tags = ("locationCode", "industryCode", "subjectCode")
resource_dict = {key: safe_gettext(mtree.find("{}{}".format(ns, key))) for key in req_keys}
tag_dict = {key: self.__xmltags__[safe_gettext(mtree.find("{}{}".format(ns, key)))]
for key in req_tags}
resource_dict.update(tag_dict)
resource_dict["link"] = None
ltree = mtree.xpath(".//*[contains(text(), 'Story Link')]")
if ltree:
resource_dict["link"] = textparse("AMSPIDER:Story Link={}", ltree[0].text)[0]
return resource_dict
if __name__ == "__main__":
with open("./newsedge/201602210000AMSPIDERPOSTMEDN_OttCit01_bf45030c5c3088dfb5b21c93ef65c8d8_8.xml") as infile:
aq = ACQUIRE_MEDIA(tag_src="./acqmedia_xmltags")
ss = aq.parse(infile)
|
"""Common Docker components."""
import os
import ssl
import docker
BASE_URL = os.environ.get("DOCKER_HOST")
TLS_CONFIG = None
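# When DOCKER_TLS_VERIFY is set, build a TLS configuration from the certificates in DOCKER_CERT_PATH (falling back to ~/.docker).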
if len(os.environ.get("DOCKER_TLS_VERIFY", "")):
if BASE_URL is None:
raise RuntimeError("DOCKER_HOST not set.")
BASE_URL = "https://{}".format(BASE_URL.split("://", 1)[-1])
CERT_PATH = os.environ.get("DOCKER_CERT_PATH", "")
if not len(CERT_PATH):
CERT_PATH = os.path.join(os.environ.get("HOME", ""), ".docker")
TLS_CONFIG = docker.tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=(os.path.join(CERT_PATH, "cert.pem"), os.path.join(CERT_PATH, "key.pem")),
ca_cert=os.path.join(CERT_PATH, "ca.pem"),
)
CLIENT = docker.from_env()
API_CLIENT = docker.APIClient(base_url=BASE_URL, tls=TLS_CONFIG)
|
from .file import FileFinder
def make_finder(url):
return FileFinder(url)
|
import pyqtgraph as pg
from PySide2 import QtCore, QtWidgets, QtGui
import pyqtgraph.functions as fn
import pandas as pd
from functools import partial
transparentCol = "#969696"
transparentStyle=f"background: transparent;color:{transparentCol};border-color: {transparentCol};border-width: 1px;border-style: solid;min-width: 3em;"
class Plotter():
def getPlot(self):
return pg.GraphicsLayout()
class FixedGraphicsLayout(pg.GraphicsLayout):
def addItem(self, item, row=None, col=None, rowspan=1, colspan=1):
"""
Add an item to the layout and place it in the next available cell (or in the cell specified).
The item must be an instance of a QGraphicsWidget subclass.
"""
if row is None:
row = self.currentRow
if col is None:
col = self.currentCol
self.items[item] = []
for i in range(rowspan):
for j in range(colspan):
row2 = row + i
col2 = col + j
if row2 not in self.rows:
self.rows[row2] = {}
self.rows[row2][col2] = item
self.items[item].append((row2, col2))
        borderRect = QtWidgets.QGraphicsRectItem()
borderRect.setParentItem(self)
borderRect.setZValue(1e3)
borderRect.setPen(fn.mkPen(self.border))
self.itemBorders[item] = borderRect
        # geometryChanged is not available on every item in the original source code;
        # ignore the exception if the signal is missing.
        try:
            item.geometryChanged.connect(self._updateItemBorder)
        except (AttributeError, RuntimeError):
            pass
self.layout.addItem(item, row, col, rowspan, colspan)
self.layout.activate() # Update layout, recalculating bounds.
# Allows some PyQtGraph features to also work without Qt event loop.
self.nextColumn()
def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
"""
Create an empty GraphicsLayout and place it in the next available cell (or in the cell specified)
All extra keyword arguments are passed to :func:`GraphicsLayout.__init__ <pyqtgraph.GraphicsLayout.__init__>`
Returns the created item.
"""
layout = FixedGraphicsLayout(**kargs)#we need to use the fixed version, otherwise we run into the same problems as with addItem()
self.addItem(layout, row, col, rowspan, colspan)
return layout
class SubPlot(FixedGraphicsLayout):
style = transparentStyle
color0 = transparentCol
def __init__(self, xdata, ydata, xnames, ynames, sharedCoords):
super().__init__()
self.xdata = xdata
self.ydata = ydata
self.xnames = xnames
self.ynames = ynames
self.sharedCoords = sharedCoords
self.lines = {}
self.addons = {}
self.proxys = []
#put data in dataframe
def xyname2s(idx,X,Y,yname):
comments = " / ".join(Y.attrs.get("comments",[]))
nameAndComments = f"{yname[0]} {comments} @ {yname[1]} [Y{idx}]"
return pd.Series(Y,index=X,name=nameAndComments)
self.df = pd.concat([xyname2s(idx,X,Y,yname).to_frame() for idx,(X,Y,yname) in enumerate(zip(xdata,ydata,ynames))],axis=1)
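        # One column per Y series, indexed by its X values; the column name encodes the series name,
        # any comments, and the index of the Y axis.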
p = partial(self.updateData,self.df)
QtCore.QTimer.singleShot(0,p)
def updateData(self, data):
self.df = data
for k,v in self.addons.items():v.updateData(self.df)
def addRowColList(self, rowColList, rowWise = True, _parent=None,_col=0):
#we can use this to add widgets from a list.
#the list must have the form:
#[row1, row2, row3, ...]
#where each row is either itself a list (in this case we interpret it as columns and call this function recursively),
#or a QGraphicsWidget subclass, in which case we just append it,
#or a QWidget/QLayout subclass, in which case we build a proxy for it and add it
#If an item has a rowspan(colspan) attribute, we use it. (TBD)
colWise = not rowWise
if _parent is None:_parent=self
try: nrOfRows = len(_parent.rows)
except: nrOfRows = 0
try: elems = tuple(iter(rowColList))
except:
#if we are here, we are not a list
            if isinstance(rowColList, QtWidgets.QGraphicsWidget):
                # note: pg.GraphicsLayout is itself a QGraphicsWidget subclass, so an
                # existing layout passed here is also added directly as an item
                row = 0 if colWise else nrOfRows+1
                _parent.addItem(rowColList, row = row,col=_col)
                rowColList._row=row
            elif isinstance(rowColList,QtWidgets.QWidget):
                _parent.addWidget(rowColList)
            elif isinstance(rowColList,QtWidgets.QLayout):
                _parent.addLayout(rowColList)
return
#if we are here, we can iterate
proxyneededlambda = lambda x: isinstance(x,QtWidgets.QWidget) or isinstance(x,QtWidgets.QLayout)
proxyneeded = all(proxyneededlambda(x) for x in elems)
if proxyneeded:
P = QtWidgets.QGraphicsProxyWidget()
self.proxys.append(P)
W = QtWidgets.QWidget()
W.setAttribute(QtCore.Qt.WA_TranslucentBackground)
W.setAttribute(QtCore.Qt.WA_NoSystemBackground)
L= QtWidgets.QHBoxLayout() if rowWise else QtWidgets.QVBoxLayout()
L.setSpacing(0)
L.setContentsMargins(0,0,0,0)
W.setLayout(L)
P.setWidget(W)
_parent.addItem(P, row = 0 if colWise else nrOfRows+1,col=_col)
_subparent = L
else:
_subparent = _parent.addLayout(row = 0 if colWise else nrOfRows+1,col=_col)
for idx,x in enumerate(elems):
col = 0 if colWise else idx
self.addRowColList(x,rowWise=not rowWise,_parent=_subparent,_col=col)
if proxyneeded:
_subparent.addItem(QtWidgets.QSpacerItem(10,10,QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding))
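# Illustrative note on addRowColList above (widget names are hypothetical): a call like
#   subplot.addRowColList([[label_widget, spin_box], plot_item])
# groups the two plain QWidgets into a single QGraphicsProxyWidget-backed cell
# (because every entry of that inner list is a QWidget or QLayout), while the
# QGraphicsWidget-based plot_item is added to the graphics layout directly.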
class Addon(QtCore.QObject):
name = "Addon"
row = 0
def __init__(self, parent):
super().__init__()
self.parent = parent
def toggle(self):
pass
def getGuiElements(self):
return []
def resolveConnections(self):
pass
def updateData(self,data):
pass
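# Minimal sketch (not part of the original module) of a concrete Addon, assuming only
# the Addon interface defined above; "MarkerAddon" and its attributes are hypothetical.
class MarkerAddon(Addon):
    name = "Marker"
    row = 1
    def __init__(self, parent):
        super().__init__(parent)
        self.visible = False
        self.last_data = None
    def toggle(self):
        # flip visibility; a real addon would also show or hide its plot items here
        self.visible = not self.visible
    def getGuiElements(self):
        # widgets returned here are meant to be placed via SubPlot.addRowColList
        return []
    def updateData(self, data):
        # keep a reference to the latest dataframe pushed in by SubPlot.updateData
        self.last_data = data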
|
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import View, DetailView
from django.views.generic.edit import DeleteView
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from .models import Player
from .forms import FormPlayer
from django.db.models import Q
import requests
import json
import random
class PlayersList(View):
players = []
template_name = 'app/players_list.html'
def get(self, request, *args,**kwargs):
query = Q()
if request.GET.get('term', False):
query = Q(name__icontains=request.GET['term'])
players = Player.objects.filter(query).order_by('rank_position')
paginator = Paginator(players, 100)
page = int(request.GET.get('page', 1))
try:
self.players = paginator.page(page)
except (EmptyPage, InvalidPage):
self.players = paginator.page(paginator.num_pages)
response = {
'players': self.players,
'actual': page, 'total': paginator.num_pages,
'next': page + 1, 'prev': page - 1,
'list_pages': range(1, paginator.num_pages + 1),
'st': request.GET.get('st', '0')
}
return render(request, self.template_name, response)
class PlayerCreate(View):
form_class = FormPlayer
form_name = "Cadastrar Player"
template_name = 'app/player_form.html'
def get(self, request, *args, **kwargs):
initial = {}
try:
r = requests.get('http://api.randomuser.me/')
data = json.loads(r.text)
data = data['results'][0]
initial = { 'name': "%s %s" % (data['name']['first'], data['name']['last']),
'rank_position': random.randrange(0, 2000)}
except Exception as e:
print(e)
form = self.form_class(initial=initial)
return render(request, self.template_name, {'form': form, 'form_name': self.form_name })
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
data = form.cleaned_data
player = Player(name=data['name'], rank_position=data['rank_position'])
player.save()
return HttpResponseRedirect('/')
return render( request, self.template_name, {'form': form, 'form_name': self.form_name })
class PlayerDetail(DetailView):
context_object_name = 'player'
queryset = Player.objects.all()
class PlayerUpdate(View):
form_class = FormPlayer
form_name = "Editar player"
template_name = 'app/player_form.html'
def get(self, request, *args,**kwargs):
self.initial = get_object_or_404(Player, pk=kwargs['pk'])
form = self.form_class(initial=self.initial.__dict__)
return render(request, self.template_name, {'form': form, 'form_name': self.form_name })
def post(self, request, *args, **kwargs):
self.player = get_object_or_404(Player, pk=kwargs['pk'])
form = self.form_class(request.POST)
if form.is_valid():
data = form.cleaned_data
self.player.name = data['name']
self.player.rank_position = data['rank_position']
self.player.save()
return HttpResponseRedirect('/')
        return render( request, self.template_name, {'form': form, 'form_name': self.form_name })
class PlayerDelete(DeleteView):
model = Player
success_url = '/'
class AutoComplete(View):
def get(self, request, *args,**kwargs):
query = Q()
if request.GET.get('term', False):
query = Q(name__icontains=request.GET['term'])
players = Player.objects.filter(query)
data = [{'id': player.id, 'name': player.name, 'rank_position': player.rank_position} for player in players]
mimetype = "application/json;charset=UTF-8"
js = json.dumps({'results': data}, ensure_ascii=False).encode('utf8')
return HttpResponse(js, mimetype)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import argparse
import collections
import inspect
import json
import glob
import os
import sys
import shutil
import tempfile
from pathlib import PurePosixPath
from typing import Union, Optional, Any, List
from cryptography import x509
import cryptography.hazmat.backends as crypto_backends
from loguru import logger as LOG # type: ignore
CERT_OID_SGX_QUOTE = "1.2.840.113556.10.1.1"
def dump_to_file(output_path: str, obj: dict, dump_args: dict):
with open(output_path, "w") as f:
json.dump(obj, f, **dump_args)
def list_as_lua_literal(l):
return str(l).translate(str.maketrans("[]", "{}"))
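# For example, list_as_lua_literal([1, 2, 3]) returns "{1, 2, 3}": the Python list
# repr with square brackets swapped for Lua's curly braces.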
LUA_FUNCTION_EQUAL_ARRAYS = """function equal_arrays(a, b)
if #a ~= #b then
return false
else
for k, v in ipairs(a) do
if b[k] ~= v then
return false
end
end
return true
end
end"""
DEFAULT_PROPOSAL_OUTPUT = "{proposal_name}_proposal.json"
DEFAULT_VOTE_OUTPUT = "{proposal_name}_vote_for.json"
def complete_proposal_output_path(
proposal_name: str,
proposal_output_path: Optional[str] = None,
common_dir: str = ".",
):
if proposal_output_path is None:
proposal_output_path = DEFAULT_PROPOSAL_OUTPUT.format(
proposal_name=proposal_name
)
if not proposal_output_path.endswith(".json"):
proposal_output_path += ".json"
proposal_output_path = os.path.join(common_dir, proposal_output_path)
return proposal_output_path
def complete_vote_output_path(
proposal_name: str, vote_output_path: Optional[str] = None, common_dir: str = "."
):
if vote_output_path is None:
vote_output_path = DEFAULT_VOTE_OUTPUT.format(proposal_name=proposal_name)
if not vote_output_path.endswith(".json"):
vote_output_path += ".json"
vote_output_path = os.path.join(common_dir, vote_output_path)
return vote_output_path
def add_arg_construction(
lines: list,
arg: Union[str, collections.abc.Sequence, collections.abc.Mapping],
arg_name: str = "args",
):
if isinstance(arg, str):
lines.append(f"{arg_name} = [====[{arg}]====]")
elif isinstance(arg, collections.abc.Sequence):
lines.append(f"{arg_name} = {list_as_lua_literal(arg)}")
elif isinstance(arg, collections.abc.Mapping):
lines.append(f"{arg_name} = {{}}")
        for k, v in arg.items():
add_arg_construction(lines, v, arg_name=f"{arg_name}.{k}")
else:
lines.append(f"{arg_name} = {arg}")
def add_arg_checks(
lines: list,
arg: Union[str, collections.abc.Sequence, collections.abc.Mapping],
arg_name: str = "args",
added_equal_arrays_fn: bool = False,
):
lines.append(f"if {arg_name} == nil then return false end")
if isinstance(arg, str):
lines.append(f"if not {arg_name} == [====[{arg}]====] then return false end")
elif isinstance(arg, collections.abc.Sequence):
if not added_equal_arrays_fn:
lines.extend(
line.strip() for line in LUA_FUNCTION_EQUAL_ARRAYS.splitlines()
)
added_equal_arrays_fn = True
expected_name = arg_name.replace(".", "_")
lines.append(f"{expected_name} = {list_as_lua_literal(arg)}")
lines.append(
f"if not equal_arrays({arg_name}, {expected_name}) then return false end"
)
elif isinstance(arg, collections.abc.Mapping):
for k, v in arg.items():
add_arg_checks(
lines,
v,
arg_name=f"{arg_name}.{k}",
added_equal_arrays_fn=added_equal_arrays_fn,
)
else:
lines.append(f"if not {arg_name} == {arg} then return false end")
def build_proposal(
proposed_call: str,
args: Optional[Any] = None,
inline_args: bool = False,
vote_against: bool = False,
):
LOG.trace(f"Generating {proposed_call} proposal")
proposal_script_lines = []
if args is None:
proposal_script_lines.append(f'return Calls:call("{proposed_call}")')
else:
if inline_args:
add_arg_construction(proposal_script_lines, args)
else:
proposal_script_lines.append("tables, args = ...")
proposal_script_lines.append(f'return Calls:call("{proposed_call}", args)')
proposal_script_text = "; ".join(proposal_script_lines)
proposal = {
"script": {"text": proposal_script_text},
}
if args is not None and not inline_args:
proposal["parameter"] = args
if vote_against:
proposal["ballot"] = {"text": "return false"}
vote_lines = [
"tables, calls = ...",
"if not #calls == 1 then return false end",
"call = calls[1]",
f'if not call.func == "{proposed_call}" then return false end',
]
if args is not None:
vote_lines.append("args = call.args")
add_arg_checks(vote_lines, args)
vote_lines.append("return true")
vote_text = "; ".join(vote_lines)
vote = {"ballot": {"text": vote_text}}
LOG.trace(f"Made {proposed_call} proposal:\n{json.dumps(proposal, indent=2)}")
LOG.trace(f"Accompanying vote:\n{json.dumps(vote, indent=2)}")
return proposal, vote
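# Illustrative example (derived from build_proposal above, not extra output of this
# script): build_proposal("open_network") returns roughly
#   ({"script": {"text": 'return Calls:call("open_network")'}},
#    {"ballot": {"text": 'tables, calls = ...; if #calls ~= 1 then return false end; '
#                        'call = calls[1]; if call.func ~= "open_network" then return false end; '
#                        'return true'}})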
def cli_proposal(func):
func.is_cli_proposal = True
return func
@cli_proposal
def new_member(
member_cert_path: str, member_enc_pubk_path: str, member_data: Any = None, **kwargs
):
LOG.debug("Generating new_member proposal")
# Read certs
member_cert = open(member_cert_path).read()
member_keyshare_encryptor = open(member_enc_pubk_path).read()
# Script which proposes adding a new member
proposal_script_text = """
tables, args = ...
return Calls:call("new_member", args)
"""
# Proposal object (request body for POST /gov/proposals) containing this member's info as parameter
proposal = {
"parameter": {
"cert": member_cert,
"keyshare": member_keyshare_encryptor,
"member_data": member_data,
},
"script": {"text": proposal_script_text},
}
vote_against = kwargs.pop("vote_against", False)
if vote_against:
proposal["ballot"] = {"text": "return false"}
# Sample vote script which checks the expected member is being added, and no other actions are being taken
verifying_vote_text = f"""
tables, calls = ...
if #calls ~= 1 then
return false
end
call = calls[1]
if call.func ~= "new_member" then
return false
end
expected_cert = [====[{member_cert}]====]
    if call.args.cert ~= expected_cert then
return false
end
expected_keyshare = [====[{member_keyshare_encryptor}]====]
    if call.args.keyshare ~= expected_keyshare then
return false
end
return true
"""
# Vote object (request body for POST /gov/proposals/{proposal_id}/votes)
verifying_vote = {"ballot": {"text": verifying_vote_text}}
LOG.trace(f"Made new member proposal:\n{json.dumps(proposal, indent=2)}")
LOG.trace(f"Accompanying vote:\n{json.dumps(verifying_vote, indent=2)}")
return proposal, verifying_vote
@cli_proposal
def retire_member(member_id: int, **kwargs):
return build_proposal("retire_member", member_id, **kwargs)
@cli_proposal
def set_member_data(member_id: int, member_data: Any, **kwargs):
proposal_args = {"member_id": member_id, "member_data": member_data}
return build_proposal("set_member_data", proposal_args, **kwargs)
@cli_proposal
def new_user(user_cert_path: str, user_data: Any = None, **kwargs):
user_info = {"cert": open(user_cert_path).read()}
if user_data is not None:
user_info["user_data"] = user_data
return build_proposal("new_user", user_info, **kwargs)
@cli_proposal
def remove_user(user_id: int, **kwargs):
return build_proposal("remove_user", user_id, **kwargs)
@cli_proposal
def set_user_data(user_id: int, user_data: Any, **kwargs):
proposal_args = {"user_id": user_id, "user_data": user_data}
return build_proposal("set_user_data", proposal_args, **kwargs)
@cli_proposal
def set_lua_app(app_script_path: str, **kwargs):
with open(app_script_path) as f:
app_script = f.read()
return build_proposal("set_lua_app", app_script, **kwargs)
@cli_proposal
def set_js_app(app_script_path: str, **kwargs):
with open(app_script_path) as f:
app_script = f.read()
return build_proposal("set_js_app", app_script, **kwargs)
@cli_proposal
def deploy_js_app(bundle_path: str, **kwargs):
# read modules
if os.path.isfile(bundle_path):
tmp_dir = tempfile.TemporaryDirectory(prefix="ccf")
shutil.unpack_archive(bundle_path, tmp_dir.name)
bundle_path = tmp_dir.name
modules_path = os.path.join(bundle_path, "src")
modules = read_modules(modules_path)
# read metadata
metadata_path = os.path.join(bundle_path, "app.json")
with open(metadata_path) as f:
metadata = json.load(f)
# sanity checks
module_paths = set(module["name"] for module in modules)
for url, methods in metadata["endpoints"].items():
for method, endpoint in methods.items():
module_path = endpoint["js_module"]
if module_path not in module_paths:
raise ValueError(
f"{method} {url}: module '{module_path}' not found in bundle"
)
proposal_args = {
"bundle": {"metadata": metadata, "modules": modules},
}
return build_proposal("deploy_js_app", proposal_args, **kwargs)
@cli_proposal
def remove_js_app(**kwargs):
return build_proposal("remove_js_app", **kwargs)
@cli_proposal
def set_module(module_name: str, module_path: str, **kwargs):
module_name_ = PurePosixPath(module_name)
if not module_name_.is_absolute():
raise ValueError("module name must be an absolute path")
    if any(folder in [".", ".."] for folder in module_name_.parts):
raise ValueError("module name must not contain . or .. components")
if module_name_.suffix == ".js":
with open(module_path) as f:
js = f.read()
proposal_args = {"name": module_name, "module": {"js": js}}
else:
raise ValueError("module name must end with .js")
return build_proposal("set_module", proposal_args, **kwargs)
@cli_proposal
def remove_module(module_name: str, **kwargs):
return build_proposal("remove_module", module_name, **kwargs)
def read_modules(modules_path: str) -> List[dict]:
modules = []
for path in glob.glob(f"{modules_path}/**/*.js", recursive=True):
rel_module_name = os.path.relpath(path, modules_path)
rel_module_name = rel_module_name.replace("\\", "/") # Windows support
with open(path) as f:
js = f.read()
modules.append({"name": rel_module_name, "module": {"js": js}})
return modules
@cli_proposal
def update_modules(module_name_prefix: str, modules_path: Optional[str], **kwargs):
LOG.debug("Generating update_modules proposal")
# Validate module name prefix
module_name_prefix_ = PurePosixPath(module_name_prefix)
if not module_name_prefix_.is_absolute():
raise ValueError("module name prefix must be an absolute path")
    if any(folder in [".", ".."] for folder in module_name_prefix_.parts):
raise ValueError("module name prefix must not contain . or .. components")
if not module_name_prefix.endswith("/"):
raise ValueError("module name prefix must end with /")
# Read module files and build relative module names
modules = []
if modules_path:
modules = read_modules(modules_path)
proposal_args = {"prefix": module_name_prefix, "modules": modules}
return build_proposal("update_modules", proposal_args, **kwargs)
@cli_proposal
def remove_modules(module_name_prefix: str, **kwargs):
LOG.debug("Generating update_modules proposal (remove only)")
    return update_modules(module_name_prefix, modules_path=None, **kwargs)
@cli_proposal
def trust_node(node_id: int, **kwargs):
return build_proposal("trust_node", node_id, **kwargs)
@cli_proposal
def retire_node(node_id: int, **kwargs):
return build_proposal("retire_node", node_id, **kwargs)
@cli_proposal
def new_node_code(code_digest: str, **kwargs):
code_digest_bytes = list(bytearray.fromhex(code_digest))
return build_proposal("new_node_code", code_digest_bytes, **kwargs)
@cli_proposal
def retire_node_code(code_digest: str, **kwargs):
code_digest_bytes = list(bytearray.fromhex(code_digest))
return build_proposal("retire_node_code", code_digest_bytes, **kwargs)
@cli_proposal
def accept_recovery(**kwargs):
return build_proposal("accept_recovery", **kwargs)
@cli_proposal
def open_network(**kwargs):
return build_proposal("open_network", **kwargs)
@cli_proposal
def rekey_ledger(**kwargs):
return build_proposal("rekey_ledger", **kwargs)
@cli_proposal
def update_recovery_shares(**kwargs):
return build_proposal("update_recovery_shares", **kwargs)
@cli_proposal
def set_recovery_threshold(threshold: int, **kwargs):
return build_proposal("set_recovery_threshold", threshold, **kwargs)
@cli_proposal
def update_ca_cert(cert_name, cert_path, skip_checks=False, **kwargs):
with open(cert_path) as f:
cert_pem = f.read()
if not skip_checks:
try:
cert = x509.load_pem_x509_certificate(
cert_pem.encode(), crypto_backends.default_backend()
)
except Exception as exc:
raise ValueError("Cannot parse PEM certificate") from exc
try:
oid = x509.ObjectIdentifier(CERT_OID_SGX_QUOTE)
_ = cert.extensions.get_extension_for_oid(oid)
except x509.ExtensionNotFound as exc:
raise ValueError(
"X.509 extension with SGX quote not found in certificate"
) from exc
args = {"name": cert_name, "cert": cert_pem}
return build_proposal("update_ca_cert", args, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-po",
"--proposal-output-file",
type=str,
help=f"Path where proposal JSON object (request body for POST /gov/proposals) will be dumped. Default is {DEFAULT_PROPOSAL_OUTPUT}",
)
parser.add_argument(
"-vo",
"--vote-output-file",
type=str,
help=f"Path where vote JSON object (request body for POST /gov/proposals/{{proposal_id}}/votes) will be dumped. Default is {DEFAULT_VOTE_OUTPUT}",
)
parser.add_argument(
"-pp",
"--pretty-print",
action="store_true",
help="Pretty-print the JSON output",
)
parser.add_argument(
"-i",
"--inline-args",
action="store_true",
help="Create a fixed proposal script with the call arguments as literals inside "
"the script. When not inlined, the parameters are passed separately and could "
"be replaced in the resulting object",
)
parser.add_argument(
"--vote-against",
action="store_true",
help="Include a negative initial vote when creating the proposal",
default=False,
)
parser.add_argument("-v", "--verbose", action="store_true")
# Auto-generate CLI args based on the inspected signatures of generator functions
module = inspect.getmodule(inspect.currentframe())
proposal_generators = inspect.getmembers(module, predicate=inspect.isfunction)
subparsers = parser.add_subparsers(
title="Possible proposals", dest="proposal_type", required=True
)
for func_name, func in proposal_generators:
# Only generate for decorated functions
if not hasattr(func, "is_cli_proposal"):
continue
subparser = subparsers.add_parser(func_name)
parameters = inspect.signature(func).parameters
func_param_names = []
for param_name, param in parameters.items():
if param.kind == param.VAR_POSITIONAL or param.kind == param.VAR_KEYWORD:
continue
if param.annotation == param.empty:
param_type = None
elif param.annotation == dict or param.annotation == Any:
param_type = json.loads
else:
param_type = param.annotation
add_argument_extras = {}
if param.default is None:
add_argument_extras["nargs"] = "?"
add_argument_extras["default"] = param.default # type: ignore
subparser.add_argument(param_name, type=param_type, **add_argument_extras) # type: ignore
func_param_names.append(param_name)
subparser.set_defaults(func=func, param_names=func_param_names)
args = parser.parse_args()
LOG.remove()
LOG.add(
sys.stdout,
format="<level>[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} | {message}</level>",
level="TRACE" if args.verbose else "INFO",
)
proposal, vote = args.func(
**{name: getattr(args, name) for name in args.param_names},
vote_against=args.vote_against,
inline_args=args.inline_args,
)
dump_args = {}
if args.pretty_print:
dump_args["indent"] = 2
proposal_path = complete_proposal_output_path(
args.proposal_type, proposal_output_path=args.proposal_output_file
)
LOG.success(f"Writing proposal to {proposal_path}")
dump_to_file(proposal_path, proposal, dump_args)
vote_path = complete_vote_output_path(
args.proposal_type, vote_output_path=args.vote_output_file
)
LOG.success(f"Wrote vote to {vote_path}")
dump_to_file(vote_path, vote, dump_args)
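# Illustrative usage (the script file name below is hypothetical; invoke however this
# module is named in your checkout):
#   python proposal_generator.py --pretty-print set_recovery_threshold 2
# writes set_recovery_threshold_proposal.json and set_recovery_threshold_vote_for.json
# into the current directory, per DEFAULT_PROPOSAL_OUTPUT and DEFAULT_VOTE_OUTPUT.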
|
# (c) 2021 Michał Górny
# 2-clause BSD license
"""Tests for database support"""
import io
import unittest.mock
import pytest
from kuroneko.database import Database, Bug, DatabaseError
JSON_DATA = '''
{{"kuroneko-version": {version},
"bugs": [
{{"bug": 123456,
"packages": [["dev-foo/bar"],
[">=dev-foo/foo-1", "<dev-foo/foo-1.7"]],
"summary": "test bug",
"severity": "C4",
"created": "2021-01-01",
"resolved": false}}
]}}'''
EXPECTED_BUGS = {
123456: Bug(bug=123456,
packages=[['dev-foo/bar'],
['>=dev-foo/foo-1', '<dev-foo/foo-1.7']],
summary='test bug',
severity='C4',
created='2021-01-01',
resolved=False),
}
def test_load_database():
db = Database()
db.load(io.StringIO(
JSON_DATA.format(version='"{}.{}"'.format(*db.SCHEMA_VERSION))))
assert db.bugs == EXPECTED_BUGS
def test_round_robin():
db = Database()
db.bugs.update(EXPECTED_BUGS)
data = io.StringIO()
db.save(data)
data.seek(0)
db2 = Database()
db2.load(data)
assert db2.bugs == EXPECTED_BUGS
def test_no_magic():
db = Database()
with pytest.raises(DatabaseError):
db.load(io.BytesIO(b'{"bugs": []}'))
@unittest.mock.patch('kuroneko.database.Database.SCHEMA_VERSION', (2, 1))
@pytest.mark.parametrize('version', ['"2.0"', '"2.2"'])
def test_version_good(version):
db = Database()
db.load(io.BytesIO(JSON_DATA.format(version=version).encode()))
assert db.bugs == EXPECTED_BUGS
@unittest.mock.patch('kuroneko.database.Database.SCHEMA_VERSION', (2, 1))
@pytest.mark.parametrize('version', ['"1.0"', '"3.0"', '"2"', '2',
'"2.2.s"', '"fnord"', '""', '2.1'])
def test_version_bad(version):
db = Database()
with pytest.raises(DatabaseError):
db.load(io.BytesIO(JSON_DATA.format(version=version).encode()))
|
#
# Copyright (c) 2016 MasterCard International Incorporated
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# Neither the name of the MasterCard International Incorporated nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from mastercardapicore import BaseObject
from mastercardapicore import RequestMap
from mastercardapicore import OperationConfig
from mastercardapicore import OperationMetadata
from .resourceconfig import ResourceConfig
class CardMapping(BaseObject):
"""
"""
__config = {
"93f59d06-df06-48ab-9633-208d0da481ca" : OperationConfig("/moneysend/v3/mapping/card", "create", [], []),
"25791dea-df4a-4e1f-8418-48d7cf03ee8b" : OperationConfig("/moneysend/v3/mapping/card/{mappingId}", "delete", [], []),
"ae8f3d37-0dd6-44ec-ac9b-16dad6a65ea0" : OperationConfig("/moneysend/v3/mapping/subscriber", "update", [], []),
"2bad23fe-1ff5-4ff9-bf32-74b620ccfb1e" : OperationConfig("/moneysend/v3/mapping/card", "update", [], []),
"d9e2cf3c-6d50-4a0f-9f9c-d48d1290cb57" : OperationConfig("/moneysend/v3/mapping/card/{mappingId}", "update", [], []),
}
def getOperationConfig(self,operationUUID):
if operationUUID not in self.__config:
raise Exception("Invalid operationUUID: "+operationUUI)
return self.__config[operationUUID]
def getOperationMetadata(self):
return OperationMetadata(ResourceConfig.getInstance().getVersion(), ResourceConfig.getInstance().getHost(), ResourceConfig.getInstance().getContext())
@classmethod
def create(cls,mapObj):
"""
Creates object of type CardMapping
@param Dict mapObj, containing the required parameters to create a new object
@return CardMapping of the response of created instance.
@raise ApiException: raised an exception from the response status
"""
return BaseObject.execute("93f59d06-df06-48ab-9633-208d0da481ca", CardMapping(mapObj))
@classmethod
def deleteById(cls,id,map=None):
"""
Delete object of type CardMapping by id
@param str id
@return CardMapping of the response of the deleted instance.
@raise ApiException: raised an exception from the response status
"""
mapObj = RequestMap()
if id:
mapObj.set("id", id)
if map:
if (isinstance(map,RequestMap)):
mapObj.setAll(map.getObject())
else:
mapObj.setAll(map)
return BaseObject.execute("25791dea-df4a-4e1f-8418-48d7cf03ee8b", CardMapping(mapObj))
def delete(self):
"""
Delete object of type CardMapping
@return CardMapping of the response of the deleted instance.
@raise ApiException: raised an exception from the response status
"""
return BaseObject.execute("25791dea-df4a-4e1f-8418-48d7cf03ee8b", self)
def deleteSubscriberID(self):
"""
Updates an object of type CardMapping
@return CardMapping object representing the response.
@raise ApiException: raised an exception from the response status
"""
return BaseObject.execute("ae8f3d37-0dd6-44ec-ac9b-16dad6a65ea0", self)
def read(self):
"""
Updates an object of type CardMapping
@return CardMapping object representing the response.
@raise ApiException: raised an exception from the response status
"""
return BaseObject.execute("2bad23fe-1ff5-4ff9-bf32-74b620ccfb1e", self)
def update(self):
"""
Updates an object of type CardMapping
@return CardMapping object representing the response.
@raise ApiException: raised an exception from the response status
"""
return BaseObject.execute("d9e2cf3c-6d50-4a0f-9f9c-d48d1290cb57", self)
|
# Imports
from os.path import join
# Input and output data
input_gz1 = join(config["data_dir"], "ont_DNA", "nanopolish_sample_1.tsv.gz")
input_gz2 = join(config["data_dir"], "ont_DNA", "nanopolish_sample_2.tsv.gz")
input_nc1 = join(config["data_dir"], "ont_DNA", "sniffles_1.vcf")
input_nc2 = join(config["data_dir"], "ont_DNA", "sniffles_2.vcf")
output_1 = "merged_1.txt"
output_2 = "merged_2.txt.gz"
output_3 = "merged_3.txt.gz"
# Rules
rule all:
input: [output_1, output_2, output_3]
rule nanopolish_concat_1:
input: tsv_list=[input_gz1, input_gz2]
output: tsv=output_1
log: "nanopolish_concat_1.log"
wrapper: "nanopolish_concat"
rule nanopolish_concat_2:
input: tsv_list=[input_nc1, input_nc2]
output: tsv=output_2
log: "nanopolish_concat_2.log"
wrapper: "nanopolish_concat"
rule nanopolish_concat_3:
input: tsv_list=[input_gz1, input_gz2, input_nc1, input_nc2]
output: tsv=output_3
log: "nanopolish_concat_3.log"
wrapper: "nanopolish_concat"
|
"""Command-line interface functionality for the Drover interface"""
import argparse
import logging
import sys
from pathlib import Path
import yaml
from pydantic import ValidationError
from drover import Drover, SettingsError, UpdateError
from drover.__metadata__ import VERSION
from drover.models import Settings
_logger = logging.getLogger(__name__)
def _parse_arguments():
parser = argparse.ArgumentParser(description=__doc__.partition('\n')[0])
parser.add_argument('--version', '-V', action='version', version=f'%(prog)s {VERSION}')
group = parser.add_mutually_exclusive_group()
group.add_argument('--verbose', '-v', action='count', default=0, help='increase output verbosity')
group.add_argument('--quiet', action='store_true', help='disable output')
group = parser.add_mutually_exclusive_group()
group.add_argument('--interactive', action='store_true', help='enable interactive output (i.e. for a PTY)')
group.add_argument('--non-interactive', action='store_true', help='disable interactive output')
parser.add_argument('--settings-file', default=Path('drover.yml'), type=Path,
help='Settings file name (default: "drover.yml")')
parser.add_argument('--install-path', default=Path(), type=Path,
help='Package install path (e.g. from "pip install -t"; default: working directory)')
parser.add_argument('stage', type=str)
return parser.parse_args()
def _parse_settings(settings_file_name: Path) -> Settings:
try:
with open(settings_file_name, 'r') as settings_file:
return Settings.parse_obj(yaml.safe_load(settings_file))
except (ValueError, ValidationError) as e:
_logger.error('Settings file is invalid: %s', e)
_logger.debug('', exc_info=e)
sys.exit(1)
except FileNotFoundError as e:
_logger.error('Settings file does not exist: %s', e)
_logger.debug('', exc_info=e)
sys.exit(1)
def main():
"""The main command-line entry point for the Drover interface"""
arguments = _parse_arguments()
if not arguments.quiet:
logging.basicConfig(format='%(message)s', stream=sys.stdout)
logging_level = max(1, logging.INFO - (10 * arguments.verbose))
logging.getLogger(__name__.split('.')[0]).setLevel(logging_level)
interactive = True if arguments.interactive else False if arguments.non_interactive else sys.__stdin__.isatty()
settings_file_name = arguments.settings_file
install_path: Path = arguments.install_path
settings: Settings = _parse_settings(settings_file_name)
try:
drover = Drover(settings, arguments.stage, interactive=interactive)
drover.update(install_path)
except SettingsError as e:
_logger.error('Initialization failed: %s', e)
_logger.debug('', exc_info=e)
sys.exit(1)
except UpdateError as e:
_logger.error('Update failed: %s', e)
_logger.debug('', exc_info=e)
sys.exit(1)
|
# This holds a large number of specific functions used in 'convert_script.py'.
import os
import sys
import json
import cv2
import dlib
# import `consts.py` from the parent directory
from os.path import abspath, join, dirname
sys.path.append(abspath(join(dirname(abspath(__file__)), '..')))
import consts
# indexes in dlib's 68-point landmark shape predictor
# taken from:
# https://www.pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/,
# https://www.pyimagesearch.com/wp-content/uploads/2017/04/facial_landmarks_68markup.jpg
l_eye_l_edge = 36
l_eye_r_edge = 39
r_eye_l_edge = 42
r_eye_r_edge = 45
# pre-load the face-detection models from dlib so that we have them while we do cropping
detector = dlib.cnn_face_detection_model_v1(consts.face_detector_path)
predictor = dlib.shape_predictor(consts.facial_landmark_detector_path)
previous_write = ""
def write_flush(s):
global previous_write
sys.stdout.write(s)
sys.stdout.flush()
previous_write = s
def query_yes_no(init_msg, retry_msg, delayed_interrupt=None):
if delayed_interrupt is not None:
delayed_interrupt.disable()
write_flush(init_msg)
while True:
response = sys.stdin.readline().strip()
if response == 'y' or response == 'n':
if delayed_interrupt is not None:
delayed_interrupt.enable()
return response
write_flush(retry_msg)
def get_namespace(delayed_interrupt):
previous_line = previous_write
namespace = {}
if os.path.exists(consts.namespace_path):
with open(consts.namespace_path, 'r') as f:
namespace = json.load(f)
else:
response = query_yes_no('\rNo namespace file found. Continue? (y/n): ',
"Please enter 'y' or 'n'. Continue? (y/n): ",
delayed_interrupt)
if response == 'n':
print("Found no file at: '{}'".format(consts.namespace_path))
return None
write_flush(previous_line)
return namespace
def update_namespace(new_namespace):
with open(consts.namespace_path, 'w+') as f:
json.dump(new_namespace, f)
# This function is separated so that it can be changed later
def parse_import_filename(filename):
id_and_ext = filename.split('.')
# returns id, ext
return id_and_ext[0], id_and_ext[1]
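# e.g. parse_import_filename("clip01.mp4") would return ("clip01", "mp4");
# "clip01.mp4" is only an illustrative value, not a name taken from the project's data.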
# renames the file with the given name, and returns the new name
#
# This function assumes that our working directory is the top-level directory of the project
def rename_with_namespace(delayed_interrupt, filename):
sentence_id, ext = parse_import_filename(filename)
namespace = get_namespace(delayed_interrupt)
if namespace is None:
return None
n = namespace[sentence_id] if sentence_id in namespace else 0
namespace[sentence_id] = n + 1
new_filename = "{}-{}.{}".format(sentence_id, n, ext)
new_filepath = os.path.join(consts.import_dir, new_filename)
old_filepath = os.path.join(consts.import_dir, filename)
os.rename(old_filepath, new_filepath)
# We wait until the end to update `namespace` because we don't want to write to the file with
# our changes to `namespace` until we know that we need to (i.e. until we've renamed the file)
#
# In reality, we wait on every part of this to finish, because of how it's called in
# `convert_script.py`, but -- in case that doesn't work (or the process is killed) -- it's good
# to be safe about it.
# The specific concern here is that `os.rename` fails
update_namespace(namespace)
return new_filename
# moves the given directory entry (e.g. file, subdirectory) in `init_dir` with the given `filename`
# into `targ_dir`
def move(filename, init_dir, targ_dir):
os.rename(os.path.join(init_dir, filename), os.path.join(targ_dir, filename))
# makes a subdirectory with `subdir_name` within `containing_dir` and outputs a sequence of images to it
def convert_to_imgs(filename, subdir_name, containing_dir):
filepath = os.path.join(containing_dir, filename)
subdir_path = os.path.join(containing_dir, subdir_name)
os.makedirs(subdir_path)
file_names = os.path.join(subdir_path, consts.img_file_format)
os.system("ffmpeg -loglevel panic -i {} -r {} {}".format(filepath, consts.fps, file_names))
def crop_image(i, img_path, face_box):
# unfortunately, it appears we can't simply use the same image both times; we have to
# independently load it twice. In its C++ documentation, dlib lists `cv_image()` as a method
# for generating a dlib image from an opencv image, but that feature does not seem to be
# available for Python.
shape = predictor(dlib.load_rgb_image(img_path), face_box)
img = cv2.imread(img_path)
output_dim_ratio = float(consts.output_height) / float(consts.output_width)
def crop_helper(write_file, l_edge_index, r_edge_index):
# get the bounds of the cropped region on the x-axis
x1 = shape.part(l_edge_index).x
x2 = shape.part(r_edge_index).x
# from our given x-axis bounds, determine our y coordinates. We'll center the cropped
# region at the average of the corners of the eye (given by the shape indexes)
y_center = (shape.part(l_edge_index).y + shape.part(r_edge_index).y) / 2
height = output_dim_ratio * (x2 - x1)
y1 = int(y_center - height/2)
y2 = int(y_center + height/2)
# get the new, cropped image and scale it
new_img = img[y1:y2, x1:x2]
scale_factor = float(consts.output_height) / float(int(height))
new_width = (x2 - x1) * scale_factor
new_height = (y2 - y1) * scale_factor
new_img = cv2.resize(new_img, (int(new_width), int(new_height)))
# save the image
cv2.imwrite(write_file, new_img)
crop_helper(consts.left_eye_format.format(i), l_eye_l_edge, l_eye_r_edge)
crop_helper(consts.right_eye_format.format(i), r_eye_l_edge, r_eye_r_edge)
|
from discord.ext import commands
import perms
import discord, json, asyncio
with open("loan.json") as file:
bank = json.load(file)
class Loan(commands.Cog):
'''Loan Commands'''
def __init__(self, bot):
self.bot = bot
@commands.group(invoke_without_command=True)
async def loan(self,ctx):
'''Loan help command'''
e= discord.Embed(title="Loan Command",description="Do you have less money in your wallet ? you can take a loan\n\n"
"**Everyone**\n"
"**loan request** (id) (Amount Loaned) (Amount Owed) \n",color=discord.Color.blue())
e.add_field(name="**Owners + Tessarect Special Officers**",value="**loan accept** (Discord) (Amount Loaned) (Amount Owed)\n"
"**loan deny** (Discord) (Amount Loaned) (Reason)\n")
e.add_field(name="**Developers and above**",value= "**loan payback** (Discord) (Amount Paidback)\n"
"**loan update**\n")
#e.add_field(name="CURRENT BALANCE OF BANK",value=f"")
await ctx.send(embed=e)
@commands.has_any_role(perms.captain_role, perms.owner_role)
@loan.command()
async def accept(self,ctx, user: discord.User, loaned: int, payback: int):
'''Accept a loan'''
if (loaned) >= ((bank["totals"]["balance"]-bank["totals"]["deposits"])// 25):
await ctx.send(f"The loan of {loaned:,} ֍ is too big and will take us under our 25% reserve. Please decline this loan")
else:
todo_channel = self.bot.get_channel(929333807893598238)
bank_channel = self.bot.get_channel(929332268688887908)
await todo_channel.send(f"{ctx.author.mention} **, collect {payback:,} ֍ from {user.mention}.** They borrowed {loaned:,} ֍ from the Amteor International Bank.")
await bank_channel.send(f"**-{loaned:,} ֍** Loaned to {user.mention}. Will pay back {payback:,} ֍.")
bank["totals"]["balance"] -= loaned
bank["totals"]["loans"] += payback
with open("loan.json", "w") as file:
json.dump(bank, file)
            await ctx.author.send(f"You have given {user.mention} a loan of {loaned:,} ֍. They will pay back {payback:,} ֍. (Note: the amount is not credited automatically yet, as this command is under development.)")
@commands.has_any_role(perms.captain_role, perms.owner_role)
@loan.command()
async def deny(self, ctx, user: discord.User, amount: int, *, reason):
'''Decline a loan'''
await ctx.send(f"{user} has been denied of their {amount:,} ֍ loan.")
await user.send(f"Your loan of {amount:,} ֍ has been denied. Reason: {reason}")
@commands.has_role(perms.staff_role)
@loan.command()
async def payback(self, ctx, user: discord.User, paidback: int):
'''Confirm a users loan repayment'''
bank_channel = self.bot.get_channel(929332268688887908)
bank["totals"]["balance"] += paidback
await bank_channel.send(f"**+{paidback:,} ֍** Loan payment from {user.mention}")
with open("loan.json", "w") as file:
json.dump(bank, file)
await ctx.send(f"Loan payment from {user.mention} of {paidback:,} ֍. Processed")
@loan.command()
async def request(self, ctx, id: str, amount: int, payback: int):
'''Request a loan from the clan'''
loan_channel = self.bot.get_channel(929332268688887908)
        await loan_channel.send(f"@everyone, discord user {ctx.author.mention} has requested a {amount:,} ֍ loan. They have offered to pay back {payback:,} ֍, and their id is `{id}`. Do a!loan accept {ctx.author.mention} {amount} {payback} to accept their loan, or, if their loan is denied, do a!loan deny {ctx.author.mention} {amount} reason")
await ctx.send(f"Your loan request for {amount:,} ֍ has been sent. You have offered to pay back the loan with {payback:,} ֍. ")
@commands.has_role(perms.staff_role)
@loan.command()
async def update(self, ctx):
'''Gets an update on number of ֍ loaned to users'''
loan_amount = bank["totals"]["loans"]
await ctx.send(f"Total amount of ֍ waiting to be paid back from loans is {loan_amount:,} ֍")
def setup(bot):
bot.add_cog(Loan(bot))
|
import machine
#Setup 2 digital inputs for buttons
ButtonA = machine.Pin(0, machine.Pin.IN,
machine.Pin.PULL_DOWN)
ButtonB = machine.Pin(1,machine.Pin.IN,
machine.Pin.PULL_DOWN)
#setup a PWM Output
Buzzer = machine.PWM(machine.Pin(15))
Buzzer.duty_u16(32767) #make it 50% duty cycle (32767/65535)
Frequency = 1000 #set a starting frequency of 1 Khz
def ButtonIRQHandler(pin):
global Frequency
if pin == ButtonA: #up the frequency
if Frequency < 2000:
Frequency += 50
elif pin == ButtonB: #lower the frequency
if Frequency > 100:
Frequency -= 50
#setup the IRQ and hook it to the handler
ButtonA.irq(trigger = machine.Pin.IRQ_RISING,
handler = ButtonIRQHandler)
ButtonB.irq(trigger = machine.Pin.IRQ_RISING,
handler = ButtonIRQHandler)
while True:
Buzzer.freq(Frequency)
|
from decimal import Decimal
from django.contrib import messages
from django.urls import reverse_lazy, reverse
from django.http import HttpResponseRedirect
from django.views.generic import FormView
from wtforms import SelectField, BooleanField, validators, StringField, DecimalField, widgets
from web_payments.forms import PaymentForm
from web_payments.django import get_payment_model
from web_payments import RedirectNeeded, PaymentStatus, FraudStatus
class PaymentObForm(PaymentForm):
action = SelectField("Action:", validators=[validators.InputRequired()], choices=[('',''),("capture", "capture"), ("refund", "refund"), ("fail", "fail"), ("fraud", "fraud"), ("success", "success")], render_kw={"onchange": "hideunrelated(this.value)"})
amount = DecimalField("Amount:", validators=[validators.Optional()])
final = BooleanField("Final?", validators=[validators.Optional()])
message = StringField("Message:", validators=[validators.Optional()])
class PayObView(FormView):
template_name = "payob.html"
success_url = reverse_lazy("select-form")
def dispatch(self, request, *args, **kwargs):
self.payment = get_payment_model().objects.get(id=kwargs["id"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form_is_local'] = True
context["mytitle"] = "Payment Object: %s" % self.payment.id
context["payment_fields"] =[(f.verbose_name, getattr(self.payment, f.name)) for f in self.payment._meta.get_fields()]
context["payoblist"] = get_payment_model().objects.all()
return context
def get_form(self, form_class=None):
return PaymentObForm(formdata=self.get_form_kwargs().get("data", None))
def post(self, request, *args, **kwargs):
"""
        Handles POST requests, instantiating a form instance with the passed
        POST variables, and then checks it for validity.
"""
form = self.get_form()
if form.validate():
messages.add_message(request, messages.SUCCESS, "Payment update successfull")
return self.form_valid(form)
else:
messages.add_message(request, messages.ERROR, "Payment update failed")
return self.form_invalid(form)
def form_valid(self, form):
data = form.data
if data["action"] == "capture":
captured = self.payment.capture(data["amount"], data["final"])
if captured:
messages.add_message(self.request, messages.SUCCESS, "Captured: %s" % captured)
elif data["action"] == "refund":
refunded = self.payment.refund(data["amount"])
if refunded:
messages.add_message(self.request, messages.SUCCESS, "Refunded: %s" % refunded)
elif data["action"] == "fail":
self.payment.change_status(PaymentStatus.ERROR, data["message"])
elif data["action"] == "fraud":
self.payment.change_fraud_status(FraudStatus.REJECT, data["message"])
elif data["action"] == "success":
self.payment.change_status(PaymentStatus.CONFIRMED)
return super().form_valid(form)
class PaymentView(FormView):
template_name = "form.html"
def dispatch(self, request, *args, **kwargs):
self.payment = get_payment_model().objects.get(id=kwargs["id"])
return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
if not self.payment.provider._capture:
return reverse("paymentob", kwargs={"id": self.payment.id})
else:
return reverse("select-form")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['safe_urls'] = ["", reverse("payment-form", kwargs={"id": self.payment.id}), reverse("select-form")]
#context["object"] = get_payment_model().objects.get(id=self.kwargs["id"])
context["payoblist"] = get_payment_model().objects.all()
context["mytitle"] = "Payment"
return context
def get_form(self, form_class=None):
return self.payment.get_form(self.get_form_kwargs().get("data", None))
def get(self, request, *args, **kwargs):
try:
return super().get(request, *args, **kwargs)
except RedirectNeeded as exc:
messages.add_message(request, messages.SUCCESS, "Payment redirects to %s" % exc.args[0])
return HttpResponseRedirect(exc.args[0])
def post(self, request, *args, **kwargs):
"""
        Handles POST requests, instantiating a form instance with the passed
        POST variables, and then checks it for validity.
"""
try:
form = self.get_form()
except RedirectNeeded as exc:
messages.add_message(request, messages.SUCCESS, "Payment redirects to %s" % exc.args[0])
return HttpResponseRedirect(exc.args[0])
#except Exception as exc:
# return HttpResponseBadRequest(exc, content_type="text/plain")
if form.validate():
messages.add_message(request, messages.SUCCESS, "Payment succeeded")
return self.form_valid(form)
else:
messages.add_message(request, messages.ERROR, "Payment failed")
return self.form_invalid(form)
class SelectPaymentForm(PaymentForm):
variant = SelectField("Payment Method", validators=[validators.InputRequired()])
total = DecimalField("Total amount", validators=[])
currency = StringField("Currency", validators=[])
billing_first_name = StringField("First Name", validators=[validators.Length(max=255)])
billing_last_name = StringField("Last Name", validators=[validators.Length(max=255)])
billing_address_1 = StringField("Address", validators=[validators.Length(max=255)])
billing_address_2 = StringField("Address extension", validators=[validators.Length(max=255)])
billing_email = StringField("Email", widget=widgets.Input("email"), validators=[validators.Length(max=255), validators.Email(), validators.Optional()])
billing_city = StringField("City", validators=[validators.Length(max=255)])
billing_postcode = StringField("Post code", validators=[validators.Length(max=20)])
billing_country_code = StringField("Country code", validators=[validators.Length(min=2, max=2), validators.Optional()])
billing_country_area = StringField("Country area", validators=[validators.Length(max=255)])
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.variant.choices = [(x.extra["name"], x.extra.get("localized_name", x.extra["name"])) for x in get_payment_model().list_providers()]
class SelectView(FormView):
template_name = "form.html"
initial = {"currency": "EUR", "total": Decimal("10.0")}
def get_success_url(self):
return reverse("payment-form", kwargs={"id": self.payment.id})
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form_is_local'] = True
context["mytitle"] = "Select"
context["payoblist"] = get_payment_model().objects.all()
return context
def get_form(self, form_class=None):
formkwargs = self.get_form_kwargs()
return SelectPaymentForm(formdata=formkwargs.get("data", None), data=formkwargs["initial"])
def form_valid(self, form):
self.payment = get_payment_model().objects.create(**form.data)
if self.payment.provider._capture:
self.payment.captured_amount = self.payment.total
self.payment.save()
return super().form_valid(form)
def post(self, request, *args, **kwargs):
"""
        Handles POST requests, instantiating a form instance with the passed
        POST variables, and then checks it for validity.
"""
form = self.get_form()
if form.validate():
return self.form_valid(form)
else:
return self.form_invalid(form)
|
from lib import puzzle
def part1(data: str):
lower = int(data.split('-')[0])
upper = int(data.split('-')[1])
results = []
for i in range(lower, upper + 1):
if i == int(''.join(sorted(str(i)))):
prev = str(i)[0]
valid = False
for j in str(i)[1:]:
if j == prev:
valid = True
prev = j
if valid:
results.append(i)
return len(results)
def part2(data: str):
lower = int(data.split('-')[0])
upper = int(data.split('-')[1])
results = []
for i in range(lower, upper + 1):
if i == int(''.join(sorted(str(i)))):
prev = str(i)[0]
prev2 = str(i)[1]
prev3 = str(i)[2]
if prev == prev2 and prev2 != prev3:
valid = True
else:
valid = False
prev4 = str(i)[3]
if prev2 == prev3 and prev != prev2 and prev3 != prev4:
valid = True
prev5 = str(i)[4]
if prev3 == prev4 and prev2 != prev3 and prev4 != prev5:
valid = True
prev6 = str(i)[5]
if prev4 == prev5 and prev3 != prev4 and prev5 != prev6:
valid = True
if prev5 == prev6 and prev5 != prev4:
valid = True
if valid:
results.append(i)
return len(results)
class Day04(puzzle.Puzzle):
year = '2019'
day = '4'
def get_data(self) -> str:
return self.input_data
def run(self):
print(f'Answer part 1: {part1(self.get_data())}')
print(f'Answer part 2: {part2(self.get_data())}')
|
import setuptools
from ublock import VERSION_STR
setuptools.setup(
name='ublock',
version=VERSION_STR,
description='a toolkit for the control of trial-based behavioral tasks',
url='https://github.com/gwappa/python-ublock',
author='Keisuke Sehara',
author_email='keisuke.sehara@gmail.com',
license='MIT',
install_requires=[
'pyqtgraph>=0.10',
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
packages=['ublock',],
entry_points={
# nothing for the time being
}
)
|
from flask import Flask, jsonify
from gevent.wsgi import WSGIServer
from collections import deque
import logging
import binascii
import decimal
class Logger(object):
""" A dummy file object to allow using a logger to log requests instead
of sending to stderr like the default WSGI logger """
logger = None
def write(self, s):
self.logger.info(s.strip())
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
class ServerMonitor(WSGIServer):
""" Provides a few useful json endpoints for viewing server health and
performance. """
def __init__(self, manager):
self.logger = logging.getLogger(self.__class__.__name__)
self.manager = manager
self.settings = self.manager.settings
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.add_url_rule('/', 'general', self.general)
self.app = app
def start(self, *args, **kwargs):
WSGIServer.__init__(self, self.settings['ui_address'], self.app,
spawn=100, log=Logger())
self.logger.info("Monitoring port listening on {}"
.format(self.settings['ui_address']))
# Monkey patch the wsgi logger
Logger.logger = self.logger
WSGIServer.start(self, *args, **kwargs)
def stop(self, *args, **kwargs):
WSGIServer.stop(self)
self.logger.info("Exit")
def general(self):
conns = []
for conn in self.manager.peermgr.peers:
conns.append(dict(height=conn.remote_height,
protocol_version=conn.ver_send,
client_version=conn.client_version,
address="{}:{}".format(conn.dstaddr, conn.dstport)))
data = dict(height=self.manager.chaindb.getheight(),
hash=binascii.hexlify(self.manager.chaindb.gettophash()[::-1]),
peer_count=len(self.manager.peermgr.peers),
peers=conns)
return jsonify(jsonize(data))
def jsonize(item):
""" Recursive function that converts a lot of non-serializable content
to something json.dumps will like better """
if isinstance(item, dict):
new = {}
for k, v in item.iteritems():
k = str(k)
if isinstance(v, deque):
new[k] = jsonize(list(v))
else:
new[k] = jsonize(v)
return new
elif isinstance(item, list) or isinstance(item, tuple):
new = []
for part in item:
new.append(jsonize(part))
return new
else:
if isinstance(item, str):
return item.encode('string_escape')
elif isinstance(item, set):
return list(item)
elif isinstance(item, decimal.Decimal):
return float(item)
elif isinstance(item, (int, long, bool, float)) or item is None:
return item
elif hasattr(item, "__dict__"):
return {str(k).encode('string_escape'): str(v).encode('string_escape')
for k, v in item.__dict__.iteritems()}
else:
return str(item)
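# Illustrative example (not part of the original module; this file targets Python 2,
# hence iteritems/long above):
#   jsonize({"height": decimal.Decimal("1.5"), "peers": deque([1, 2])})
#   returns {"height": 1.5, "peers": [1, 2]}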
|
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
from setuptools import setup
deps = [ 'pyspotify', 'pyalsaaudio' ]
setup(name='Tylyfy',
version='0.0.2',
description='CLI-based Spotify player',
author='Kacper Żuk',
author_email='kacper.b.zuk+tylyfy@gmail.com',
url='https://github.com/kacperzuk/Tylyfy',
packages=['Tylyfy'],
scripts=['tylyfy'],
install_requires = [
'pyspotify>=2.0.0b2',
'pyalsaaudio>=0.7'
],
license='BSD'
)
|
#FLM: RoboFab Intro, Kerning
#
#
# demo of RoboFab kerning.
#
#
# NOTE: this will mess up the kerning in your test font.
from robofab.world import CurrentFont
# (make sure you have a font with some kerning opened in FontLab)
f = CurrentFont()
# If you are familiar with the way RoboFog handled kerning,
# you will feel right at home with RoboFab's kerning implementation.
# As in RoboFog, the kerning object walks like a dict and talks like a
# dict, but it's not a dict. It is a special object that has some features
# built specifically for working with kerning. Let's have a look!
kerning = f.kerning
# A general note about using the kerning object in FontLab. In FontLab, kerning
# data lives in individual glyphs, so to access it at the font level we must go
# through every glyph, gathering kerning pairs as we go. This process occurs
# each time you call font.kerning. So, to speed things up, it is best to reference
# it with an assignment. This will keep it from being regenerated every time
# you call an attribute or make a change.
# kerning gives you access to some bits of global data
print "%s has %s kerning pairs"%(f.info.postscriptFullName, len(kerning))
print "the average kerning value is %s"%kerning.getAverage()
min, max = kerning.getExtremes()
print "the largest kerning value is %s"%max
print "the smallest kerning value is %s"%min
# ok, kerning.getExtremes() may be a little silly, but it could have its uses.
# kerning pairs are accessed as if you are working with a dict.
# (left glyph name, right glyph name)
kerning[('V', 'o')] = -14
print '(V, o)', kerning[('V', 'o')]
# if you want to go through all kerning pairs:
for pair in kerning:
print pair, kerning[pair]
# kerning also has some useful methods. A few examples:
# scale all kerning!
print 'scaling...'
kerning.scale(100)
print "the average kerning value is %s"%kerning.getAverage()
min, max = kerning.getExtremes()
print "the largest kerning value is %s"%max
print "the smallest kerning value is %s"%min
# get a count of pairs that contain certain glyphs
print 'counting...'
count = kerning.occurrenceCount(['A', 'B', 'C'])
for glyphName in count.keys():
print "%s: found in %s pairs"%(glyphName, count[glyphName])
# don't forget to update the font after you have made some changes!
f.update()
|
# Copyright (c) 2019, NVIDIA CORPORATION.
from libgdf_cffi import libgdf, ffi
import nvstrings
from cudf.dataframe.column import Column
from cudf.dataframe.dataframe import DataFrame
from cudf.dataframe.datetime import DatetimeColumn
from cudf.dataframe.numerical import NumericalColumn
from cudf.utils import ioutils
import pyarrow.parquet as pq
import numpy as np
import warnings
import os
import errno
def _wrap_string(text):
if text is None:
return ffi.NULL
else:
return ffi.new("char[]", text.encode())
@ioutils.doc_read_parquet()
def read_parquet(path, engine='cudf', *args, **kwargs):
"""{docstring}"""
if engine == 'cudf':
# Setup arguments
pq_reader = ffi.new('pq_read_arg*')
if not os.path.isfile(path) and not os.path.exists(path):
raise FileNotFoundError(errno.ENOENT,
os.strerror(errno.ENOENT), path)
source_ptr = _wrap_string(str(path))
pq_reader.source_type = libgdf.FILE_PATH
pq_reader.source = source_ptr
usecols = kwargs.get("columns")
if usecols is not None:
arr_cols = []
for col in usecols:
arr_cols.append(_wrap_string(col))
use_cols_ptr = ffi.new('char*[]', arr_cols)
pq_reader.use_cols = use_cols_ptr
pq_reader.use_cols_len = len(usecols)
# Call to libcudf
libgdf.read_parquet(pq_reader)
out = pq_reader.data
if out == ffi.NULL:
raise ValueError("Failed to parse data")
# Extract parsed columns
outcols = []
new_names = []
for i in range(pq_reader.num_cols_out):
if out[i].dtype == libgdf.GDF_STRING:
ptr = int(ffi.cast("uintptr_t", out[i].data))
new_names.append(ffi.string(out[i].col_name).decode())
outcols.append(nvstrings.bind_cpointer(ptr))
else:
newcol = Column.from_cffi_view(out[i])
new_names.append(ffi.string(out[i].col_name).decode())
if newcol.dtype.type == np.datetime64:
outcols.append(
newcol.view(DatetimeColumn, dtype='datetime64[ms]')
)
else:
outcols.append(
newcol.view(NumericalColumn, dtype=newcol.dtype)
)
# Construct dataframe from columns
df = DataFrame()
for k, v in zip(new_names, outcols):
df[k] = v
# Set column to use as row indexes if available
if pq_reader.index_col != ffi.NULL:
df = df.set_index(df.columns[pq_reader.index_col[0]])
else:
warnings.warn("Using CPU via PyArrow to read Parquet dataset.")
pa_table = pq.read_pandas(path, *args, **kwargs)
df = DataFrame.from_arrow(pa_table)
return df
@ioutils.doc_to_parquet()
def to_parquet(df, path, *args, **kwargs):
"""{docstring}"""
warnings.warn("Using CPU via PyArrow to write Parquet dataset, this will "
"be GPU accelerated in the future")
pa_table = df.to_arrow()
pq.write_to_dataset(pa_table, path, *args, **kwargs)
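# A minimal usage sketch (illustrative only, not part of the library): read a
# Parquet file on the GPU, selecting two assumed columns, then write it back out
# via the CPU path. "example.parquet" and "example_out" are placeholders.
if __name__ == "__main__":
    df = read_parquet("example.parquet", engine="cudf", columns=["a", "b"])
    print(df)
    to_parquet(df, "example_out")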
|
import os
def list():
"""list all importable datasets"""
path = os.path.dirname(os.path.abspath(__file__))
for _, _, files in os.walk(path):
for file in files:
if file.endswith('.py') and file != '__init__.py':
print(file[:-3])
|
__author__ = 'jwely'
import os
import tarfile
import gzip
import zipfile
from dnppy import core
__all__ = ["extract_archive"]
def extract_archive(filepaths, delete_originals = False):
"""
    Input a list of filepaths OR a directory path containing compressed
    files. Attempts to decompress each archive it finds.
    Supported formats include ``.tar.gz``, ``.tar``, ``.gz``, ``.zip``.
    :param filepaths: list of filepaths to archives for extraction
    :param delete_originals: Set to "True" if archives may be deleted after
                             their contents are successfully extracted.
"""
filepaths = core.enf_filelist(filepaths)
for filepath in filepaths:
head,tail = os.path.split(filepath)
if filepath.endswith(".tar.gz"):
with tarfile.open(filepath, 'r:gz') as tfile:
outdir = os.path.join(head, tail.replace(".tar.gz",""))
tfile.extractall(outdir)
# gzip only compresses single files
elif filepath.endswith(".gz"):
with gzip.open(filepath, 'rb') as gzfile:
outfile = os.path.join(head, tail.replace(".gz",""))
content = gzfile.read()
with open(outfile, 'wb') as of:
of.write(content)
elif filepath.endswith(".tar"):
with tarfile.open(filepath, 'r') as tfile:
outdir = os.path.join(head, tail.replace(".tar",""))
tfile.extractall(outdir)
elif filepath.endswith(".zip"):
with zipfile.ZipFile(filepath, "r") as zipf:
outdir = os.path.join(head, tail.replace(".zip",""))
zipf.extractall(outdir)
        else:
            # not a recognized archive format; skip it and keep processing
            continue
print("Extracted {0}".format(filepath))
if delete_originals:
os.remove(filepath)
return
#testing area
if __name__ == "__main__":
formats = [r"C:\Users\jwely\Desktop\troubleshooting\zip_tests\MOD09A1.A2015033.h11v05.005.2015044233105_1_tar.tar",
r"C:\Users\jwely\Desktop\troubleshooting\zip_tests\MOD09A1.A2015033.h11v05.005.2015044233105_1_targz.tar.gz",
r"C:\Users\jwely\Desktop\troubleshooting\zip_tests\MOD09A1.A2015033.h11v05.005.2015044233105_1.tif.gz",
r"C:\Users\jwely\Desktop\troubleshooting\zip_tests\MOD09A1.A2015033.h11v05.005.2015044233105_1_zip.zip"]
for format in formats:
extract_archive(format)
|
"""a JupyterLite addon for serving"""
import sys
import doit
from traitlets import Bool, Int, default
from .base import BaseAddon
class ServeAddon(BaseAddon):
__all__ = ["status", "serve"]
has_tornado: bool = Bool()
port: int = Int(8000)
@default("has_tornado")
def _default_has_tornado(self):
try:
__import__("tornado")
return True
except (ImportError, AttributeError):
return False
def status(self, manager):
yield dict(
name="contents",
actions=[
lambda: print(
f""" will serve {self.port} with: {"tornado" if self.has_tornado else "stdlib"}"""
)
],
)
def serve(self, manager):
task = dict(
name="serve",
doc=f"run server at http://localhost:{self.port}/ for {manager.output_dir}",
uptodate=[lambda: False],
)
if self.has_tornado:
task["actions"] = [(self._serve_tornado, [])]
else:
task["actions"] = [
lambda: self.log.info(
"Using python's built-in http.server: "
"install tornado for a snappier experience"
),
doit.tools.Interactive(
[sys.executable, "-m", "http.server", "-b", "localhost"],
cwd=str(self.manager.output_dir),
shell=False,
),
]
yield task
def _serve_tornado(self):
from tornado import ioloop, web
class Handler(web.StaticFileHandler):
def parse_url_path(self, url_path):
if not url_path or url_path.endswith("/"):
url_path = url_path + "index.html"
return url_path
path = str(self.manager.output_dir)
routes = [("/(.*)", Handler, {"path": path})]
app = web.Application(routes, debug=True)
self.log.warning(
f"""
Serving JupyterLite from:
{path}
on:
http://localhost:{self.port}/
*** Press Ctrl+C to exit **
"""
)
app.listen(self.port)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
self.log.warning(f"""Stopping http://localhost:{self.port}...""")
|
import os
import sys
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
file_path = os.path.abspath(__file__)
current_directory = os.path.dirname(file_path)
project_directory = os.path.dirname(current_directory)
sys.path.insert(0, project_directory)
from {{package_name}}.models import {{ project_name }}
# load model in development mode
model_path = os.path.join(project_directory, 'saved_models', '{{ package_name }}')
model = {{ project_name }}().load_model(model_path)
# get input data
iris = load_iris()
df = pd.concat(
[
pd.DataFrame(data=iris.data, columns=iris.feature_names),
pd.DataFrame(data=iris.target, columns=['species'])
],
axis=1
)
X = pd.concat(
[
df.drop(['species', 'sepal length (cm)'], axis=1),
pd.get_dummies(df['species'])
],
axis=1
)
Y = df['sepal length (cm)']
# predict
for x, y in zip(X[1:10].values, Y[1:10]):
print(f"x: {x}, y_hat: {model([x])}, y: {y}")
|
#!/usr/bin/env python3
"""
Configuration file for the Sphinx documentation builder.
This file only contains a selection of the most common options. For a full
list see the documentation:
https://www.sphinx-doc.org/en/master/usage/configuration.html
"""
import binance
# -- Project information -----------------------------------------------------
project = 'binance'
copyright = '2021, Rasmus Villebro'
author = 'Rasmus Villebro'
# The full version, including alpha/beta/rc tags
release = binance.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
html_theme_options = {
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/rvillebro/binance",
"icon": "fab fa-github-square",
},
],
"use_edit_page_button": True,
}
html_context = {
# "github_url": "https://github.com", # or your GitHub Enterprise interprise
"github_user": "rvillebro",
"github_repo": "binance",
"github_version": "master",
"doc_path": "doc/source/",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os.path
from collections import OrderedDict, defaultdict
from .metadata import read_model_metadata, find_dimension
from .metadata import LocalizationContext
from .auth import NotAuthorized
from .common import read_json_file
from .config_parser import read_slicer_config
from .errors import ConfigurationError, ArgumentError, CubesError
from .logging import get_logger
from .calendar import Calendar
from .namespace import Namespace
from .compat import ConfigParser
from . import ext
from . import compat
__all__ = [
"Workspace",
]
SLICER_INFO_KEYS = (
"name",
"label",
"description", # Workspace model description
"copyright", # Copyright for the data
"license", # Data license
"maintainer", # Name (and maybe contact) of data maintainer
"contributors", # List of contributors
"visualizers", # List of dicts with url and label of server's visualizers
"keywords", # List of keywords describing server's cubes
"related" # List of dicts with related servers
)
def interpret_config_value(value):
if value is None:
return value
if isinstance(value, compat.string_type):
if value.lower() in ('yes', 'true', 'on'):
return True
elif value.lower() in ('no', 'false', 'off'):
return False
return value
def config_items_to_dict(items):
return dict([ (k, interpret_config_value(v)) for (k, v) in items ])
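# For example (illustrative): interpret_config_value("yes") -> True,
# interpret_config_value("off") -> False, and any other string such as "8080"
# is returned unchanged.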
class Workspace(object):
def __init__(self, config=None, stores=None, load_base_model=True,
**_options):
"""Creates a workspace. `config` should be a `ConfigParser` or a
path to a config file. `stores` should be a dictionary of store
configurations, a `ConfigParser` or a path to a ``stores.ini`` file.
Properties:
* `stores` – dictionary of stores
* `store_infos` – dictionary of store configurations
* `namespace` – default namespace
        * `logger` – workspace logger
        * `root_dir` – root directory where all relative paths are looked for
* `models_dir` – directory with models (if relative, then relative to
the root directory)
* `info` – info dictionary from the info file or info section
* `calendar` – calendar object providing date and time functions
* `ns_languages` – dictionary where keys are namespaces and values
are language to translation path mappings.
"""
# FIXME: **_options is temporary solution/workaround before we get
# better configuration. Used internally. Don't use!
config = read_slicer_config(config)
self.store_infos = {}
self.stores = {}
# Logging
# =======
        # Log to file or console
if config.has_option("workspace", "log"):
self.logger = get_logger(path=config.get("workspace", "log"))
else:
self.logger = get_logger()
        # Change the log level if necessary
if config.has_option("workspace", "log_level"):
level = config.get("workspace", "log_level").upper()
self.logger.setLevel(level)
# Set the default models path
if config.has_option("workspace", "root_directory"):
self.root_dir = config.get("workspace", "root_directory")
elif "cubes_root" in _options:
# FIXME: this is quick workaround, see note at the beginning of
# this method
self.root_dir = _options["cubes_root"]
else:
self.root_dir = ""
if config.has_option("workspace", "models_directory"):
self.models_dir = config.get("workspace", "models_directory")
elif config.has_option("workspace", "models_path"):
self.models_dir = config.get("workspace", "models_path")
else:
self.models_dir = ""
if self.root_dir and not os.path.isabs(self.models_dir):
self.models_dir = os.path.join(self.root_dir, self.models_dir)
if self.models_dir:
self.logger.debug("Models root: %s" % self.models_dir)
else:
self.logger.debug("Models root set to current directory")
# Namespaces and Model Objects
# ============================
self.namespace = Namespace()
# Cache of created global objects
self._cubes = {}
# Note: providers are responsible for their own caching
# Info
# ====
self.info = OrderedDict()
if config.has_option("workspace", "info_file"):
path = config.get("workspace", "info_file")
if self.root_dir and not os.path.isabs(path):
path = os.path.join(self.root_dir, path)
info = read_json_file(path, "Slicer info")
for key in SLICER_INFO_KEYS:
self.info[key] = info.get(key)
elif config.has_section("info"):
info = dict(config.items("info"))
if "visualizer" in info:
info["visualizers"] = [{
"label": info.get("label", info.get("name", "Default")),
"url": info["visualizer"]
}]
for key in SLICER_INFO_KEYS:
self.info[key] = info.get(key)
# Register stores from external stores.ini file or a dictionary
if not stores and config.has_option("workspace", "stores_file"):
stores = config.get("workspace", "stores_file")
# Prepend the root directory if stores is relative
if self.root_dir and not os.path.isabs(stores):
stores = os.path.join(self.root_dir, stores)
if isinstance(stores, compat.string_type):
store_config = ConfigParser()
try:
store_config.read(stores)
except Exception as e:
raise ConfigurationError("Unable to read stores from %s. "
"Reason: %s" % (stores, str(e) ))
for store in store_config.sections():
self._register_store_dict(store,
dict(store_config.items(store)))
elif isinstance(stores, dict):
for name, store in stores.items():
self._register_store_dict(name, store)
elif stores is not None:
raise ConfigurationError("Unknown stores description object: %s" %
(type(stores)))
# Calendar
# ========
if config.has_option("workspace", "timezone"):
timezone = config.get("workspace", "timezone")
else:
timezone = None
if config.has_option("workspace", "first_weekday"):
first_weekday = config.get("workspace", "first_weekday")
else:
first_weekday = 0
self.logger.debug("Workspace calendar timezone: %s first week day: %s"
% (timezone, first_weekday))
self.calendar = Calendar(timezone=timezone,
first_weekday=first_weekday)
# Register Stores
# ===============
#
# * Default store is [store] in main config file
# * Stores are also loaded from main config file from sections with
# name [store_*] (not documented feature)
default = None
if config.has_section("store"):
default = dict(config.items("store"))
if default:
self._register_store_dict("default", default)
# Register [store_*] from main config (not documented)
for section in config.sections():
if section != "store" and section.startswith("store"):
name = section[6:]
self._register_store_dict(name, dict(config.items(section)))
if config.has_section("browser"):
self.browser_options = dict(config.items("browser"))
else:
self.browser_options = {}
if config.has_section("main"):
self.options = dict(config.items("main"))
else:
self.options = {}
# Register Languages
# ==================
#
# Register [language *]
self.ns_languages = defaultdict(dict)
for section in config.sections():
if section.startswith("locale"):
lang = section[9:]
# namespace -> path
for nsname, path in config.items(section):
                    if nsname == "default":
ns = self.namespace
else:
(ns, _) = self.namespace.namespace(nsname)
ns.add_translation(lang, path)
# Authorizer
# ==========
if config.has_option("workspace", "authorization"):
auth_type = config.get("workspace", "authorization")
options = dict(config.items("authorization"))
options["cubes_root"] = self.root_dir
self.authorizer = ext.authorizer(auth_type, **options)
else:
self.authorizer = None
# Configure and load models
# =========================
# Models are searched in:
# [model]
        # [models] * <- deprecated!
# TODO: add this for nicer zero-conf
# root/model.json
# root/main.cubesmodel
# models/*.cubesmodel
models = []
        # Preferred (non-deprecated) form
if config.has_section("model"):
if not config.has_option("model", "path"):
raise ConfigurationError("No model path specified")
path = config.get("model", "path")
provider = config.get("model", "provider")
models.append(("main", path, provider))
        # TODO: Deprecate this too
if config.has_section("models"):
models += config.items("models")
for model, path, provider in models:
self.logger.debug("Loading model %s" % model)
self.import_model(path, provider)
def flush_lookup_cache(self):
"""Flushes the cube lookup cache."""
self._cubes.clear()
# TODO: flush also dimensions
def _get_namespace(self, ref):
"""Returns namespace with ference `ref`"""
if not ref or ref == "default":
return self.namespace
return self.namespace.namespace(ref)[0]
def add_translation(self, locale, trans, ns="default"):
"""Add translation `trans` for `locale`. `ns` is a namespace. If no
namespace is specified, then default (global) is used."""
namespace = self._get_namespace(ns)
namespace.add_translation(locale, trans)
def _register_store_dict(self, name, info):
info = dict(info)
try:
type_ = info.pop("type")
except KeyError:
try:
type_ = info.pop("backend")
except KeyError:
raise ConfigurationError("Store '%s' has no type specified" % name)
else:
                self.logger.warn("'backend' is deprecated, use 'type' for "
"store (in %s)." % str(name))
self.register_store(name, type_, **info)
def register_default_store(self, type_, **config):
"""Convenience function for registering the default store. For more
information see `register_store()`"""
self.register_store("default", type_, **config)
def register_store(self, name, type_, include_model=True, **config):
"""Adds a store configuration."""
config = dict(config)
if name in self.store_infos:
raise ConfigurationError("Store %s already registered" % name)
self.store_infos[name] = (type_, config)
# Model and provider
# ------------------
# If store brings a model, then include it...
if include_model and "model" in config:
model = config.pop("model")
else:
model = None
# Get related model provider or override it with configuration
store_factory = ext.store.factory(type_)
if hasattr(store_factory, "related_model_provider"):
provider = store_factory.related_model_provider
else:
provider = None
provider = config.pop("model_provider", provider)
nsname = config.pop("namespace", None)
if model:
self.import_model(model, store=name, namespace=nsname,
provider=provider)
elif provider:
# Import empty model and register the provider
self.import_model({}, store=name, namespace=nsname,
provider=provider)
self.logger.debug("Registered store '%s'" % name)
def _store_for_model(self, metadata):
"""Returns a store for model specified in `metadata`. """
store_name = metadata.get("store")
if not store_name and "info" in metadata:
store_name = metadata["info"].get("store")
store_name = store_name or "default"
return store_name
# TODO: this is very confusing process, needs simplification
# TODO: change this to: add_model_provider(provider, info, store, languages, ns)
def import_model(self, model=None, provider=None, store=None,
translations=None, namespace=None):
"""Registers the `model` in the workspace. `model` can be a
metadata dictionary, filename, path to a model bundle directory or a
URL.
If `namespace` is specified, then the model's objects are stored in
the namespace of that name.
`store` is an optional name of data store associated with the model.
If not specified, then the one from the metadata dictionary will be
used.
Model's provider is registered together with loaded metadata. By
default the objects are registered in default global namespace.
Note: No actual cubes or dimensions are created at the time of calling
this method. The creation is deferred until
:meth:`cubes.Workspace.cube` or :meth:`cubes.Workspace.dimension` is
called.
"""
# 1. Metadata
# -----------
# Make sure that the metadata is a dictionary
#
# TODO: Use "InlineModelProvider" and "FileBasedModelProvider"
if store and not isinstance(store, compat.string_type):
raise ArgumentError("Store should be provided by name "
"(as a string).")
# 1. Model Metadata
# -----------------
# Make sure that the metadata is a dictionary
#
# TODO: Use "InlineModelProvider" and "FileBasedModelProvider"
if isinstance(model, compat.string_type):
self.logger.debug("Importing model from %s. "
"Provider: %s Store: %s NS: %s"
% (model, provider, store, namespace))
path = model
if self.models_dir and not os.path.isabs(path):
path = os.path.join(self.models_dir, path)
model = read_model_metadata(path)
elif isinstance(model, dict):
self.logger.debug("Importing model from dictionary. "
"Provider: %s Store: %s NS: %s"
% (provider, store, namespace))
elif model is None:
model = {}
else:
raise ConfigurationError("Unknown model '%s' "
"(should be a filename or a dictionary)"
% model)
# 2. Model provider
# -----------------
# Create a model provider if name is given. Otherwise assume that the
# `provider` is a ModelProvider subclass instance
if isinstance(provider, compat.string_type):
provider = ext.model_provider(provider, model)
# TODO: remove this, if provider is external, it should be specified
if not provider:
provider_name = model.get("provider", "default")
provider = ext.model_provider(provider_name, model)
# 3. Store
# --------
# Link the model with store
store = store or model.get("store")
if store or (hasattr(provider, "requires_store") \
and provider.requires_store()):
provider.bind(self.get_store(store))
# 4. Namespace
# ------------
if namespace:
if namespace == "default":
ns = self.namespace
elif isinstance(namespace, compat.string_type):
(ns, _) = self.namespace.namespace(namespace, create=True)
else:
ns = namespace
elif store == "default":
ns = self.namespace
else:
# Namespace with the same name as the store.
(ns, _) = self.namespace.namespace(store, create=True)
ns.add_provider(provider)
def add_slicer(self, name, url, **options):
"""Register a slicer as a model and data provider."""
self.register_store(name, "slicer", url=url, **options)
self.import_model({}, provider="slicer", store=name)
def cube_names(self, identity=None):
"""Return names all available cubes."""
return [cube["name"] for cube in self.list_cubes()]
    # TODO: this is not localized!!!
def list_cubes(self, identity=None):
"""Get a list of metadata for cubes in the workspace. Result is a list
of dictionaries with keys: `name`, `label`, `category`, `info`.
The list is fetched from the model providers on the call of this
method.
If the workspace has an authorizer, then it is used to authorize the
cubes for `identity` and only authorized list of cubes is returned.
"""
all_cubes = self.namespace.list_cubes(recursive=True)
if self.authorizer:
by_name = dict((cube["name"], cube) for cube in all_cubes)
names = [cube["name"] for cube in all_cubes]
authorized = self.authorizer.authorize(identity, names)
all_cubes = [by_name[name] for name in authorized]
return all_cubes
def cube(self, ref, identity=None, locale=None):
"""Returns a cube with full cube namespace reference `ref` for user
`identity` and translated to `locale`."""
if not isinstance(ref, compat.string_type):
raise TypeError("Reference is not a string, is %s" % type(ref))
if self.authorizer:
authorized = self.authorizer.authorize(identity, [ref])
if not authorized:
raise NotAuthorized
# If we have a cached cube, return it
# See also: flush lookup
cube_key = (ref, identity, locale)
if cube_key in self._cubes:
return self._cubes[cube_key]
# Find the namespace containing the cube – we will need it for linking
# later
(namespace, provider, basename) = self.namespace.find_cube(ref)
cube = provider.cube(basename, locale=locale, namespace=namespace)
cube.namespace = namespace
cube.store = provider.store
# TODO: cube.ref -> should be ref and cube.name should be basename
cube.basename = basename
cube.name = ref
lookup = namespace.translation_lookup(locale)
if lookup:
            # TODO: pass lookup instead of just first found translation
context = LocalizationContext(lookup[0])
trans = context.object_localization("cubes", cube.name)
cube = cube.localized(trans)
# Cache the cube
self._cubes[cube_key] = cube
return cube
def dimension(self, name, locale=None, namespace=None, provider=None):
"""Returns a dimension with `name`. Raises `NoSuchDimensionError` when
no model published the dimension. Raises `RequiresTemplate` error when
model provider requires a template to be able to provide the
dimension, but such template is not a public dimension.
The standard lookup when linking a cube is:
1. look in the cube's provider
2. look in the cube's namespace – all providers within that namespace
3. look in the default (global) namespace
"""
return find_dimension(name, locale,
namespace or self.namespace,
provider)
def _browser_options(self, cube):
"""Returns browser configuration options for `cube`. The options are
        taken from the configuration file and then overridden by the cube's
`browser_options` attribute."""
options = dict(self.browser_options)
if cube.browser_options:
options.update(cube.browser_options)
return options
def browser(self, cube, locale=None, identity=None):
"""Returns a browser for `cube`."""
# TODO: bring back the localization
# model = self.localized_model(locale)
if isinstance(cube, compat.string_type):
cube = self.cube(cube, identity=identity)
locale = locale or cube.locale
if isinstance(cube.store, compat.string_type):
store_name = cube.store or "default"
store = self.get_store(store_name)
store_type = self.store_infos[store_name][0]
store_info = self.store_infos[store_name][1]
elif cube.store:
store = cube.store
store_info = store.options or {}
else:
store = self.get_store("default")
store_info = store.options or {}
store_type = store.store_type
if not store_type:
raise CubesError("Store %s has no store_type set" % store)
cube_options = self._browser_options(cube)
# TODO: merge only keys that are relevant to the browser!
options = dict(store_info)
options.update(cube_options)
# TODO: Construct options for the browser from cube's options
        # dictionary and workspace default configuration
browser_name = cube.browser
if not browser_name and hasattr(store, "default_browser_name"):
browser_name = store.default_browser_name
if not browser_name:
browser_name = store_type
if not browser_name:
raise ConfigurationError("No store specified for cube '%s'" % cube)
browser = ext.browser(browser_name, cube, store=store,
locale=locale, calendar=self.calendar,
**options)
# TODO: remove this once calendar is used in all backends
browser.calendar = self.calendar
return browser
def cube_features(self, cube, identity=None):
"""Returns browser features for `cube`"""
# TODO: this might be expensive, make it a bit cheaper
# recycle the feature-providing browser or something. Maybe use class
# method for that
return self.browser(cube, identity).features()
def get_store(self, name=None):
"""Opens a store `name`. If the store is already open, returns the
existing store."""
name = name or "default"
if name in self.stores:
return self.stores[name]
try:
type_, options = self.store_infos[name]
except KeyError:
raise ConfigurationError("Unknown store '{}'".format(name))
# TODO: temporary hack to pass store name and store type
store = ext.store(type_, store_type=type_, **options)
self.stores[name] = store
return store
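# A minimal usage sketch (illustrative; "slicer.ini", "model.json" and the cube
# name "sales" are placeholders, not shipped with cubes): build a workspace from
# a config file, register a model and open a browser for one cube.
if __name__ == "__main__":
    workspace = Workspace(config="slicer.ini")
    workspace.import_model("model.json")
    print(workspace.cube_names())
    browser = workspace.browser("sales")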
|
def minimum():
    # Find the smallest value in the list by scanning every element once.
    values = [8, 6, 4, 8, 4, 50, 2, 7]
    smallest = values[0]
    for value in values:
        if value < smallest:
            smallest = value
    print(smallest)
minimum()
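# For comparison (not part of the original exercise), Python's built-in min()
# performs the same scan in a single call:
print(min([8, 6, 4, 8, 4, 50, 2, 7]))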
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 09:21:23 2020
@author: SethHarden
"""
""" VERSION 1 ITERATIVE
# Data structure to store a Binary Tree node
class Node:
def __init__(self, key=None, left=None, right=None):
self.key = key
self.left = left
self.right = right
# Recursive function to check if two given binary trees are identical or not
def isIdentical(x, y):
# if both trees are empty, return true
if x is None and y is None:
return True
# if both trees are non-empty and value of their root node matches,
# recur for their left and right sub-tree
return (x and y) and (x.key == y.key) and \
isIdentical(x.left, y.left) and isIdentical(x.right, y.right)
if __name__ == '__main__':
# construct first tree
x = Node(15)
x.left = Node(10)
x.right = Node(20)
x.left.left = Node(8)
x.left.right = Node(12)
x.right.left = Node(16)
x.right.right = Node(25)
# construct second tree
y = Node(15)
y.left = Node(10)
y.right = Node(20)
y.left.left = Node(8)
y.left.right = Node(12)
y.right.left = Node(16)
y.right.right = Node(25)
if isIdentical(x, y):
print("Given Binary Trees are identical")
else:
print("Given Binary Trees are not identical")
------------ VERSION 2 ITERATIVE --------------
"""
from collections import deque
# Data structure to store a Binary Tree node
class Node:
def __init__(self, key=None, left=None, right=None):
self.key = key
self.left = left
self.right = right
# Iterative function to check if two given binary trees are identical or not
def isIdentical(x, y):
# if both trees are empty, return true
if x is None and y is None:
return True
# if first tree is empty (& second tree is non-empty), return false
if x is None:
return False
# if second tree is empty (& first tree is non-empty), return false
if y is None:
return False
# create a stack to hold pairs
stack = deque()
stack.append((x, y))
# loop till stack is empty
while stack:
# pop top pair from the stack and process it
x, y = stack.pop()
        # if the values of their root nodes don't match, return false
if x.key != y.key:
return False
# if left subtree of both x and y exists, push their addresses
# to stack else return false if only one left child exists
if x.left and y.left:
stack.append((x.left, y.left))
elif x.left or y.left:
return False
# if right subtree of both x and y exists, push their addresses
# to stack else return false if only one right child exists
if x.right and y.right:
stack.append((x.right, y.right))
elif x.right or y.right:
return False
# if we reach here, both binary trees are identical
return True
if __name__ == '__main__':
# construct first tree
x = Node(15)
x.left = Node(10)
x.right = Node(20)
x.left.left = Node(8)
x.left.right = Node(12)
x.right.left = Node(16)
x.right.right = Node(25)
# construct second tree
y = Node(15)
y.left = Node(10)
y.right = Node(20)
y.left.left = Node(8)
y.left.right = Node(12)
y.right.left = Node(16)
y.right.right = Node(25)
if isIdentical(x, y):
print("Given binary Trees are identical")
else:
print("Given binary Trees are not identical")
|
# -*- coding: utf-8 -*-
class Plateform(object):
""" Class who define a plateform.
yml files are read from heimdall/conf/hosts/plateform.yml and used for Plateform Object instantiation
Keyword Args:
:param: name: plateform's name.
:param: desc: plateform's description.
:param: id: plateform's id.
:param: path: plateform's path to yaml file.
:param: environment: plateform's environments.
:type: name: str
:type: desc: str
:type: id: int
:type: path: str
:type: environment: list of Host Object
:return: Plateform object
:rtype: object
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name', '')
self.desc = kwargs.get('desc', '')
self.id = kwargs.get('id', 1)
self.path = kwargs.get('path', '')
self.environment = kwargs.get('environment', '')
def __str__(self):
return super(Plateform, self).__str__()
def __repr__(self):
return super(Plateform, self).__repr__()
def __setattr__(self, name, value):
return object.__setattr__(self, name, value)
def __getattribute__(self, name):
return object.__getattribute__(self, name)
def __iter__(self):
for k, v in self.__dict__.iteritems():
if k == 'environment':
v = {}
for env, hosts in self.environment.iteritems():
v[env] = []
[v[env].append(dict(h)) for h in hosts]
yield (k, v)
def get_new_id(self, environment):
""" Return a new id available for a rule.
:return: Length of list of all Host Objects in hosts attribute.
:rtype: int
"""
return len(self.environment[environment]) + 1
def check_host_exist(self, hostname, environment):
""" Check if name exist in list of all Host Objects.
:param hostname: Host's name that must be checked
:param environment: Host's environment
:type: hostname: str
:type: environment: str
        :return: True if the host name exists in the environment, False otherwise.
        :rtype: bool
"""
exist = False
try:
            self.environment[environment]  # raises KeyError if the environment doesn't exist
except KeyError:
return exist
for hosts in self.environment[environment]:
if hosts.name == hostname:
exist = True
return exist
def check_environment(self, environment):
if environment not in self.environment.keys():
return False
else:
return True
def remove_host(self, host):
""" Remove a Host Object from Plateform.
:param: host: Host Object that must be removed.
:type: host: Host Object
:return: Updated Plateform Object
:rtype: Plateform Object
"""
        if host == -1:
            self.environment[host.environment] = []
        elif isinstance(host, list):
            for h in host:
                self.environment[h.environment].remove(h)
        else:
            self.environment[host.environment].remove(host)
return self
def add_host(self, host):
""" Add a Host Object to Plateform.
:param: host: Host Object that must be added.
:type: host: Host Object
:return: Updated Plateform Object
:rtype: Plateform Object
"""
self.environment[host.environment].append(host)
return self
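# A minimal usage sketch (illustrative; the real Host class lives elsewhere in
# heimdall, so a tiny stand-in exposing the two attributes used above is defined
# here):
if __name__ == "__main__":
    class _DemoHost(object):
        def __init__(self, name, environment):
            self.name = name
            self.environment = environment
    plateform = Plateform(name="demo", desc="example", id=1, path="",
                          environment={"prod": []})
    plateform.add_host(_DemoHost("web01", "prod"))
    print(plateform.check_host_exist("web01", "prod"))  # expected: True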
|
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import BatchNormalization
from keras.layers import Dense
from keras.layers import Input, Reshape, CuDNNLSTM, Bidirectional
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Model
from keras.models import Sequential
from keras.optimizers import Adam
from keras.utils import np_utils
from ds_utils import load_dataset
from midi_utils import convert_to_midi
def prepare_sequences(notes, pitch_names, latent_dim):
""" Prepare the sequences used by the Neural Network """
# Create a dictionary to map pitches to integers
note_to_int = dict((note, number) for number, note in enumerate(pitch_names))
network_input = []
network_output = []
sequence_length = 100
# Create input sequences and the corresponding outputs
for i in range(0, len(notes) - sequence_length, 1):
sequence_in = notes[i:i + sequence_length]
sequence_out = notes[i + sequence_length]
network_input.append([note_to_int[char] for char in sequence_in])
network_output.append(note_to_int[sequence_out])
n_patterns = len(network_input)
# Reshape the input into a format compatible with LSTM layers
# network_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
normalized_input = np.reshape(network_input, (n_patterns, sequence_length, 1))
# Normalize input
# network_input = network_input / float(latent_dim)
normalized_input = normalized_input / float(latent_dim)
network_output = np_utils.to_categorical(network_output)
return network_input, normalized_input, network_output
class ComposerGAN:
def __init__(self):
self.notes = load_dataset('kaggle_ds_dump.notes')
self.latent_dim = len(set(self.notes))
self.pitch_names = sorted(set(item for item in self.notes))
self.x, self.x_normalized, self.y = prepare_sequences(self.notes, self.pitch_names, self.latent_dim)
self.seq_shape = (self.x_normalized.shape[1], self.x_normalized.shape[2])
self.disc_loss = []
self.gen_loss = []
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates note sequences
z = Input(shape=(self.latent_dim,))
generated_seq = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
validity = self.discriminator(generated_seq)
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = Model(z, validity)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def build_discriminator(self):
model = Sequential()
model.add(CuDNNLSTM(512, input_shape=self.seq_shape, return_sequences=True))
model.add(Bidirectional(CuDNNLSTM(512)))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
seq = Input(shape=self.seq_shape)
validity = model(seq)
return Model(seq, validity)
def build_generator(self):
model = Sequential()
model.add(Dense(256, input_dim=self.latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(1024))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.seq_shape), activation='tanh'))
model.add(Reshape(self.seq_shape))
model.summary()
noise = Input(shape=(self.latent_dim,))
seq = model(noise)
return Model(noise, seq)
def train(self, epochs, batch_size, save_period):
# Adversarial ground truths
real = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# Training the model
for epoch in range(1, epochs + 1):
# Training the discriminator
# Select a random batch of note sequences
idx = np.random.randint(0, self.x_normalized.shape[0], batch_size)
real_seqs = self.x_normalized[idx]
# noise = np.random.choice(range(484), (batch_size, self.latent_dim))
# noise = (noise-242)/242
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Generate a batch of new note sequences
gen_seqs = self.generator.predict(noise)
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(real_seqs, real)
d_loss_fake = self.discriminator.train_on_batch(gen_seqs, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# Training the Generator
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            # Train the generator (to have the discriminator label generated samples as real)
g_loss = self.combined.train_on_batch(noise, real)
# Print the progress and save into loss lists
if epoch % save_period == 0:
print(f'Epoch {epoch}/{epochs} [D loss: {d_loss[0]}, accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]')
self.disc_loss.append(d_loss[0])
self.gen_loss.append(g_loss)
self.discriminator.save(f'./weights/disc-{epochs}.h5')
self.generator.save(f'./weights/gen-{epochs}.h5')
self.plot_loss()
def generate(self):
self.generator.load_weights('./best/gen.h5')
self.discriminator.load_weights('./best/disc.h5')
pitchnames = sorted(set(item for item in self.notes))
int_to_note = dict((number, note) for number, note in enumerate(pitchnames))
# Use random noise to generate sequences
noise = np.random.normal(0, 1, (1, self.latent_dim))
predictions = self.generator.predict(noise)
pred_notes = [x * self.latent_dim for x in predictions[0]]
pred_notes = [int_to_note[int(x)] for x in pred_notes]
convert_to_midi('gan_samples', pred_notes)
def plot_loss(self):
plt.plot(self.disc_loss, c='red')
plt.plot(self.gen_loss, c='blue')
plt.title("GAN Loss per Epoch")
plt.legend(['Discriminator', 'Generator'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.savefig('GAN_Loss_per_Epoch_final.png', transparent=True)
plt.close()
if __name__ == '__main__':
composer_gan = ComposerGAN()
# composer_gan.train(epochs=5_000, batch_size=256, save_period=100)
composer_gan.generate()
|
'''
Copyright 2020 The Microsoft DeepSpeed Team
'''
import math
import torch
import time
from pathlib import Path
from ..op_builder import CPUAdamBuilder
from deepspeed.utils.logging import should_log_le
class DeepSpeedCPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self,
model_params,
lr=1e-3,
bias_correction=True,
betas=(0.9,
0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adamw_mode=True,
fp32_optimizer_states=True):
"""Fast vectorized implementation of two variations of Adam optimizer on CPU:
* Adam: A Method for Stochastic Optimization: (https://arxiv.org/abs/1412.6980);
* AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101)
        DeepSpeed CPU Adam(W) provides a 5x to 7x speedup over torch.optim.Adam(W).
        In order to apply this optimizer, the model's master parameters (in FP32) must
        reside in CPU memory.
To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers
the ZeRO-Offload technology which efficiently offloads the optimizer states into CPU memory,
with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role to minimize
the overhead of the optimizer's latency on CPU. Please refer to ZeRO-Offload tutorial
(https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to enable this technology.
For calling step function, there are two options available: (1) update optimizer's states and (2) update
optimizer's states and copy the parameters back to GPU at the same time. We have seen that the second
        option can bring 30% higher throughput than doing the copy separately using option one.
.. note::
We recommend using our `config
<https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`_
to allow :meth:`deepspeed.initialize` to build this optimizer
for you.
Arguments:
model_params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in DeepSpeed CPUAdam!
adamw_mode: select between Adam and AdamW implementations (default: AdamW)
            fp32_optimizer_states: creates momentum and variance in full precision regardless of
                the precision of the parameters (default: True)
"""
default_args = dict(lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
bias_correction=bias_correction,
amsgrad=amsgrad)
super(DeepSpeedCPUAdam, self).__init__(model_params, default_args)
self.opt_id = DeepSpeedCPUAdam.optimizer_id
DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1
self.adam_w_mode = adamw_mode
self.fp32_optimizer_states = fp32_optimizer_states
self.ds_opt_adam = CPUAdamBuilder().load()
self.ds_opt_adam.create_adam(self.opt_id,
lr,
betas[0],
betas[1],
eps,
weight_decay,
adamw_mode,
should_log_le("info"))
def __del__(self):
# need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
# is used multiple times in the same process (notebook or pytest worker)
self.ds_opt_adam.destroy_adam(self.opt_id)
def __setstate__(self, state):
super(DeepSpeedCPUAdam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None, fp16_param_groups=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
fp16_param_groups: FP16 GPU parameters to update. Performing the
copy here reduces communication time. Defaults to ``None``.
Returns:
loss: if ``closure`` is provided. Otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
#use full precision by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
# gradient momentums
state['exp_avg'] = torch.zeros_like(p.data,
dtype=state_dtype,
device='cpu')
#memory_format=torch.preserve_format)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data,
dtype=state_dtype,
device='cpu')
#memory_format=torch.preserve_format)
state['step'] += 1
beta1, beta2 = group['betas']
if fp16_param_groups is not None:
self.ds_opt_adam.adam_update_copy(
self.opt_id,
state['step'],
group['lr'],
beta1,
beta2,
group['eps'],
group['weight_decay'],
group['bias_correction'],
p.data,
p.grad.data,
state['exp_avg'],
state['exp_avg_sq'],
fp16_param_groups[group_id][param_id].data)
else:
self.ds_opt_adam.adam_update(self.opt_id,
state['step'],
group['lr'],
beta1,
beta2,
group['eps'],
group['weight_decay'],
group['bias_correction'],
p.data,
p.grad.data,
state['exp_avg'],
state['exp_avg_sq'])
return loss
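# A minimal usage sketch (illustrative only; it assumes a DeepSpeed install whose
# CPUAdam extension can be built and loaded, and uses a throwaway linear model):
if __name__ == "__main__":
    model = torch.nn.Linear(10, 1)  # parameters live on the CPU, as required
    optimizer = DeepSpeedCPUAdam(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    optimizer.step()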
|
from django.shortcuts import render, redirect
from django.views import generic
from .forms import (
BookFormset,
BookModelFormset,
BookModelForm,
AuthorFormset
)
from .models import Book, Author
def create_book_normal(request):
template_name = 'store/create_normal.html'
heading_message = 'Formset Demo'
if request.method == 'GET':
formset = BookFormset(request.GET or None)
elif request.method == 'POST':
formset = BookFormset(request.POST)
if formset.is_valid():
for form in formset:
name = form.cleaned_data.get('name')
# save book instance
if name:
Book(name=name).save()
return redirect('store:book_list')
return render(request, template_name, {
'formset': formset,
'heading': heading_message,
})
class BookListView(generic.ListView):
model = Book
context_object_name = 'books'
template_name = 'store/list.html'
def create_book_model_form(request):
template_name = 'store/create_normal.html'
heading_message = 'Model Formset Demo'
if request.method == 'GET':
formset = BookModelFormset(queryset=Book.objects.none())
elif request.method == 'POST':
formset = BookModelFormset(request.POST)
if formset.is_valid():
for form in formset:
# only save if name is present
if form.cleaned_data.get('name'):
form.save()
return redirect('store:book_list')
return render(request, template_name, {
'formset': formset,
'heading': heading_message,
})
def create_book_with_authors(request):
template_name = 'store/create_with_author.html'
if request.method == 'GET':
bookform = BookModelForm(request.GET or None)
formset = AuthorFormset(queryset=Author.objects.none())
elif request.method == 'POST':
bookform = BookModelForm(request.POST)
formset = AuthorFormset(request.POST)
if bookform.is_valid() and formset.is_valid():
# first save this book, as its reference will be used in `Author`
book = bookform.save()
for form in formset:
# so that `book` instance can be attached.
author = form.save(commit=False)
author.book = book
author.save()
return redirect('store:book_list')
return render(request, template_name, {
'bookform': bookform,
'formset': formset,
})
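# For reference, a sketch of how the formsets imported above might be declared in
# store/forms.py (only the names match the imports; the field lists and extra
# counts are assumptions):
#
#     from django import forms
#     from django.forms import formset_factory, modelformset_factory
#     from .models import Author, Book
#
#     class BookForm(forms.Form):
#         name = forms.CharField(max_length=100)
#
#     BookFormset = formset_factory(BookForm, extra=2)
#     BookModelFormset = modelformset_factory(Book, fields=("name",), extra=2)
#     AuthorFormset = modelformset_factory(Author, fields=("name",), extra=2)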
|
from trpgcreator.ui.widgets.other_stat_row import Ui_OtherStatRow
from PyQt5.QtWidgets import QWidget
class OtherStatRow(QWidget):
def __init__(self, stat_name, stat_id, current):
super().__init__()
self.wid = Ui_OtherStatRow()
self.wid.setupUi(self)
self.wid.doubleSpinBoxStart.setValue(current)
self.wid.labelStatName.setText(stat_name)
self.stat_id = stat_id
def get_data(self):
return {
'current': self.wid.doubleSpinBoxStart.value()
}
|
# Generated by Django 3.0.6 on 2020-05-17 23:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('asana_app', '0002_auto_20200517_2349'),
]
operations = [
migrations.AddField(
model_name='taskmodel',
name='name',
field=models.CharField(default='', editable=False, max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='usermodel',
name='name',
field=models.CharField(editable=False, max_length=200),
),
]
|
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import os
import numpy as np
from sklearn.metrics import precision_recall_curve, auc
from scipy.stats import mannwhitneyu
import math
XRAY_PATH = os.path.join("evaluation", "train_xray")
MIXED_PATH = os.path.join("evaluation", "train_mixed")
ENTRIES_PATH = "../data/entries.idx"
FIGURE_DIR = "figures"
CPD_EMA_PDB_PATH = None # fill in with the location of the text files with pdb ids for CPD and EMA splits
CAT_PDB_PATH = os.path.join("data","splits")
######################
### UTIL FUNCTIONS ###
######################
def standard_error(vals):
std = np.std(vals)
err = std / (len(vals) ** 0.5)
return err
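# For example (illustrative): standard_error([1.0, 2.0, 3.0]) divides the
# population standard deviation (~0.816) by sqrt(3), giving roughly 0.471.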
def calculate_error(lines):
vals = [float(line.split()[-1]) for line in lines]
return standard_error(vals)
def get_auprc_per_class(path):
auprc_per_class = dict()
err_per_class = dict()
auprc_list_per_class = dict()
for test in ["test_xray", "test_nmr"]:
auprc_per_class[test] = dict()
err_per_class[test] = dict()
auprc_list_per_class[test] = {1: [], 2: [], 3: [], 4: [], 5: [], 7: []}
fname = test + "_ec_auprcs.txt"
f = open(os.path.join(path, fname),"r").read().split("\n")[:-1]
for line in f:
ec = line.split()[2]
c = int(ec.split(".")[0])
auprc = float(line.split()[-1])
auprc_list_per_class[test][c].append(auprc)
for c in range(1,8):
if c == 6:
continue
auprc_per_class[test][c] = np.mean(auprc_list_per_class[test][c])
err_per_class[test][c] = standard_error(auprc_list_per_class[test][c])
return auprc_per_class, err_per_class, auprc_list_per_class
def full_year(y):
try:
y = int(y)
except:
return 0
else:
if y < 22:
return 2000 + y
else:
return 1900 + y
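# For example (illustrative): full_year("21") -> 2021, full_year("95") -> 1995,
# and a non-numeric suffix such as "xx" falls through to 0.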
####################################
### PLOTTING FUNCTIONS: Figure 1 ###
####################################
def fig1():
colordict = {"test_xray": {"train_xray": (200/255, 182/255, 210/255), "train_mixed": (106/255, 73/255, 142/255)},
"test_nmr": {"train_xray": (179/255, 212/255, 149/255), "train_mixed": (64/255, 146/255, 58/255)}}
entries = pd.read_csv(ENTRIES_PATH, sep="\t", skiprows=2, names=["IDCODE", "HEADER", "ACCESSION DATE", "COMPOUND", "SOURCE", "AUTHOR LIST", "RESOLUTION", "EXPERIMENT TYPE"])
years = [full_year(d[-2:]) for d in entries["ACCESSION DATE"].tolist()]
entries["full year"] = years
years_list = entries["full year"].unique()
years_list.sort()
fig, ax = plt.subplots()
for y in years_list:
if y < 1990 or y > 2020:
continue
types = entries.loc[entries["full year"] == y]["EXPERIMENT TYPE"].tolist()
n_xray = len([x for x in types if "X-RAY DIFFRACTION" in x])
n_nmr = len([x for x in types if "SOLUTION NMR" in x])
n_em = len([x for x in types if "ELECTRON MICROSCOPY" in x])
ax.bar(y, n_xray + n_nmr + n_em, width=1, color="gray")
ax.bar(y, n_xray, width=1, color="darkviolet", alpha = 1.0)
ax.bar(y, n_em, width=1, color="white", alpha = 1.0, label="_nolegend_")
ax.bar(y, n_nmr, width=1, color="limegreen", alpha = 1.0)
ax.bar(y, n_em, width=1, color="goldenrod", alpha = 0.8)
plt.title("Experiment type of structures deposited in PDB, 1990-2020")
plt.xlabel("Year")
plt.ylabel("Number of structures")
ax.legend(labels=["Total", "X-Ray Crystallography", "NMR", "Cryo-EM"])
plt.savefig(os.path.join(FIGURE_DIR, "fig1.png"), dpi=400)
plt.clf()
###################################
### PLOTTING FUNCTIONS: DeepFRI ###
###################################
def res_pr_curve():
colordict = {"test_xray": {"train_xray": (200/255, 182/255, 210/255), "train_mixed": (106/255, 73/255, 142/255)},
"test_nmr": {"train_xray": (179/255, 212/255, 149/255), "train_mixed": (64/255, 146/255, 58/255)}}
xray_train_xray_test = pd.read_csv(os.path.join(XRAY_PATH, "test_xray_res_pr.csv"), index_col=0)
mixed_train_xray_test = pd.read_csv(os.path.join(MIXED_PATH, "test_xray_res_pr.csv"), index_col=0)
xray_train_nmr_test = pd.read_csv(os.path.join(XRAY_PATH, "test_nmr_res_pr.csv"), index_col=0)
mixed_train_nmr_test = pd.read_csv(os.path.join(MIXED_PATH, "test_nmr_res_pr.csv"), index_col=0)
xray_train_xray_test_auc = auc(xray_train_xray_test["Recall"], xray_train_xray_test["Precision"])
mixed_train_xray_test_auc = auc(mixed_train_xray_test["Recall"], mixed_train_xray_test["Precision"])
xray_train_nmr_test_auc = auc(xray_train_nmr_test["Recall"], xray_train_nmr_test["Precision"])
mixed_train_nmr_test_auc = auc(mixed_train_nmr_test["Recall"], mixed_train_nmr_test["Precision"])
plt.plot(xray_train_xray_test["Recall"], xray_train_xray_test["Precision"], label = "X-ray / Train: X-ray (AUPRC =" + str(xray_train_xray_test_auc) + ")", color=colordict["test_xray"]["train_xray"])
plt.plot(mixed_train_xray_test["Recall"], mixed_train_xray_test["Precision"], label = "X-ray / Train: Mixed (AUPRC = " + str(mixed_train_xray_test_auc) + ")", color=colordict["test_xray"]["train_mixed"])
plt.plot(xray_train_nmr_test["Recall"], xray_train_nmr_test["Precision"], label = "NMR / Train: X-ray (AUPRC = " + str(xray_train_nmr_test_auc) + ")", color=colordict["test_nmr"]["train_xray"])
plt.plot(mixed_train_nmr_test["Recall"], mixed_train_nmr_test["Precision"], label = "NMR / Train: Mixed (AUPRC = " + str(mixed_train_nmr_test_auc) + ")", color=colordict["test_nmr"]["train_mixed"])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("DeepFRI catalytic residue identification")
plt.legend(loc='upper right')
plt.savefig(os.path.join(FIGURE_DIR, "fig3b.png"),dpi=400)
plt.clf()
def avg_auprc_bar_ec():
colordict = {"xray": {XRAY_PATH: (200/255, 182/255, 210/255), MIXED_PATH: (106/255, 73/255, 142/255)},
"nmr": {XRAY_PATH: (179/255, 212/255, 149/255), MIXED_PATH: (64/255, 146/255, 58/255)}}
bar_width = 0.33
fig, ax = plt.subplots()
x = 0
for path in [XRAY_PATH, MIXED_PATH]:
for test in ["test_xray", "test_nmr"]:
fname = test + "_ec_auprcs.txt"
fname = os.path.join(path, fname)
f = open(fname, "r").read().split("\n")
err = calculate_error(f[:-1])
line = f[-1]
auprc = float(line.split()[2])
ax.bar(x, auprc, width = bar_width, color=colordict[test][path], yerr=err, capsize=10)
x += bar_width
x += bar_width
plt.title("DeepFRI EC Number Prediction")
plt.ylabel("Average AUPRC")
plt.xticks([0, bar_width, 2.5 * bar_width, 3.5 * bar_width], ["Test: X-ray\nTrain: Mixed", "Test: X-ray\nTrain: X-ray", "Test: NMR\nTrain: Mixed", "Test: NMR\nTrain: X-ray"])
plt.savefig(os.path.join(FIGURE_DIR, "fig4a.png"), dpi=400)
plt.clf()
def avg_auprc_bar_ec_by_class():
colordict = {"test_xray": {"train_xray": (200/255, 182/255, 210/255), "train_mixed": (106/255, 73/255, 142/255)},
"test_nmr": {"train_xray": (179/255, 212/255, 149/255), "train_mixed": (64/255, 146/255, 58/255)}}
bar_width = 1
cap_size = 4
distdict = dict()
auprc_per_class_xray, err_per_class_xray, distdict["train_xray"] = get_auprc_per_class(XRAY_PATH)
auprc_per_class_mixed, err_per_class_mixed, distdict["train_mixed"] = get_auprc_per_class(MIXED_PATH)
fig, ax = plt.subplots()
X = 0
for ec_class in range(1, 8):
if ec_class == 6:
continue
for test in ["test_xray", "test_nmr"]:
ax.bar(X, auprc_per_class_mixed[test][ec_class], width = bar_width, color=colordict[test]["train_mixed"], yerr=err_per_class_mixed[test][ec_class], capsize=cap_size)
ax.bar(X + bar_width, auprc_per_class_xray[test][ec_class], width = bar_width, color=colordict[test]["train_xray"], yerr=err_per_class_xray[test][ec_class], capsize=cap_size)
X += 2 * bar_width
X += bar_width
print("Mann-Whitney U test for Class " + str(ec_class))
for pair in [(("test_xray", "train_xray"), ("test_xray", "train_mixed")),
(("test_nmr", "train_xray"), ("test_nmr", "train_mixed")),
(("test_xray", "train_xray"), ("test_nmr", "train_xray")),
(("test_xray", "train_mixed"), ("test_nmr", "train_mixed"))]:
test1, train1 = pair[0]
test2, train2 = pair[1]
print(str(pair))
print(mannwhitneyu(distdict[train1][test1][ec_class], distdict[train2][test2][ec_class]))
print("----------")
print("----------")
plt.title("DeepFRI EC Number Prediction by Enzyme Class")
plt.ylabel("Average AUPRC")
ax.legend(labels=["X-ray / Train: Mixed", "X-ray / Train: X-ray", "NMR / Train: Mixed", "NMR / Train: X-ray"], bbox_to_anchor=(1.04,1), loc="upper left")
ticks = ["Oxidoreductases","Transferases","Hydrolases","Lyases","Isomerases","Translocases"]
plt.xticks(np.arange(bar_width * 1.5, 100, bar_width * 5)[:len(ticks)], ticks, rotation=40)
plt.savefig(os.path.join(FIGURE_DIR, "fig4b.png"), bbox_inches="tight", dpi=400)
plt.clf()
######################################
### PLOTTING FUNCTIONS: Supplement ###
######################################
def split_year_dists(train_list, val_list, test_lists, task, outf, lims, labels):
colordict = {"X-RAY DIFFRACTION": "darkviolet",
"SOLUTION NMR": "limegreen",
"ELECTRON MICROSCOPY": "goldenrod"}
entries = pd.read_csv(ENTRIES_PATH, sep="\t", skiprows=2, names=["IDCODE", "HEADER", "ACCESSION DATE", "COMPOUND", "SOURCE", "AUTHOR LIST", "RESOLUTION", "EXPERIMENT TYPE"])
train = open(train_list,"r").read().split("\n")
val = open(val_list,"r").read().split("\n")
if type(test_lists) == list:
test_xray = open(test_lists[0],"r").read().split("\n")
test_non_xray = open(test_lists[1],"r").read().split("\n")
test = test_xray + test_non_xray
else:
test = open(test_lists,"r").read().split("\n")
counter = 1
for name, split in [["training", train], ["validation", val], ["testing", test]]:
split = [elem[:4].upper() for elem in split]
df = entries.loc[entries["IDCODE"].isin(split)][["IDCODE","ACCESSION DATE","EXPERIMENT TYPE"]]
years = [full_year(d[-2:]) for d in df["ACCESSION DATE"].tolist()]
df["full year"] = years
years_list = range(lims['xmin'], lims['xmax'])
for method in ["X-RAY DIFFRACTION", "SOLUTION NMR", "ELECTRON MICROSCOPY"]:
sub_df = df.loc[df["EXPERIMENT TYPE"] == method]
if len(sub_df) == 0:
counter += 3
continue
ax = plt.subplot(3, 3, counter)
ax.set(xlim=(lims['xmin'], lims['xmax']))
if counter < 4:
plt.title(name)
for y in years_list:
prots = df.loc[df["full year"] == y]["IDCODE"].tolist()
types = df.loc[df["full year"] == y]["EXPERIMENT TYPE"].tolist()
examples_per_prot = dict()
for p in split:
if p in examples_per_prot.keys():
examples_per_prot[p] += 1
else:
examples_per_prot[p] = 1
n = sum([examples_per_prot[prots[i]] for i in range(len(prots)) if method in types[i]])
ax.bar(y, n, width=1, color=colordict[method])
counter += 3
counter -= 8
plt.tight_layout(rect=[0,0.03,1,0.95])
plt.suptitle(task + ": Distribution of deposition years per structure type")
plt.text(labels["xlabelx"], labels["xlabely"], "Year", ha='center')
plt.text(labels["ylabelx"], labels["ylabely"], "Number of structures", va='center', rotation='vertical')
xray_patch = mpatches.Patch(color=colordict["X-RAY DIFFRACTION"], label="X-Ray Crystallography")
nmr_patch = mpatches.Patch(color=colordict["SOLUTION NMR"], label="NMR")
em_patch = mpatches.Patch(color=colordict["ELECTRON MICROSCOPY"], label="Cryo-EM")
plt.legend(handles=[xray_patch, nmr_patch, em_patch], bbox_to_anchor=(1.04,3.9), loc="upper left")
plt.savefig(outf, dpi=400, bbox_inches="tight")
plt.clf()
def split_res_dists(train_list, val_list, test_lists, task, outf, lims, labels):
# NMR is omitted because it does not report a resolution in the same way
colordict = {"X-RAY DIFFRACTION": "darkviolet",
"ELECTRON MICROSCOPY": "goldenrod"}
bin_width = 0.1
entries = pd.read_csv(ENTRIES_PATH, sep="\t", skiprows=2, names=["IDCODE", "HEADER", "ACCESSION DATE", "COMPOUND", "SOURCE", "AUTHOR LIST", "RESOLUTION", "EXPERIMENT TYPE"])
train = open(train_list,"r").read().split("\n")
val = open(val_list,"r").read().split("\n")
if type(test_lists) == list:
test_xray = open(test_lists[0],"r").read().split("\n")
test_non_xray = open(test_lists[1],"r").read().split("\n")
test = test_xray + test_non_xray
else:
test = open(test_lists,"r").read().split("\n")
counter = 1
for name, split in [["training", train], ["validation", val], ["testing", test]]:
split = [elem[:4].upper() for elem in split]
df = entries.loc[entries["IDCODE"].isin(split)][["IDCODE","RESOLUTION","EXPERIMENT TYPE"]]
for method in ["X-RAY DIFFRACTION", "ELECTRON MICROSCOPY"]:
sub_df = df.loc[df["EXPERIMENT TYPE"] == method]
if len(sub_df) == 0:
counter += 3
continue
res_list = []
for r in sub_df["RESOLUTION"]:
try:
tmp = float(r)
except:
spl = r.split(",")
if len(spl) > 1:
try:
l = [float(elem) for elem in spl]
except:
continue
else:
res_list.append(min(l))
else:
res_list.append(tmp)
bins = np.arange(0,math.ceil(max(res_list)), bin_width)
bin_counts = dict()
for r in res_list:
for i in range(len(bins)):
if bins[i] > r:
if bins[i-1] in bin_counts.keys():
bin_counts[bins[i-1]] += 1
else:
bin_counts[bins[i-1]] = 1
break
ax = plt.subplot(2, 3, counter)
ax.set(xlim=(lims['xmin'], lims['xmax']))
if counter < 4:
plt.title(name)
for b in bins:
if b in bin_counts.keys():
ax.bar(b, bin_counts[b], width=bin_width, color=colordict[method])
counter += 3
counter -= 5
plt.tight_layout(rect=[0,0.03,1,0.95])
plt.suptitle(task + ": Distribution of resolutions per structure type")
plt.text(labels["xlabelx"], labels["xlabely"], "Resolution (Angstroms)", ha='center')
plt.text(labels["ylabelx"], labels["ylabely"], "Number of structures", va='center', rotation='vertical')
xray_patch = mpatches.Patch(color=colordict["X-RAY DIFFRACTION"], label="X-Ray Crystallography")
em_patch = mpatches.Patch(color=colordict["ELECTRON MICROSCOPY"], label="Cryo-EM")
plt.legend(handles=[xray_patch, em_patch], bbox_to_anchor=(1.04,2.28), loc="upper left")
plt.savefig(outf, dpi=400, bbox_inches="tight")
plt.clf()
def split_res_dists_all():
if CPD_EMA_PDB_PATH != None: # define this at top of file to plot!
split_res_dists(os.path.join(CPD_EMA_PDB_PATH, "cpd_train_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "cpd_val_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "cpd_test_pdbs.txt"),
"Protein Sequence Design",
os.path.join(FIGURE_DIR, "figS7.png"),
{"xmin": 0,
"xmax": 4,
"ymin": 0,
"ymax": {"X-RAY DIFFRACTION": 1600, "ELECTRON MICROSCOPY": 10}},
{"xlabelx": -3.5,
"xlabely": -0.5,
"ylabelx": -13,
"ylabely": 2.5})
split_res_dists(os.path.join(CPD_EMA_PDB_PATH, "ema_train_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "ema_val_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "ema_test_pdbs.txt"),
"Estimation of Model Accuracy",
os.path.join(FIGURE_DIR, "figS8.png"),
{"xmin": 0,
"xmax": 4,
"ymin": 0,
"ymax": {"X-RAY DIFFRACTION": 35, "ELECTRON MICROSCOPY": 2}},
{"xlabelx": -2.7,
"xlabely": -0.3,
"ylabelx": -12.5,
"ylabely": 1})
split_res_dists(os.path.join(CAT_PDB_PATH, "train_mixed.txt"),
os.path.join(CAT_PDB_PATH, "val_mixed.txt"),
[os.path.join(CAT_PDB_PATH, "test_xray.txt"),os.path.join(CAT_PDB_PATH, "test_nmr.txt")],
"Catalytic Residue Prediction",
os.path.join(FIGURE_DIR, "figS9.png"),
{"xmin": 0,
"xmax": 20,
"ymin": 0,
"ymax": {"X-RAY DIFFRACTION": 2500, "ELECTRON MICROSCOPY": 5}},
{"xlabelx": -17,
"xlabely": -1,
"ylabelx": -69,
"ylabely": 3})
def split_year_dists_all():
if CPD_EMA_PDB_PATH != None: # define this at top of file to plot!
split_year_dists(os.path.join(CPD_EMA_PDB_PATH, "cpd_train_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "cpd_val_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "cpd_test_pdbs.txt"),
"Protein Sequence Design",
os.path.join(FIGURE_DIR, "figS4.png"),
{"xmin": 1975,
"xmax": 2020,
"ymin": 0,
"ymax": {"X-RAY DIFFRACTION": 1000, "SOLUTION NMR": 320, "ELECTRON MICROSCOPY": 40}},
{"xlabelx": 1932,
"xlabely": -3,
"ylabelx": 1817,
"ylabely": 10})
split_year_dists(os.path.join(CPD_EMA_PDB_PATH, "ema_train_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "ema_val_pdbs.txt"),
os.path.join(CPD_EMA_PDB_PATH, "ema_test_pdbs.txt"),
"Estimation of Model Accuracy",
os.path.join(FIGURE_DIR, "figS5.png"),
{"xmin": 2005,
"xmax": 2021,
"ymin": 0,
"ymax": {"X-RAY DIFFRACTION": 80, "SOLUTION NMR": 20, "ELECTRON MICROSCOPY": 4}},
{"xlabelx": 1990,
"xlabely": -0.4,
"ylabelx": 1952,
"ylabely": 2})
split_year_dists(os.path.join(CAT_PDB_PATH, "train_mixed.txt"),
os.path.join(CAT_PDB_PATH, "val_mixed.txt"),
[os.path.join(CAT_PDB_PATH, "test_xray.txt"),os.path.join(CAT_PDB_PATH, "test_nmr.txt")],
"Catalytic Residue Prediction",
os.path.join(FIGURE_DIR, "figS6.png"),
{"xmin": 1970,
"xmax": 2015,
"ymin": 0,
"ymax": {"X-RAY DIFFRACTION": 1700, "SOLUTION NMR": 15, "ELECTRON MICROSCOPY": 18}},
{"xlabelx": 1930,
"xlabely": -4,
"ylabelx": 1822,
"ylabely": 20})
if __name__ == "__main__":
fig1()
res_pr_curve() # fig 3b
avg_auprc_bar_ec() # fig 4a
avg_auprc_bar_ec_by_class() # fig 4b
split_year_dists_all() # figs S4-6
split_res_dists_all() # figs S7-9
|
import logging
logger = logging.getLogger(__name__)
TorchTrainer = None
TrainingOperator = None
BaseTorchTrainable = None
CreatorOperator = None
try:
import torch # noqa: F401
from ray.util.sgd.torch.torch_trainer import (TorchTrainer,
BaseTorchTrainable)
from ray.util.sgd.torch.training_operator import (TrainingOperator,
CreatorOperator)
__all__ = [
"TorchTrainer", "BaseTorchTrainable", "TrainingOperator",
"CreatorOperator"
]
except ImportError as e:
logger.warning(e)
logger.warning("PyTorch not found. TorchTrainer will not be available")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 10:43:29 2016
@author: rob
"""
import numpy as np
import tensorflow as tf
from numpy import genfromtxt
import matplotlib.pyplot as plt
sess = tf.InteractiveSession()
#Load the data
data_train = genfromtxt('ModelingTrain.csv',delimiter=',',skip_header=1)
data_test = genfromtxt('ModelingTest.csv',delimiter=',',skip_header=1)
N = data_train.shape[0]
#Create train/validation split: first 7000 shuffled rows for training, the rest for validation
Ntrain = 7000
ind = np.random.permutation(N)
X_train = data_train[ind[:Ntrain],2:]
y_train = data_train[ind[:Ntrain],1]
X_val = data_train[ind[Ntrain:],2:]
y_val = data_train[ind[Ntrain:],1]
N,D = X_train.shape
Nval = X_val.shape[0]
#Check for the input sizes
assert (N>D), 'You are feeding a fat matrix for training, are you sure?'
assert (Nval>D), 'You are feeding a fat matrix for testing, are you sure?'
# Nodes for the input variables
x = tf.placeholder("float", shape=[None, D], name = 'Input_data')
y_ = tf.placeholder(tf.int64, shape=[None], name = 'Ground_truth')
# Define functions for initializing variables and standard layers
#For now, this seems superfluous, but when extending the code
#to many more layers, it will keep our code
#readable
def weight_variable(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name = name)
def bias_variable(shape, name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name = name)
#with tf.name_scope("Conv1") as scope:
# W_conv1 = weight_variable([5, 5, 1, 32], 'Conv_Layer_1')
# b_conv1 = bias_variable([32], 'bias_for_Conv_Layer_1')
# h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# h_pool1 = max_pool_2x2(h_conv1)
#
## The name_scope lines serve to organize our graphs that TensorFlow will create
## for us
#with tf.name_scope("Conv2") as scope:
# W_conv2 = weight_variable([5, 5, 32, 64], 'Conv_Layer_2')
# b_conv2 = bias_variable([64], 'bias_for_Conv_Layer_2')
# h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# h_pool2 = max_pool_2x2(h_conv2)
h1 = 50
h2 = 12
with tf.name_scope("Fully_Connected1") as scope:
W_fc1 = weight_variable([D, h1], 'Fully_Connected_layer_1')
b_fc1 = bias_variable([h1], 'bias_for_Fully_Connected_Layer_1')
h_fc1 = tf.nn.relu(tf.matmul(x, W_fc1) + b_fc1)
with tf.name_scope("Fully_Connected2") as scope:
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([h1, h2], 'Fully_Connected_layer_2')
b_fc2 = bias_variable([h2], 'bias_for_Fully_Connected_Layer_2')
h_fc2 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
with tf.name_scope("Fully_Connected3") as scope:
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
W_fc3 = weight_variable([h2, 7], 'Fully_Connected_layer_3')
b_fc3 = bias_variable([7], 'bias_for_Fully_Connected_Layer_3')
h_fc3 = tf.matmul(h_fc2_drop, W_fc3) + b_fc3
# Also add histograms to TensorBoard
w1_hist = tf.histogram_summary("W_fc1", W_fc1)
b1_hist = tf.histogram_summary("b_fc1", b_fc1)
w2_hist = tf.histogram_summary("W_fc2", W_fc2)
b2_hist = tf.histogram_summary("b_fc2", b_fc2)
with tf.name_scope("Softmax") as scope:
ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(h_fc3, y_, name='Cross_entropy_loss')
loss = tf.reduce_sum(ce_loss)
ce_summ = tf.scalar_summary("cross entropy", loss)
with tf.name_scope("train") as scope:
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
with tf.name_scope("Evaluating") as scope:
correct_prediction = tf.equal(tf.argmax(h_fc3,1), y_)  # compare the output logits (h_fc3), not the 12-unit hidden layer
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
accuracy_summary = tf.scalar_summary("accuracy", accuracy)
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/home/rob/Dropbox/DataMining/assignment2/log_tb", sess.graph_def)
#Collect the accuracies in a numpy array
iterations = 5000
acc_collect = np.zeros((2,iterations//100))
step = 0
sess.run(tf.initialize_all_variables())
for i in range(iterations):
batch_ind = np.random.choice(N,100,replace=False)
if i%100 == 0:
result = sess.run([accuracy,merged], feed_dict={ x: X_val, y_: y_val, keep_prob: 1.0})
acc = result[0]
acc_collect[0,step] = acc
summary_str = result[1]
writer.add_summary(summary_str, i)
writer.flush() #Don't forget this command! It makes sure Python writes the summaries to the log-file
print("Accuracy at step %s: %s" % (i, acc))
#Now also obtain the train_accuracy
result = sess.run([accuracy,merged], feed_dict={ x: X_train, y_: y_train, keep_prob: 1.0})
acc_collect[1,step] = result[0]
step+=1
sess.run(train_step,feed_dict={x:X_train[batch_ind], y_: y_train[batch_ind], keep_prob: 0.5})
plt.plot(acc_collect[0],label='Valid')
plt.plot(acc_collect[1],label = 'Train')
plt.legend()
plt.show()
sess.close()
# We can now open TensorBoard. Run the following line from your terminal
# tensorboard --logdir=/home/rob/Dropbox/ConvNets/tf/log_tb
#With one fc layer of 50 neurons, we go to 94% val accuracy
|
import SloppyCell.Collections as Collections
expt = Collections.Experiment('ErkMekTraverse2EGF')
expt.longname = 'EGF Stimulation 100 ng/ml - Traverse 1994'
expt.comments = """REF: S. Traverse, et. al., Curr. Biol. (1994) 4, 694
CELLTYPE: PC12
MEAS: Erk1/Mek activation in the presence of EGF at 100 ng/ml
UNITS: units/mg (see paper), different def'n for Mek and Erk
NOTES: Error bars are those in the original data, not made up
NOTES:"""
expt.SetData({'EGFstim100':{
'ErkActive': {
2.0:(2.85, 0.29),
5.0:(4.9, 0.49),
15.0:(2.35, 0.23),
30.0:(1.9, 0.35),
60.0:(0.9, 0.10),
90.0:(0.5, 0.10)
},
'MekActive': {
2.0:(1.25, 0.13),
5.0:(1.12, 0.13),
15.0:(0.2, 0.05),
30.0:(0.18, 0.05),
60.0:(0.125, 0.05),
90.0:(0.1, 0.05)
},
}
}
)
|
class CustomTypeDescriptor(object, ICustomTypeDescriptor):
""" Provides a simple default implementation of the System.ComponentModel.ICustomTypeDescriptor interface. """
def GetAttributes(self):
"""
GetAttributes(self: CustomTypeDescriptor) -> AttributeCollection
Returns a collection of custom attributes for the type represented by this type descriptor.
Returns: An System.ComponentModel.AttributeCollection containing the attributes for the type. The default
is System.ComponentModel.AttributeCollection.Empty.
"""
pass
def GetClassName(self):
"""
GetClassName(self: CustomTypeDescriptor) -> str
Returns the fully qualified name of the class represented by this type descriptor.
Returns: A System.String containing the fully qualified class name of the type this type descriptor is
describing. The default is null.
"""
pass
def GetComponentName(self):
"""
GetComponentName(self: CustomTypeDescriptor) -> str
Returns the name of the class represented by this type descriptor.
Returns: A System.String containing the name of the component instance this type descriptor is
describing. The default is null.
"""
pass
def GetConverter(self):
"""
GetConverter(self: CustomTypeDescriptor) -> TypeConverter
Returns a type converter for the type represented by this type descriptor.
Returns: A System.ComponentModel.TypeConverter for the type represented by this type descriptor. The
default is a newly created System.ComponentModel.TypeConverter.
"""
pass
def GetDefaultEvent(self):
"""
GetDefaultEvent(self: CustomTypeDescriptor) -> EventDescriptor
Returns the event descriptor for the default event of the object represented by this type
descriptor.
Returns: The System.ComponentModel.EventDescriptor for the default event on the object represented by
this type descriptor. The default is null.
"""
pass
def GetDefaultProperty(self):
"""
GetDefaultProperty(self: CustomTypeDescriptor) -> PropertyDescriptor
Returns the property descriptor for the default property of the object represented by this type
descriptor.
Returns: A System.ComponentModel.PropertyDescriptor for the default property on the object represented by
this type descriptor. The default is null.
"""
pass
def GetEditor(self, editorBaseType):
"""
GetEditor(self: CustomTypeDescriptor,editorBaseType: Type) -> object
Returns an editor of the specified type that is to be associated with the class represented by
this type descriptor.
editorBaseType: The base type of the editor to retrieve.
Returns: An editor of the given type that is to be associated with the class represented by this type
descriptor. The default is null.
"""
pass
def GetEvents(self, attributes=None):
"""
GetEvents(self: CustomTypeDescriptor,attributes: Array[Attribute]) -> EventDescriptorCollection
Returns a filtered collection of event descriptors for the object represented by this type
descriptor.
attributes: An array of attributes to use as a filter. This can be null.
Returns: An System.ComponentModel.EventDescriptorCollection containing the event descriptions for the
object represented by this type descriptor. The default is
System.ComponentModel.EventDescriptorCollection.Empty.
GetEvents(self: CustomTypeDescriptor) -> EventDescriptorCollection
Returns a collection of event descriptors for the object represented by this type descriptor.
Returns: An System.ComponentModel.EventDescriptorCollection containing the event descriptors for the
object represented by this type descriptor. The default is
System.ComponentModel.EventDescriptorCollection.Empty.
"""
pass
def GetProperties(self, attributes=None):
"""
GetProperties(self: CustomTypeDescriptor) -> PropertyDescriptorCollection
Returns a collection of property descriptors for the object represented by this type descriptor.
Returns: A System.ComponentModel.PropertyDescriptorCollection containing the property descriptions for
the object represented by this type descriptor. The default is
System.ComponentModel.PropertyDescriptorCollection.Empty.
GetProperties(self: CustomTypeDescriptor,attributes: Array[Attribute]) -> PropertyDescriptorCollection
Returns a filtered collection of property descriptors for the object represented by this type
descriptor.
attributes: An array of attributes to use as a filter. This can be null.
Returns: A System.ComponentModel.PropertyDescriptorCollection containing the property descriptions for
the object represented by this type descriptor. The default is
System.ComponentModel.PropertyDescriptorCollection.Empty.
"""
pass
def GetPropertyOwner(self, pd):
"""
GetPropertyOwner(self: CustomTypeDescriptor,pd: PropertyDescriptor) -> object
Returns an object that contains the property described by the specified property descriptor.
pd: The property descriptor for which to retrieve the owning object.
Returns: An System.Object that owns the given property specified by the type descriptor. The default is
null.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, *args): # cannot find CLR constructor
"""
__new__(cls: type)
__new__(cls: type,parent: ICustomTypeDescriptor)
"""
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
|
import subprocess
'''
This module takes a screenshot on an Android phone via adb and saves it to the computer.
'''
class Screenshot():# capture the phone screen and save it to the computer
def screen(self,cmd):# take a screenshot on the phone
screenExecute=subprocess.Popen(str(cmd),stderr=subprocess.PIPE,stdout=subprocess.PIPE,shell=True)
stdout, stderr = screenExecute.communicate()
# decode the command's output
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
def saveComputer(self,cmd):# pull the screenshot to the computer
screenExecute = subprocess.Popen(str(cmd), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
stdout, stderr = screenExecute.communicate()
# decode the command's output
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
cmd1=r"adb shell /system/bin/screencap -p /sdcard/screenshot.png" #命令1:在手机上截图3.png为图片名
cmd2=r"adb pull /sdcard/screenshot.png" #命令2:将图片保存到电脑 d:/3.png为要保存到电脑的路径
screen=Screenshot()
screen.screen(cmd1)
screen.saveComputer(cmd2)
|
import time
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def timer(func):
"""
Decorator that logs the wrapped function's execution time (in milliseconds) via the module logger.
:type func: method
:rtype: method
"""
# Define wrapper
#
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
log.info('%s took %s ms.' % (func.__name__, (end - start) * 1000))
return result
return wrapper
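# Illustrative usage sketch (the sleep helper below is hypothetical and exists
# only to demonstrate the decorator): wrapping a function logs its runtime in ms.
@timer
def _demo_sleep(seconds=0.01):
    """Sleep briefly so the timer has something to measure."""
    time.sleep(seconds)
    return seconds

if __name__ == '__main__':
    _demo_sleep()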
|
#
#
#
import unittest, sys
import IfxPy
import config
from testfunctions import IfxPyTestFunctions
class IfxPyTestCase(unittest.TestCase):
def test_020_RollbackDelete(self):
obj = IfxPyTestFunctions()
obj.assert_expect(self.run_test_020)
def run_test_020(self):
conn = IfxPy.connect(config.ConnStr, config.user, config.password)
if conn:
stmt = IfxPy.exec_immediate(conn, "SELECT count(*) FROM animals")
res = IfxPy.fetch_tuple(stmt)
rows = res[0]
print(rows)
IfxPy.autocommit(conn, IfxPy.SQL_AUTOCOMMIT_OFF)
ac = IfxPy.autocommit(conn)
if ac != 0:
print "Cannot set IfxPy.SQL_AUTOCOMMIT_OFF\nCannot run test"
#continue
IfxPy.exec_immediate(conn, "DELETE FROM animals")
stmt = IfxPy.exec_immediate(conn, "SELECT count(*) FROM animals")
res = IfxPy.fetch_tuple(stmt)
rows = res[0]
print(rows)
IfxPy.rollback(conn)
stmt = IfxPy.exec_immediate(conn, "SELECT count(*) FROM animals")
res = IfxPy.fetch_tuple(stmt)
rows = res[0]
print(rows)
IfxPy.close(conn)
else:
print "Connection failed."
#__END__
#__LUW_EXPECTED__
#7
#0
#7
#__ZOS_EXPECTED__
#7
#0
#7
#__SYSTEMI_EXPECTED__
#7
#0
#7
#__IDS_EXPECTED__
#7
#0
#7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-03-19 15:18
# @Author : erwin
import pandas as pd
from common.util_function import *
df1 = pd.DataFrame(data={'name': ['a', 'b', 'c', 'd'], 'gender': ['male', 'male', 'female', 'female']})
df2 = pd.DataFrame(data={'name': ['a', 'b', 'c', 'e'], 'age': [21, 22, 23, 20]})
# set difference on the 'name' column: values present in df1 but not in df2
ds1 = set([tuple(line) for line in df1[['name']].values.tolist()])
ds2 = set([tuple(line) for line in df2[['name']].values.tolist()])
for d in ds1.difference(ds2):
print_br(d)
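# Illustrative alternative (assumes only the 'name' column matters for the diff):
# plain Python sets give the same left-minus-right difference without tuple packing.
print_br(set(df1['name']) - set(df2['name']))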
|
from .context import pset
from nose.tools import raises
import numpy as np
class TestFreeParameter:
@classmethod
def setup_class(cls):
cls.p0 = pset.FreeParameter('var0__FREE', 'normal_var', 0, 1)
cls.p1 = pset.FreeParameter('var1__FREE', 'lognormal_var', 1, 2)
cls.p2 = pset.FreeParameter('var2__FREE', 'loguniform_var', 0.01, 100)
cls.p3 = pset.FreeParameter('var2__FREE', 'uniform_var', 0, 10)
cls.p4 = pset.FreeParameter('var2__FREE', 'uniform_var', 0, 10, bounded=False)
@classmethod
def teardown_class(cls):
pass
def test_check_init(self):
print(self.p0.value)
assert self.p0.value is None
assert self.p0.type == 'normal_var'
assert not self.p0.bounded
assert not self.p1.bounded
assert self.p1.lower_bound == 0.0
assert np.isinf(self.p1.upper_bound)
assert self.p2.upper_bound == 100
assert self.p3.bounded
print(self.p4.bounded)
assert not self.p4.bounded
@raises(pset.OutOfBoundsException)
def test_check_erroneous_assignment(self):
pset.FreeParameter('var2__FREE', 'loguniform_var', 0.01, 100, value=1000)
def test_distribution(self):
xs = [self.p3.sample_value().value for x in range(100000)]
for x in xs:
assert self.p3.lower_bound <= x < self.p3.upper_bound
ys = [self.p0.sample_value().value for x in range(100000)]
assert np.all(np.array(ys)>=0.0)
def test_sample_value(self):
p0s = self.p0.sample_value()
assert p0s.value is not None
def test_freeparameter_equality(self):
p6 = self.p0.sample_value()
p0s = self.p0.set_value(p6.value)
print(p0s, p6)
assert p6 == p0s
def test_add(self):
p7 = self.p0.set_value(1)
p7a = p7.add(1)
assert p7a.value == 2
p8 = self.p2.set_value(1)
p8a = p8.add(1)
assert p8a.value == 10
def test_diff(self):
p9 = self.p0.set_value(1)
p10 = self.p0.set_value(2)
assert p9.diff(p10) == -1
p11 = self.p2.set_value(10)
p12 = self.p2.set_value(100)
assert p12.diff(p11) == 1
def test_reflect(self):
assert self.p3.set_value(11).value == 9
assert self.p3.set_value(12).value == 8
assert self.p3.set_value(25).value == 5
assert self.p2.set_value(1000).value == 10
def test_set_value(self):
p13 = self.p0.set_value(1)
assert p13.lower_bound == self.p0.lower_bound
assert p13.upper_bound == self.p0.upper_bound
p14 = self.p4.set_value(100)
assert p14.lower_bound == self.p4.lower_bound
assert p14.upper_bound == self.p4.upper_bound
@raises(pset.OutOfBoundsException)
def test_no_reflect(self):
self.p3.set_value(11, False)
|
import numpy as np
with open('input.txt', 'r') as f:
s = f.read().strip()
#s = '0222112222120000'
nr = 6
nc = 25
x = [int(c) for c in s]
d = np.array(x)
d = np.reshape(d, (-1, nr, nc))
nb = d.shape[0]
tups = []
for b in range(nb):
band = d[b,:,:]
unique, counts = np.unique(band, return_counts=True)
tups.append(list(counts))
tups.sort()
print(tups[0][1]*tups[0][2])
for r in range(nr):
for c in range(nc):
for b in range(nb):
v = d[b,r,c]
if v != 2:
break
print({0:' ', 1:'X', 2:'.'}[v], end='')
print('\n', end='')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Fuzzing in the Large" - a chapter of "The Fuzzing Book"
# Web site: https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
# Last change: 2021-11-03 13:27:49+01:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Fuzzing Book - Fuzzing in the Large
This file can be _executed_ as a script, running all experiments:
$ python FuzzingInTheLarge.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from fuzzingbook.FuzzingInTheLarge import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
The Python `FuzzManager` package allows for programmatic submission of failures from a large number of (fuzzed) programs. One can query crashes and their details, collect them into buckets to ensure they will be treated the same, and also retrieve coverage information for debugging both programs and their tests.
For more details, source, and documentation, see
"The Fuzzing Book - Fuzzing in the Large"
at https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
'''
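# Minimal illustrative sketch of the programmatic-submission flow described above
# (it assumes a running FuzzManager server and the FTB/Collector modules imported
# later in this file; stdout_lines/stderr_lines stand in for a crashing run's output).
# The full, executable version appears in the sections below:
#
#     configuration = ProgramConfiguration.fromBinary('simply-buggy/simple-crash')
#     crashInfo = CrashInfo.fromRawCrashData(stdout_lines, stderr_lines, configuration)
#     Collector().submit(crashInfo)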
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'fuzzingbook'
# Fuzzing in the Large
# ====================
if __name__ == '__main__':
print('# Fuzzing in the Large')
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
from . import Fuzzer
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Collecting Crashes from Multiple Fuzzers
## ----------------------------------------
if __name__ == '__main__':
print('\n## Collecting Crashes from Multiple Fuzzers')
from graphviz import Digraph
if __name__ == '__main__':
g = Digraph()
server = 'Crash Server'
g.node('Crash Database', shape='cylinder')
for i in range(1, 7):
g.edge('Fuzzer ' + repr(i), server)
g.edge(server, 'Crash Database')
g
## Running a Crash Server
## ----------------------
if __name__ == '__main__':
print('\n## Running a Crash Server')
### Excursion: Setting up the Server
if __name__ == '__main__':
print('\n### Excursion: Setting up the Server')
import os
import sys
import shutil
if __name__ == '__main__':
if 'CI' in os.environ:
# Can't run this in our continuous environment,
# since it can't run a headless Web browser
sys.exit(0)
if __name__ == '__main__':
if os.path.exists('FuzzManager'):
shutil.rmtree('FuzzManager')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/MozillaSecurity/FuzzManager')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; git checkout 0.4.1')
if __name__ == '__main__':
import os
os.system(f'pip install -r FuzzManager/server/requirements.txt > /dev/null')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python server/manage.py migrate > /dev/null')
if __name__ == '__main__':
import os
os.system(f'(cd FuzzManager; echo "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser(\'demo\', \'demo@fuzzingbook.org\', \'demo\')" | python server/manage.py shell)')
import subprocess
import sys
if __name__ == '__main__':
os.chdir('FuzzManager')
result = subprocess.run(['python',
'server/manage.py',
'get_auth_token',
'demo'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
os.chdir('..')
err = result.stderr.decode('ascii')
if len(err) > 0:
print(err, file=sys.stderr, end="")
if __name__ == '__main__':
token = result.stdout
token = token.decode('ascii').strip()
token
if __name__ == '__main__':
assert len(token) > 10, "Invalid token " + repr(token)
if __name__ == '__main__':
home = os.path.expanduser("~")
conf = os.path.join(home, ".fuzzmanagerconf")
if __name__ == '__main__':
fuzzmanagerconf = """
[Main]
sigdir = /home/example/fuzzingbook
serverhost = 127.0.0.1
serverport = 8000
serverproto = http
serverauthtoken = %s
tool = fuzzingbook
""" % token
if __name__ == '__main__':
with open(conf, "w") as file:
file.write(fuzzmanagerconf)
from pygments.lexers.configs import IniLexer
from .bookutils import print_file
if __name__ == '__main__':
print_file(conf, lexer=IniLexer())
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Starting the Server
if __name__ == '__main__':
print('\n### Excursion: Starting the Server')
from multiprocess import Process
import subprocess
def run_fuzzmanager():
def run_fuzzmanager_forever():
os.chdir('FuzzManager')
proc = subprocess.Popen(['python', 'server/manage.py',
'runserver'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
while True:
line = proc.stdout.readline()
print(line, end='')
fuzzmanager_process = Process(target=run_fuzzmanager_forever)
fuzzmanager_process.start()
return fuzzmanager_process
if __name__ == '__main__':
fuzzmanager_process = run_fuzzmanager()
import time
if __name__ == '__main__':
time.sleep(2)
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Logging In
if __name__ == '__main__':
print('\n### Logging In')
if __name__ == '__main__':
fuzzmanager_url = "http://127.0.0.1:8000"
if __name__ == '__main__':
from IPython.display import display, Image
from .bookutils import HTML, rich_output
from .GUIFuzzer import start_webdriver # minor dependency
if __name__ == '__main__':
gui_driver = start_webdriver(headless=True, zoom=1.2)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 600)
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
username = gui_driver.find_element_by_name("username")
username.send_keys("demo")
if __name__ == '__main__':
password = gui_driver.find_element_by_name("password")
password.send_keys("demo")
if __name__ == '__main__':
login = gui_driver.find_element_by_tag_name("button")
login.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Collecting Crashes
## ------------------
if __name__ == '__main__':
print('\n## Collecting Crashes')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/choller/simply-buggy')
if __name__ == '__main__':
import os
os.system(f'(cd simply-buggy && make)')
from .bookutils import print_file
if __name__ == '__main__':
print_file("simply-buggy/simple-crash.cpp")
if __name__ == '__main__':
print_file("simply-buggy/simple-crash.fuzzmanagerconf", lexer=IniLexer())
if __name__ == '__main__':
import os
os.system(f'simply-buggy/simple-crash')
import subprocess
if __name__ == '__main__':
cmd = ["simply-buggy/simple-crash"]
if __name__ == '__main__':
result = subprocess.run(cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Yay, we crashed!")
else:
print("Move along, nothing to see...")
### Program Configurations
if __name__ == '__main__':
print('\n### Program Configurations')
if __name__ == '__main__':
sys.path.append('FuzzManager')
if __name__ == '__main__':
from FTB.ProgramConfiguration import ProgramConfiguration
if __name__ == '__main__':
configuration = ProgramConfiguration.fromBinary('simply-buggy/simple-crash')
(configuration.product, configuration.platform)
### Crash Info
if __name__ == '__main__':
print('\n### Crash Info')
if __name__ == '__main__':
from FTB.Signatures.CrashInfo import CrashInfo
if __name__ == '__main__':
cmd = ["simply-buggy/simple-crash"]
result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if __name__ == '__main__':
stderr = result.stderr.decode().splitlines()
stderr[0:3]
if __name__ == '__main__':
stdout = result.stdout.decode().splitlines()
stdout
if __name__ == '__main__':
crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
print(crashInfo)
### Collector
if __name__ == '__main__':
print('\n### Collector')
if __name__ == '__main__':
from Collector.Collector import Collector
if __name__ == '__main__':
collector = Collector()
if __name__ == '__main__':
collector.submit(crashInfo)
### Inspecting Crashes
if __name__ == '__main__':
print('\n### Inspecting Crashes')
if __name__ == '__main__':
gui_driver.refresh()
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
crash = gui_driver.find_element_by_xpath('//td/a[contains(@href,"/crashmanager/crashes/")]')
crash.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Crash Buckets
## -------------
if __name__ == '__main__':
print('\n## Crash Buckets')
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
create = gui_driver.find_element_by_xpath('//a[contains(@href,"/signatures/new/")]')
create.click()
time.sleep(1)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 1200)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
save = gui_driver.find_element_by_name("submit_save")
save.click()
time.sleep(1)
### Crash Signatures
if __name__ == '__main__':
print('\n### Crash Signatures')
if __name__ == '__main__':
gui_driver.set_window_size(1400, 800)
Image(gui_driver.get_screenshot_as_png())
### Coarse-Grained Signatures
if __name__ == '__main__':
print('\n### Coarse-Grained Signatures')
if __name__ == '__main__':
print_file("simply-buggy/out-of-bounds.cpp")
import os
import random
import subprocess
import tempfile
import sys
#### Excursion: `escapelines()` implementation
if __name__ == '__main__':
print('\n#### Excursion: `escapelines()` implementation')
def isascii(s):
return all([0 <= ord(c) <= 127 for c in s])
if __name__ == '__main__':
isascii('Hello,')
def escapelines(bytes):
def ascii_chr(byte):
if 0 <= byte <= 127:
return chr(byte)
return r"\x%02x" % byte
def unicode_escape(line):
ret = "".join(map(ascii_chr, line))
assert isascii(ret)
return ret
return [unicode_escape(line) for line in bytes.splitlines()]
if __name__ == '__main__':
escapelines(b"Hello,\nworld!")
if __name__ == '__main__':
escapelines(b"abc\xffABC")
#### End of Excursion
if __name__ == '__main__':
print('\n#### End of Excursion')
if __name__ == '__main__':
cmd = ["simply-buggy/out-of-bounds"]
# Connect to crash server
collector = Collector()
random.seed(2048)
crash_count = 0
TRIALS = 20
for itnum in range(0, TRIALS):
rand_len = random.randint(1, 1024)
rand_data = bytes([random.randrange(0, 256) for i in range(rand_len)])
(fd, current_file) = tempfile.mkstemp(prefix="fuzztest", text=True)
os.write(fd, rand_data)
os.close(fd)
current_cmd = []
current_cmd.extend(cmd)
current_cmd.append(current_file)
result = subprocess.run(current_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = [] # escapelines(result.stdout)
stderr = escapelines(result.stderr)
crashed = False
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
print(itnum, end=" ")
if crashed:
sys.stdout.write("(Crash) ")
# This reads the simple-crash.fuzzmanagerconf file
configuration = ProgramConfiguration.fromBinary(cmd[0])
# This reads and parses our ASan trace into a more generic format,
# returning us a generic "CrashInfo" object that we can inspect
# and/or submit to the server.
crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
# Submit the crash
collector.submit(crashInfo, testCase = current_file)
crash_count += 1
os.remove(current_file)
print("")
print("Done, submitted %d crashes after %d runs." % (crash_count, TRIALS))
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url + "/crashmanager/crashes")
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Collecting Code Coverage
## ------------------------
if __name__ == '__main__':
print('\n## Collecting Code Coverage')
if __name__ == '__main__':
print_file("simply-buggy/maze.cpp")
if __name__ == '__main__':
import os
os.system(f'(cd simply-buggy && make clean && make coverage)')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/choller/simply-buggy $HOME/simply-buggy-server ')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python3 server/manage.py setup_repository simply-buggy GITSourceCodeProvider $HOME/simply-buggy-server')
import random
import subprocess
if __name__ == '__main__':
random.seed(0)
cmd = ["simply-buggy/maze"]
constants = [3735928559, 1111638594]
TRIALS = 1000
for itnum in range(0, TRIALS):
current_cmd = []
current_cmd.extend(cmd)
for _ in range(0, 4):
if random.randint(0, 9) < 3:
current_cmd.append(str(constants[
random.randint(0, len(constants) - 1)]))
else:
current_cmd.append(str(random.randint(-2147483647, 2147483647)))
result = subprocess.run(current_cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
if stderr and "secret" in stderr[0]:
print(stderr[0])
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Found the bug!")
break
print("Done!")
if __name__ == '__main__':
import os
os.system(f'export PATH=$HOME/.cargo/bin:$PATH; grcov simply-buggy/ -t coveralls+ --commit-sha $(cd simply-buggy && git rev-parse HEAD) --token NONE -p `pwd`/simply-buggy/ > coverage.json')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python3 -mCovReporter --repository simply-buggy --description "Test1" --submit ../coverage.json')
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url + "/covmanager")
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
first_id = gui_driver.find_element_by_xpath('//td/a[contains(@href,"/browse")]')
first_id.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
maze_cpp = gui_driver.find_element_by_xpath("//*[contains(text(), 'maze.cpp')]")
maze_cpp.click()
time.sleep(1)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 1400)
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
random.seed(0)
cmd = ["simply-buggy/maze"]
# Added the missing constant here
constants = [3735928559, 1111638594, 3405695742]
for itnum in range(0,1000):
current_cmd = []
current_cmd.extend(cmd)
for _ in range(0,4):
if random.randint(0, 9) < 3:
current_cmd.append(str(
constants[random.randint(0, len(constants) - 1)]))
else:
current_cmd.append(str(random.randint(-2147483647, 2147483647)))
result = subprocess.run(current_cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
if stderr:
print(stderr[0])
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Found the bug!")
break
print("Done!")
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Lessons Learned
## ---------------
if __name__ == '__main__':
print('\n## Lessons Learned')
if __name__ == '__main__':
fuzzmanager_process.terminate()
if __name__ == '__main__':
gui_driver.quit()
import shutil
if __name__ == '__main__':
for temp_file in ['coverage.json', 'geckodriver.log', 'ghostdriver.log']:
if os.path.exists(temp_file):
os.remove(temp_file)
if __name__ == '__main__':
home = os.path.expanduser("~")
for temp_dir in ['coverage', 'simply-buggy', 'simply-buggy-server',
os.path.join(home, 'simply-buggy-server'),
'FuzzManager']:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
## Next Steps
## ----------
if __name__ == '__main__':
print('\n## Next Steps')
## Background
## ----------
if __name__ == '__main__':
print('\n## Background')
## Exercises
## ---------
if __name__ == '__main__':
print('\n## Exercises')
### Exercise 1: Automatic Crash Reporting
if __name__ == '__main__':
print('\n### Exercise 1: Automatic Crash Reporting')
|
import cv2, numpy
# Load the source image and read its width (columns) and height (rows)
img = cv2.imread('flower.png')
columns = img.shape[1]
rows = img.shape[0]
# Affine matrix for a pure translation: shift 200 px right and 100 px down
transformation_matrix = numpy.float32([[1, 0, 200], [0, 1, 100]])
# Apply the translation, keeping the original canvas size
transformed_image = cv2.warpAffine(img, transformation_matrix, (columns, rows))
cv2.imshow('Image', transformed_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
#! /usr/bin/env python3
import argparse
import subprocess
import sys
import unittest
from test_bech32 import TestSegwitAddress
from test_coldcard import coldcard_test_suite
from test_descriptor import TestDescriptor
from test_device import start_bitcoind
from test_psbt import TestPSBT
from test_trezor import trezor_test_suite
from test_ledger import ledger_test_suite
from test_digitalbitbox import digitalbitbox_test_suite
from test_keepkey import keepkey_test_suite
from test_udevrules import TestUdevRulesInstaller
parser = argparse.ArgumentParser(description='Setup the testing environment and run automated tests')
trezor_group = parser.add_mutually_exclusive_group()
trezor_group.add_argument('--no_trezor', help='Do not run Trezor test with emulator', action='store_true')
trezor_group.add_argument('--trezor', help='Path to Trezor emulator.', default='work/trezor-mcu/firmware/trezor.elf')
coldcard_group = parser.add_mutually_exclusive_group()
coldcard_group.add_argument('--no_coldcard', help='Do not run Coldcard test with simulator', action='store_true')
coldcard_group.add_argument('--coldcard', help='Path to Coldcard simulator.', default='work/firmware/unix/headless.py')
ledger_group = parser.add_mutually_exclusive_group()
ledger_group.add_argument('--ledger', help='Run physical Ledger Nano S/X tests.', action='store_true')
keepkey_group = parser.add_mutually_exclusive_group()
keepkey_group.add_argument('--no_keepkey', help='Do not run Keepkey test with emulator', action='store_true')
keepkey_group.add_argument('--keepkey', help='Path to Keepkey emulator.', default='work/keepkey-firmware/bin/kkemu')
dbb_group = parser.add_mutually_exclusive_group()
dbb_group.add_argument('--no_bitbox', help='Do not run Digital Bitbox test with simulator', action='store_true')
dbb_group.add_argument('--bitbox', help='Path to Digital bitbox simulator.', default='work/mcu/build/bin/simulator')
parser.add_argument('--bitcoind', help='Path to bitcoind.', default='work/bitcoin/src/bitcoind')
parser.add_argument('--interface', help='Which interface to send commands over', choices=['library', 'cli', 'bindist', 'stdin'], default='library')
args = parser.parse_args()
# Run tests
suite = unittest.TestSuite()
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(TestDescriptor))
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(TestSegwitAddress))
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(TestPSBT))
if sys.platform.startswith("linux"):
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(TestUdevRulesInstaller))
if not args.no_trezor or not args.no_coldcard or args.ledger or not args.no_bitbox or not args.no_keepkey:
# Start bitcoind
rpc, userpass = start_bitcoind(args.bitcoind)
if not args.no_trezor:
suite.addTest(trezor_test_suite(args.trezor, rpc, userpass, args.interface))
if not args.no_coldcard:
suite.addTest(coldcard_test_suite(args.coldcard, rpc, userpass, args.interface))
if args.ledger:
suite.addTest(ledger_test_suite(rpc, userpass, args.interface))
if not args.no_bitbox:
suite.addTest(digitalbitbox_test_suite(rpc, userpass, args.bitbox, args.interface))
if not args.no_keepkey:
suite.addTest(keepkey_test_suite(args.keepkey, rpc, userpass, args.interface))
result = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
from __future__ import unicode_literals
from .nuevo import NuevoBaseIE
class AnitubeIE(NuevoBaseIE):
IE_NAME = 'anitube.se'
_VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'
_TEST = {
'url': 'http://www.anitube.se/video/36621',
'md5': '59d0eeae28ea0bc8c05e7af429998d43',
'info_dict': {
'id': '36621',
'ext': 'mp4',
'title': 'Recorder to Randoseru 01',
'duration': 180.19,
},
'skip': 'Blocked in the US',
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
key = self._search_regex(
r'src=["\']https?://[^/]+/embed/([A-Za-z0-9_-]+)', webpage, 'key')
return self._extract_nuevo(
'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, video_id)
|
import hashlib
from django.core.cache import cache
from django.utils.translation import ugettext as _
from tools.context_processors import manual_url
class PrestaError(Exception):
KEY = "key"
DOMAIN = "domain"
API_PHP_INSTALL = _('Please install api.php file')
REGEN_HTACCESS = _('Please regenerate .htaccess file')
NO_PRODUCTS = _('No products found')
NOT_REACHABLE = _('URL is not reachable')
NOT_ACTIVATED = _('Please activate web-service')
PROD_OPTIONS_VALUES = _('Enable product_option_values checkbox')
PROD_COMBINATIONS = _('Enable combinations checkbox')
@staticmethod
def get_error(key, value, api):
"""Set cache key to customize error message if we get the same more than once"""
#TODO: maybe put this into the session instead, but we would need to think about cleaning it up
user_id = api.shop.user.user_id
cache_key = hashlib.sha256(key+"|"+value+"|"+str(user_id)).hexdigest()
if cache.get(cache_key):
value = _(value)+ ', <a target="_blank" href="%s#%s">%s<a/>' %\
(manual_url(None)['manual_url'], api.__version__,_('instructions'))
else:
cache.set(cache_key, value, 300)
value = _(value)
return {key: value}
@classmethod
def rewrite_disabled(cls, error_dict):
if cls.DOMAIN in error_dict and \
error_dict[cls.DOMAIN].startswith(_(cls.API_PHP_INSTALL)):
return True
return False
|
# coding: utf-8
import pprint
import re
import six
class CinderExportToImageOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'container_format': 'str',
'disk_format': 'str',
'force': 'bool',
'image_name': 'str',
'os_type': 'str'
}
attribute_map = {
'container_format': 'container_format',
'disk_format': 'disk_format',
'force': 'force',
'image_name': 'image_name',
'os_type': '__os_type'
}
def __init__(self, container_format='bare', disk_format='vhd', force=None, image_name=None, os_type='linux'):
"""CinderExportToImageOption - a model defined in huaweicloud sdk"""
self._container_format = None
self._disk_format = None
self._force = None
self._image_name = None
self._os_type = None
self.discriminator = None
if container_format is not None:
self.container_format = container_format
if disk_format is not None:
self.disk_format = disk_format
if force is not None:
self.force = force
self.image_name = image_name
if os_type is not None:
self.os_type = os_type
@property
def container_format(self):
"""Gets the container_format of this CinderExportToImageOption.
Container format of the image exported from the EVS disk. Currently ami, ari, aki, ovf, and bare are supported; the default is bare.
:return: The container_format of this CinderExportToImageOption.
:rtype: str
"""
return self._container_format
@container_format.setter
def container_format(self, container_format):
"""Sets the container_format of this CinderExportToImageOption.
Container format of the image exported from the EVS disk. Currently ami, ari, aki, ovf, and bare are supported; the default is bare.
:param container_format: The container_format of this CinderExportToImageOption.
:type: str
"""
self._container_format = container_format
@property
def disk_format(self):
"""Gets the disk_format of this CinderExportToImageOption.
Disk format of the exported image. Currently vhd, zvhd, zvhd2, raw, and qcow2 are supported; the default is vhd.
:return: The disk_format of this CinderExportToImageOption.
:rtype: str
"""
return self._disk_format
@disk_format.setter
def disk_format(self, disk_format):
"""Sets the disk_format of this CinderExportToImageOption.
Disk format of the exported image. Currently vhd, zvhd, zvhd2, raw, and qcow2 are supported; the default is vhd.
:param disk_format: The disk_format of this CinderExportToImageOption.
:type: str
"""
self._disk_format = disk_format
@property
def force(self):
"""Gets the force of this CinderExportToImageOption.
Flag indicating whether to force the image export; the default is false. When force is false, the image cannot be exported while the disk is in use; when force is true, the image can be exported even if the disk is in use.
:return: The force of this CinderExportToImageOption.
:rtype: bool
"""
return self._force
@force.setter
def force(self, force):
"""Sets the force of this CinderExportToImageOption.
Flag indicating whether to force the image export; the default is false. When force is false, the image cannot be exported while the disk is in use; when force is true, the image can be exported even if the disk is in use.
:param force: The force of this CinderExportToImageOption.
:type: bool
"""
self._force = force
@property
def image_name(self):
"""Gets the image_name of this CinderExportToImageOption.
Name of the exported image. The name contains 1 to 128 characters and may include uppercase letters, lowercase letters, Chinese characters, digits, and the special characters "-", ".", "_", and spaces.
:return: The image_name of this CinderExportToImageOption.
:rtype: str
"""
return self._image_name
@image_name.setter
def image_name(self, image_name):
"""Sets the image_name of this CinderExportToImageOption.
Name of the exported image. The name contains 1 to 128 characters and may include uppercase letters, lowercase letters, Chinese characters, digits, and the special characters "-", ".", "_", and spaces.
:param image_name: The image_name of this CinderExportToImageOption.
:type: str
"""
self._image_name = image_name
@property
def os_type(self):
"""Gets the os_type of this CinderExportToImageOption.
OS type of the exported image. Only "windows" and "linux" are supported; the default is "linux". Note: the configured __os_type takes effect only when the disk's volume_image_metadata contains no "__os_type" field and the disk status is "available". If this parameter is omitted, the default "linux" is used as the image OS type.
:return: The os_type of this CinderExportToImageOption.
:rtype: str
"""
return self._os_type
@os_type.setter
def os_type(self, os_type):
"""Sets the os_type of this CinderExportToImageOption.
OS type of the exported image. Only "windows" and "linux" are supported; the default is "linux". Note: the configured __os_type takes effect only when the disk's volume_image_metadata contains no "__os_type" field and the disk status is "available". If this parameter is omitted, the default "linux" is used as the image OS type.
:param os_type: The os_type of this CinderExportToImageOption.
:type: str
"""
self._os_type = os_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CinderExportToImageOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
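# Illustrative usage sketch (the parameter values below are assumptions chosen
# purely for demonstration): build an export-to-image option and inspect its
# serialised form.
if __name__ == "__main__":
    demo_option = CinderExportToImageOption(image_name="demo-exported-image",
                                            disk_format="qcow2",
                                            force=True)
    print(demo_option.to_dict())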
|
"""
Script to roll out a trained agent on the environment for several episodes.
"""
import json
import os
import re
import argparse
import numpy as np
import torch
from utils import helpers as utl
from utils.evaluation import evaluate
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
def evaluate_varibad(model_path,
result_file_name,
test_space=None,
num_episodes=3,
rollouts_per_seed=8,
recompute_results=True
):
# check if we already evaluated this; in that case just load from disk
#precomputed_results_path = os.path.join(exp_directory, 'results', 'end_performance_per_episode')
precomputed_results_path = 'final_performance_per_episode'
if not os.path.exists(precomputed_results_path):
os.mkdir(precomputed_results_path)
precomputed_results_file = os.path.join(precomputed_results_path, result_file_name)
if os.path.exists(precomputed_results_file + '.npy') and not recompute_results:
return np.load(precomputed_results_file + '.npy')
# the folder for this environment and this method
# route to the sub-level folder which contains the result folders for different runs
exp_directory = os.path.join(model_path, os.listdir(model_path)[-1])
results = []
# loop through different runs
for r_fold in os.listdir(exp_directory):
if r_fold[0] == '.':
continue
# this is the current results folder we're in
results_path = os.path.join(exp_directory, r_fold)
# get config file
#with open(os.path.join(results_path, 'config.json')) as json_data_file:
with open(os.path.join(results_path, 'config.json')) as json_data_file:
config = json.load(json_data_file)
config = Bunch(config)
'''
# TODO: remove again, this is a hack for CheetahDir
if env_name == 'cheetah_dir':
config.env_name = 'HalfCheetahDir-v0'
elif env_name == 'cheetah_hop':
config.env_name = 'Hop-v0'
'''
# change the test space if necessary
if config.env_name == 'PointEnv-v0':
if test_space is not None:
config.goal_sampler = test_space
# get the latest model
model_path = os.path.join(exp_directory, r_fold, 'models')
print('Loading latest model from run ', model_path)
model_files = os.listdir(model_path)
try:
model_idx = max([int(''.join(re.findall(r'[0-9]', m))) for m in model_files])
print('loading model policy{}.pt'.format(model_idx))
except ValueError:
model_idx = ''
# get policy network
policy = torch.load(os.path.join(results_path, 'models', 'policy{}.pt'.format(model_idx)), map_location=device)
# try to get encoder
try:
encoder = torch.load(os.path.join(results_path, 'models', 'encoder{}.pt'.format(model_idx)), map_location=device)
except FileNotFoundError:
encoder = None
# get the normalisation parameters for the environment
try:
ret_rms = utl.load_obj(os.path.join(results_path, 'models'), "env_rew_rms{0}".format(model_idx))
except FileNotFoundError:
ret_rms = None
# test on the same tasks if training tasks are specified
task_path = os.path.join(results_path, 'train_tasks.pkl')
if os.path.exists(task_path):
tasks = utl.load_obj(results_path, 'train_tasks')
print(tasks[0])
else:
tasks = None
returns = run_policy(config, policy, ret_rms, encoder, num_episodes, rollouts_per_seed, tasks)
print(returns)
# add the returns of the current experiment!
results.append(returns.cpu())
# shape: num_seeds * num_episodes
results = np.stack(results)
# save the results so we don't have to recompute them every time
np.save(precomputed_results_file, results)
return results
def run_policy(config, policy, ret_rms, encoder, num_episodes, rollouts_per_seed, tasks):
avg_return_per_episode = 0
for i in range(rollouts_per_seed):
returns_per_episode = evaluate(config, policy, ret_rms, iter_idx=i, tasks=tasks, num_episodes=num_episodes, encoder=encoder)
avg_return_per_episode += returns_per_episode.mean(dim=0)
avg_return_per_episode /= rollouts_per_seed
return avg_return_per_episode
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, required=True, help='the folder to load the model for evaluation')
parser.add_argument('--result_file_name', type=str, required=True, help='the sub-folder to save the results')
parser.add_argument('--test_space', type=str, default=None, help='specify the test space; if None, test in the training space')
parser.add_argument('--num_episodes', type=int, default=3, help='the length of the meta-episode')
parser.add_argument('--num_evaluation', type=int, default=8, help='the number of tasks to test on')
parser.add_argument('--recompute_results', type=bool, default=True)  # note: with type=bool, any non-empty string passed on the command line (including "False") parses as True
args = parser.parse_args()
evaluate_varibad(model_path=args.model_path,
result_file_name=args.result_file_name,
test_space=args.test_space,
num_episodes=args.num_episodes,
rollouts_per_seed=args.num_evaluation,
recompute_results=args.recompute_results
)
|
import os
from os.path import join
from pathlib import Path
import json
from unittest.mock import patch
import src.superannotate as sa
from tests.integration.base import BaseTestCase
import tempfile
import pytest
class TestRecursiveFolderPixel(BaseTestCase):
PROJECT_NAME = "test_recursive_pixel"
PROJECT_DESCRIPTION = "Desc"
PROJECT_TYPE = "Pixel"
S3_FOLDER_PATH = "sample_project_pixel"
TEST_FOLDER_PATH = "data_set/sample_project_pixel"
IMAGE_NAME = "example_image_1.jpg"
FOLDER = "f"
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
@property
def folder_path(self):
return os.path.join(Path(__file__).parent.parent.parent, self.TEST_FOLDER_PATH)
@pytest.mark.flaky(reruns=2)
@patch("lib.core.usecases.annotations.UploadAnnotationUseCase.s3_bucket")
def test_recursive_annotation_upload_pixel(self, s3_bucket):
sa.create_folder(self.PROJECT_NAME, self.FOLDER)
destination = f"{self.PROJECT_NAME}/{self.FOLDER}"
sa.upload_images_from_folder_to_project(
destination, self.folder_path, recursive_subfolders=False
)
uploaded_annotations, _, _ = sa.upload_annotations_from_folder_to_project(destination,
self.S3_FOLDER_PATH,
from_s3_bucket="superannotate-python-sdk-test",
recursive_subfolders=False)
self.assertEqual(len(uploaded_annotations), 3)
self.assertEqual(len(s3_bucket.method_calls), 6)
self.assertIn(f"Uploading 3 annotations from {self.S3_FOLDER_PATH} to the project {destination}.",
self._caplog.text)
uploaded_annotations, _, _ = sa.upload_preannotations_from_folder_to_project(destination,
self.S3_FOLDER_PATH,
from_s3_bucket="superannotate-python-sdk-test",
recursive_subfolders=False)
self.assertEqual(len(s3_bucket.method_calls), 12)
self.assertIn(f"Uploading 3 annotations from {self.S3_FOLDER_PATH} to the project {destination}.",
self._caplog.text)
@pytest.mark.flaky(reruns=2)
def test_annotation_upload_pixel(self):
sa.upload_images_from_folder_to_project(self.PROJECT_NAME, self.folder_path)
sa.upload_annotations_from_folder_to_project(self.PROJECT_NAME, self.folder_path)
with tempfile.TemporaryDirectory() as tmp_dir:
sa.download_image_annotations(self.PROJECT_NAME, self.IMAGE_NAME, tmp_dir)
origin_annotation = json.load(open(f"{self.folder_path}/{self.IMAGE_NAME}___pixel.json"))
annotation = json.load(open(join(tmp_dir, f"{self.IMAGE_NAME}___pixel.json")))
self.assertEqual(
[i["attributes"] for i in annotation["instances"]],
[i["attributes"] for i in origin_annotation["instances"]]
)
class TestAnnotationUploadPixelSingle(BaseTestCase):
PROJECT_NAME = "TestAnnotationUploadPixelSingle"
PROJECT_DESCRIPTION = "Desc"
PROJECT_TYPE = "Pixel"
S3_FOLDER_PATH = "sample_project_pixel"
TEST_FOLDER_PATH = "data_set/sample_project_vector"
IMAGE_NAME = "example_image_1.jpg"
TEST_FOLDER_PATH_PIXEL = "data_set/sample_project_pixel"
@property
def folder_path_pixel(self):
return os.path.join(Path(__file__).parent.parent.parent, self.TEST_FOLDER_PATH_PIXEL)
@pytest.mark.flaky(reruns=2)
@patch("lib.core.usecases.annotations.UploadAnnotationUseCase.s3_bucket")
def test_annotation_upload_pixel(self, s3_bucket):
annotation_path = join(self.folder_path_pixel, f"{self.IMAGE_NAME}___pixel.json")
sa.upload_image_to_project(self.PROJECT_NAME, join(self.folder_path_pixel, self.IMAGE_NAME))
sa.upload_image_annotations(self.PROJECT_NAME, self.IMAGE_NAME, annotation_path)
self.assertEqual(len(s3_bucket.method_calls), 2)
|
#!python
if __name__ == '__main__':
print("b defined")
print("c")
print("c")
|
# -*- coding: utf-8 -*-
from openprocurement.api.validation import validate_data, validate_json_data
from .utils import update_logging_context, raise_operation_error
from openprocurement.api.validation import ( # noqa: F401
validate_file_upload, # noqa forwarded import
validate_document_data, # noqa forwarded import
validate_change_status, # noqa forwarded import
validate_patch_document_data, # noqa forwarded import
validate_items_uniq, # noqa forwarded import
)
def validate_lot_data(request, error_handler, **kwargs):
update_logging_context(request, {'lot_id': '__new__'})
data = validate_json_data(request)
model = request.lot_from_data(data, create=False)
if not any([request.check_accreditation(acc) for acc in iter(str(model.create_accreditation))]):
request.errors.add('body', 'accreditation',
'Broker Accreditation level does not permit lot creation')
request.errors.status = 403
raise error_handler(request.errors)
data = validate_data(request, model, "lot", data=data)
if data and data.get('mode', None) is None and request.check_accreditation('t'):
request.errors.add('body', 'mode', 'Broker Accreditation level does not permit lot creation')
request.errors.status = 403
raise error_handler(request)
def validate_post_lot_role(request, error_handler, **kwargs):
if request.authenticated_role in ('convoy', 'concierge'):
request.errors.add('body', 'accreditation', 'Can\'t create lot as bot')
request.errors.status = 403
raise error_handler(request)
def validate_patch_lot_data(request, error_handler, **kwargs):
data = validate_json_data(request)
editing_roles = request.content_configurator.available_statuses[request.context.status]['editing_permissions']
if request.authenticated_role not in editing_roles:
msg = 'Can\'t update {} in current ({}) status'.format(request.validated['resource_type'],
request.context.status)
raise_operation_error(request, error_handler, msg)
default_status = type(request.lot).fields['status'].default
if data.get('status') == default_status and data.get('status') != request.context.status:
raise_operation_error(request, error_handler, 'Can\'t switch lot to {} status'.format(default_status))
return validate_data(request, type(request.lot), data=data)
def validate_lot_document_update_not_by_author_or_lot_owner(request, error_handler, **kwargs):
if request.authenticated_role != (request.context.author or 'lot_owner'):
request.errors.add('url', 'role', 'Can update document only author')
request.errors.status = 403
raise error_handler(request)
def validate_update_item_in_not_allowed_status(request, error_handler, **kwargs):
status = request.validated['lot_status']
editing_statuses = request.content_configurator.item_editing_allowed_statuses
if status not in editing_statuses:
raise_operation_error(request, error_handler,
'Can\'t update item in current ({}) lot status'.format(status))
|
import unittest
import os
import numpy as np
import sys
# Add .. to the PYTHONPATH
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
from kaldi10feat.read_wave import *
# We'll also test computation of mel features on this.
from kaldi10feat.mel import *
class TestReadWave(unittest.TestCase):
def test_read_wave(self):
(samprate, data) = read_wave_file("temp.wav")
mel_computer = MelFeatureComputer(samprate)
# use channel 0 only.
feats = mel_computer.compute(data[0,:])
print("Feats are: {} {}".format(feats.shape, feats))
if __name__ == "__main__":
unittest.main()
|
import unittest
from test import test_support
class Empty:
def __repr__(self):
return '<Empty>'
class Coerce:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return '<Coerce %s>' % self.arg
def __coerce__(self, other):
if isinstance(other, Coerce):
return self.arg, other.arg
else:
return self.arg, other
class Cmp:
def __init__(self,arg):
self.arg = arg
def __repr__(self):
return '<Cmp %s>' % self.arg
def __cmp__(self, other):
return cmp(self.arg, other)
class ComparisonTest(unittest.TestCase):
set1 = [2, 2.0, 2L, 2+0j, Coerce(2), Cmp(2.0)]
set2 = [[1], (3,), None, Empty()]
candidates = set1 + set2
def test_comparisons(self):
for a in self.candidates:
for b in self.candidates:
if ((a in self.set1) and (b in self.set1)) or a is b:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
def test_id_comparisons(self):
# Ensure default comparison compares id() of args
L = []
for i in range(10):
L.insert(len(L)//2, Empty())
for a in L:
for b in L:
self.assertEqual(cmp(a, b), cmp(id(a), id(b)),
'a=%r, b=%r' % (a, b))
def test_main():
test_support.run_unittest(ComparisonTest)
if __name__ == '__main__':
test_main()
|
token="2059760690:AAG9FiJ0Vvx59__Pj8w1SA_7s-Fzqz99RvA"
userId="293682875"
|
#!/usr/bin/env python3
'''
MIT No Attribution
Copyright Amazon Web Services
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from configparser import ConfigParser
import logging as log
import psycopg2
import os
import datetime
from random import randint
CMD_CREATE_TABLE = 'create_table'
CMD_TABLE_EXISTS = 'table_exists'
CMD_DELETE_TABLE = 'delete_table'
CMD_ENTER_RANDOM_DATA = 'enter_random_data'
CMD_GET_DATA = 'get_data'
cmd_list = [
CMD_CREATE_TABLE,
CMD_DELETE_TABLE,
CMD_TABLE_EXISTS,
CMD_ENTER_RANDOM_DATA,
CMD_GET_DATA
]
MAX_PERCENT = 100
ALARM_ID_COUNT = 10
ALARM_ID_0 = 'SMOKE'
ALARM_ID_1 = 'FIRE'
ALARM_ID_2 = 'CO2'
ALARM_ID_3 = 'CO'
ALARM_ID_4 = 'Cl2'
ALARM_ID_5 = 'SO2'
ALARM_ID_6 = 'H20'
ALARM_ID_7 = 'C2N2'
ALARM_ID_8 = 'CNCI'
ALARM_ID_9 = 'AsF5'
alarm_ids = [
ALARM_ID_0,
ALARM_ID_1,
ALARM_ID_2,
ALARM_ID_3,
ALARM_ID_4,
ALARM_ID_5,
ALARM_ID_6,
ALARM_ID_7,
ALARM_ID_8,
ALARM_ID_9
]
class sql_db():
def __init__(self, database_ini_pathname):
self.db_config_dict = self.db_config(database_ini_pathname)
log.info('db_config_dict = {}'.format(str(self.db_config_dict)))
self.table_config_dict = self.table_config(database_ini_pathname)
log.info('table_config_dict = {}'.format(str(self.table_config_dict)))
def db_config(self, database_ini_pathname):
db_dict = {}
cf = ConfigParser()
cf.optionxform = str
section = 'postgressql'
if os.path.exists(database_ini_pathname):
cf.read(database_ini_pathname)
try:
db_dict = dict(cf.items(section))
except Exception as e:
log.error('ConfigParser error: {}'.format(str(e)))
else:
log.error('Missing database ini file = {}'.format(database_ini_pathname))
return db_dict
def table_config(self, database_ini_pathname):
table_dict = {}
cf = ConfigParser()
cf.optionxform = str
section = 'table'
if os.path.exists(database_ini_pathname):
cf.read(database_ini_pathname)
try:
table_dict = dict(cf.items(section))
except Exception as e:
log.error('ConfigParser error: {}'.format(str(e)))
else:
log.error('Missing database ini file = {}'.format(database_ini_pathname))
return table_dict
def gen_random_values(self, tab_name, prime_key, fields):
'''
If table name, primary key, or columns change, then modify this function for
the new schema.
'''
value_str = ''
prime_key_value = ''
if tab_name == 'alarms':
if prime_key == 'utc':
prime_key_value = '{}'.format(datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
first = True
for field in fields:
if field == 'utc':
value = prime_key_value
elif field == 'alarm_id':
id_index = randint(0, ALARM_ID_COUNT - 1)  # randint is inclusive on both ends, so cap at the last valid index
value = alarm_ids[id_index]
elif field == 'alarm_value':
value = randint(0, MAX_PERCENT)
else:
log.error('Unexpected field value')
value_str = ''
prime_key_value = ''
break
if first:
first = False
value_str += '\'{}\''.format(value)
else:
value_str += ', \'{}\''.format(value)
log.info('value str = {}'.format(value_str))
return prime_key_value, value_str
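# Illustrative (hypothetical) output of gen_random_values for the default 'alarms' schema
# with fields utc, alarm_id and alarm_value:
#   prime_key_value = '2021-01-01T12:00:00.000000Z'
#   value_str       = "'2021-01-01T12:00:00.000000Z', 'FIRE', '42'"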
def create_command_str(self, command):
rc = ''
if command in cmd_list:
tab_dict = self.table_config_dict
tab_name = tab_dict.get('name', None)
prime_key = tab_dict.get('prime_key', None)
field_count = tab_dict.get('field_count', None)
fc = int(field_count)
fields = []
for i in range(fc):
key_value = 'field{}'.format(i)
fields.append(tab_dict.get(key_value, 'MISSING'))
'''
# Save log.info() for debugging
log.info('tab_name = {}'.format(tab_name))
log.info('prime_key = {}'.format(prime_key))
log.info('fields = {}'.format(fields))
log.info('field_count = {}'.format(fc))
'''
if command == CMD_TABLE_EXISTS:
rc = 'select exists(select * from information_schema.tables where table_name=\'{}\')'.\
format(tab_name)
elif command == CMD_CREATE_TABLE:
rc = 'CREATE TABLE {} ( {} TEXT PRIMARY KEY, '.format(tab_name, prime_key)
for i in range(1,fc-1):
rc += '{} TEXT, '.format(fields[i])
rc += '{} TEXT) '.format(fields[fc-1])
elif command == CMD_DELETE_TABLE:
rc = 'DROP TABLE {};'.\
format(tab_name)
elif command == CMD_ENTER_RANDOM_DATA:
prime_key_value, value_str = self.gen_random_values(tab_name, prime_key, fields)
rc = 'INSERT INTO {} values({});'.\
format(tab_name, value_str)
elif command == CMD_GET_DATA:
utc_now_str = tab_dict.get('utc_now_str', None)
# TBD:
# Tried just getting alarms from datetime but did not work.
# Now just get all the data, then sort out later.
# Not practical for a real world implementation.
rc = 'SELECT * FROM {} where \'{}\' > \'{}\';'.\
format(tab_name, prime_key, utc_now_str)
return rc
def db_exists(self):
rc = False
cur = None
conn = None
try:
conn = psycopg2.connect(**self.db_config_dict)
cur = conn.cursor()
cur.execute('SELECT version()')
db_version = cur.fetchone()
log.info('PostgreSQL database version: {}'.format(db_version))
rc = True
except (Exception, psycopg2.DatabaseError) as error:
log.error('Error = {}'.format(error))
finally:
if conn is not None:
conn.close()
if cur is not None:
cur.close()
return rc
def table_exists(self):
rc = False
cur = None
conn = None
try:
conn = psycopg2.connect(**self.db_config_dict)
command = self.create_command_str(CMD_TABLE_EXISTS)
log.info('table_exists command = {}'.format(command))
cur = conn.cursor()
cur.execute(command)
rc = cur.fetchone()[0]
except (Exception, psycopg2.DatabaseError) as error:
log.error('Error = {}'.format(error))
finally:
if conn is not None:
conn.close()
if cur is not None:
cur.close()
return rc
def create_table(self):
rc = False
cur = None
conn = None
try:
conn = psycopg2.connect(**self.db_config_dict)
command = self.create_command_str(CMD_CREATE_TABLE)
log.info('create table command = {}'.format(command))
cur = conn.cursor()
cur.execute(command)
cur.close()
cur = None
conn.commit()
rc = True
except (Exception, psycopg2.DatabaseError) as error:
log.error('Error = {}'.format(error))
finally:
if conn is not None:
conn.close()
if cur is not None:
cur.close()
return rc
def delete_table(self):
rc = False
cur = None
conn = None
try:
conn = psycopg2.connect(**self.db_config_dict)
command = self.create_command_str(CMD_DELETE_TABLE)
log.info('delete table command = {}'.format(command))
cur = conn.cursor()
cur.execute(command)
cur.close()
cur = None
conn.commit()
rc = True
except (Exception, psycopg2.DatabaseError) as error:
log.error('Error = {}'.format(error))
finally:
if conn is not None:
conn.close()
if cur is not None:
cur.close()
return rc
def add_random_data(self):
rc = False
cur = None
conn = None
try:
conn = psycopg2.connect(**self.db_config_dict)
command = self.create_command_str(CMD_ENTER_RANDOM_DATA)
log.info('create table command = {}'.format(command))
cur = conn.cursor()
cur.execute(command)
cur.close()
cur = None
conn.commit()
rc = True
except (Exception, psycopg2.DatabaseError) as error:
log.error('Error = {}'.format(error))
finally:
if conn is not None:
conn.close()
if cur is not None:
cur.close()
return rc
def get_data_since(self, utc_now_str):
rc = []
cur = None
conn = None
try:
conn = psycopg2.connect(**self.db_config_dict)
self.table_config_dict.update({'utc_now_str':utc_now_str})
command = self.create_command_str(CMD_GET_DATA)
log.info('get data command = {}'.format(command))
cur = conn.cursor()
cur.execute(command)
rc = cur.fetchall()
cur.close()
cur = None
except (Exception, psycopg2.DatabaseError) as error:
log.error('Error = {}'.format(error))
finally:
if conn is not None:
conn.close()
if cur is not None:
cur.close()
return rc
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Mock-up EoS module for testing generic property packages
"""
from pyomo.environ import Var, sqrt, units as pyunits
from idaes.models.properties.modular_properties.eos.eos_base import EoSBase
class DummyEoS(EoSBase):
# Add attribute indicating support for electrolyte systems
electrolyte_support = True
@staticmethod
def common(b, pobj):
# Create dummy var to be returned by expression calls
# This Var is used to create expressions where required.
if not hasattr(b, "dummy_var"):
b.dummy_var = Var(initialize=42)
# Counter for how many times this method is called
# This is used to ensure that the method has been called by checking
# that the counter has advanced
if hasattr(b, "eos_common"):
b.eos_common += 1
else:
b.eos_common = 1
@staticmethod
def calculate_scaling_factors(b, pobj):
pass
@staticmethod
def build_parameters(b):
if not hasattr(b, "dummy_param"):
b.dummy_param = Var(initialize=42)
@staticmethod
def act_phase_comp(b, p, j):
return 42
@staticmethod
def act_coeff_phase_comp(b, p, j):
return 1
@staticmethod
def compress_fact_phase(b, p):
return 42
@staticmethod
def cp_mol_phase(b, p):
return 42
@staticmethod
def cp_mol_phase_comp(b, p, j):
return 42
@staticmethod
def cv_mol_phase(b, p):
return 42
@staticmethod
def cv_mol_phase_comp(b, p, j):
return 42
@staticmethod
def dens_mass_phase(b, p):
return 42
@staticmethod
def dens_mol_phase(b, p):
return 55e3 * pyunits.mol / pyunits.m**3
@staticmethod
def energy_internal_mol_phase(b, p):
return 2e2 * b.temperature
@staticmethod
def energy_internal_mol_phase_comp(b, p, j):
return 2e2 * b.temperature
@staticmethod
def enth_mol_phase(b, p):
return 1e2 * b.temperature
@staticmethod
def enth_mol_phase_comp(b, p, j):
return 1e2 * b.temperature
@staticmethod
def entr_mol_phase(b, p):
return 42
@staticmethod
def entr_mol_phase_comp(b, p, j):
return 42
@staticmethod
def fug_phase_comp(b, p, j):
return 42
@staticmethod
def fug_coeff_phase_comp(b, p, j):
return 42
@staticmethod
def gibbs_mol_phase(b, p):
return 42
@staticmethod
def gibbs_mol_phase_comp(b, p, j):
return 42
@staticmethod
def isothermal_speed_sound_phase(b, p):
return 250
@staticmethod
def isentropic_speed_sound_phase(b, p):
return sqrt(b.heat_capacity_ratio_phase[p]) * b.isothermal_speed_sound_phase[p]
@staticmethod
def vol_mol_phase(b, p):
return 42
@staticmethod
def vol_mol_phase_comp(b, p, j):
return 42
@staticmethod
def log_act_phase_comp(b, p, j):
return 1
@staticmethod
def log_act_phase_solvents(b, p):
return 1
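# Illustrative use in a test (hypothetical block object `blk` and phase object `pobj`):
#   DummyEoS.common(blk, pobj)   # first call creates blk.dummy_var and sets blk.eos_common = 1
#   DummyEoS.common(blk, pobj)   # later calls only advance the counter
#   assert blk.eos_common == 2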
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod
from functools import wraps
WithABCMeta = ABCMeta(str('WithABCMeta'), (object,), {})
class Disposable(WithABCMeta):
'''
Exposes method to release resources held by the class.
'''
_disposed = False
def dispose(self):
'''
Disposes of resources that are owned by the object.
.. note:: This method is idempotent.
'''
if not self._disposed:
self.perform_dispose()
self._disposed = True
@abstractmethod
def perform_dispose(self):
'''
Performs actual disposing, needs to be overridden by a subclass.
.. note:: This method is not supposed to be called directly by the user code. Please
use :py:meth:`dispose` instead.
'''
def __del__(self):
self.dispose()
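# gather() walks a linked sequence: starting from get_first(owner), it repeatedly applies
# get_next to the current element and returns all visited elements as a tuple.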
def gather(owner, get_first, get_next):
elements = []
current = get_first(owner)
while current:
elements.append(current)
current = get_next(current)
return tuple(elements)
def nativerepr(repr_function):
'''
__repr__ decorator that makes sure __repr__ returns result of the right type (byte string
for Python 2.x, (unicode) string for Python 3). Performs conversion from unicode to byte
string when necessary.
'''
@wraps(repr_function)
def wrapper(self):
result = repr_function(self)
if str is not bytes:
assert isinstance(result, str), 'Always return (unicode) string from __repr__'
else:
assert isinstance(result, basestring)
if not isinstance(result, str):
result = result.encode('utf-8')
return result
return wrapper
def autoassign(function):
'''
Decorator for instance methods, copies all keyword arguments to instance members
and then calls the decorated method passing all the parameters.
.. note:: Decorated method accepts only keyword arguments.
'''
@wraps(function)
def wrapper(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
return function(self, **kwargs)
return wrapper
class ReprMixin(object):
repr_members = ()
@nativerepr
def __repr__(self):
components = ((member, getattr(self, member)) for member in self.repr_members)
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % kv for kv in components)
)
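# Illustrative usage sketch (not part of this module; class and attribute names are made up):
#
#   class Connection(Disposable, ReprMixin):
#       repr_members = ('host', 'port')
#
#       @autoassign
#       def __init__(self, host=None, port=None):
#           pass
#
#       def perform_dispose(self):
#           print('closing %s:%s' % (self.host, self.port))
#
#   repr(Connection(host='localhost', port=5432))
#   # -> "Connection(host='localhost', port=5432)"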
|
#!/usr/bin/python
from app import app
app.run(host="0.0.0.0", port=5000, debug=True)
|
from datetime import datetime
from liebraryrest.database import db, Model
from sqlalchemy import UniqueConstraint
class User(Model):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(50), nullable=False, index=True)
bookings = db.relationship('Booking', backref='user', lazy="dynamic")
loans = db.relationship('Loan', backref='user', lazy="dynamic")
def __init__(self, nickname):
self.nickname = nickname
@classmethod
def get_by_id(cls, user_id):
return cls.query.get(int(user_id))
def serialize(self, includes=None):
d = super().serialize()
d.pop('bookings')
d.pop('loans')
return d
class Author(Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False, index=True)
birth_date = db.Column(db.DateTime, nullable=False)
books = db.relationship('Book', backref='author', lazy="dynamic")
def __init__(self, first_name, last_name, birth_date):
self.name = "{0} {1}".format(first_name, last_name)
self.birth_date = birth_date
@classmethod
def get_by_id(cls, author_id):
return cls.query.get(int(author_id))
def serialize(self, includes=None):
d = super().serialize()
d['birth_date'] = d['birth_date'].isoformat()
if (includes is not None) and 'books' in includes:
d['books'] = Book.serialize_list(self.books.all())
else:
d.pop('books')
return d
class Book(Model):
isbn = db.Column(db.String(13), primary_key=True)
title = db.Column(db.Text, nullable=False, index=True)
abstract = db.Column(db.Text, nullable=True)
pages = db.Column(db.Integer, nullable=True)
publisher = db.Column(db.String(250), nullable=True)
quantity = db.Column(db.Integer, default=1)
author_id = db.Column(db.Integer, db.ForeignKey('author.id'), nullable=False)
bookings = db.relationship('Booking', backref='book', lazy="dynamic")
def __init__(self, isbn, title, author, abstract=None,
pages=None, publisher=None, quantity=1):
self.isbn = isbn
self.title = title
self.author = author
self.abstract = abstract
self.pages = pages
self.publisher = publisher
self.quantity = quantity
@classmethod
def get_by_isbn(cls, book_isbn):
return cls.query.get(book_isbn)
def serialize(self, includes=None):
d = super().serialize()
if (includes is not None) and 'author' in includes:
d['author'] = self.author.serialize()
else:
d.pop('author')
d.pop('bookings')
return d
def is_available(self):
return self.quantity > 0
class Booking(Model):
__table_args__ = (UniqueConstraint('book_isbn', 'user_id', name='_book_isbn_user_id'),)
id = db.Column(db.Integer, primary_key=True)
book_isbn = db.Column(db.String(13), db.ForeignKey('book.isbn'), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
def __init__(self, book, user):
self.book = book
self.user = user
self.created_at = datetime.now()
def serialize(self, includes=None):
return {
'id': self.id,
'isbn': self.book.isbn,
'user_id': self.user.id,
'created_at': self.created_at.isoformat()
}
@classmethod
def get_by_isbn_and_user_id(cls, book_isbn, user_id):
return cls.query.filter(Booking.book_isbn == book_isbn, Booking.user_id == user_id).first()
class Loan(Model):
__table_args__ = (UniqueConstraint('book_isbn', 'user_id', name='_book_isbn_user_id_on_loan'),)
id = db.Column(db.Integer, primary_key=True)
book_isbn = db.Column(db.String(13), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
booking_id = db.Column(db.Integer, db.ForeignKey('booking.id'))
started_at = db.Column(db.DateTime, nullable=False)
finished_at = db.Column(db.DateTime, nullable=True)
# define relationship
booking = db.relationship('Booking', backref='booking')
def __init__(self, booking):
self.book_isbn = booking.book_isbn
self.user_id = booking.user_id
self.booking = booking
self.started_at = datetime.now()
def serialize(self, includes=None):
d = super().serialize()
return {
'id': self.id,
'book_isbn': self.book_isbn,
'user_id': self.user_id,
'booking_id': self.booking.id,
'started_at': self.started_at.isoformat()
}
@classmethod
def get_by_booking_id(cls, booking_id):
return cls.query.filter(Loan.booking_id == booking_id).first()
@classmethod
def get_by_isbn_and_user_id(cls, isbn, user_id):
return cls.query.filter(Loan.book_isbn == isbn, Loan.user_id == user_id).first()
|
import statsmodels.api as sm
import statsmodels.formula.api as smf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
'''
AQ = pd.read_csv('Beijing.csv')#read data
dic = {1: "Winter",
2: "Winter",
3: "Spring",
4: "Spring",
5: "Spring",
6: "Summer",
7: "Summer",
8: "Summer",
9: "Fall",
10: "Fall",
11: "Fall",
12: "Winter"}
AQ['season'] = AQ['month'].map(dic)
AQ = AQ.dropna()
AQ = AQ[AQ['pm2.5'] > 0] #remove unreasonable response values
AQ['pm25_log'] = np.log(AQ['pm2.5']) #do the log transformation on the response variable
# remove the outliers
AQ_cv = AQ[AQ['cbwd'] == 'cv']
AQ_cv = AQ_cv[(AQ_cv['pm25_log'] > 2.2) & (AQ_cv['pm25_log'] < 6.8)]
AQ_NE = AQ[AQ['cbwd'] == 'NE']
AQ_NE = AQ_NE[(AQ_NE['pm25_log'] > 0.5)]
AQ_NW = AQ[AQ['cbwd'] == 'NW']
AQ_NW = AQ_NW[(AQ_NW['pm25_log'] > 0.5)]
AQ_SE = AQ[AQ['cbwd'] == 'SE']
AQ_SE.sort_values(['pm25_log'], ascending=[False])
AQ_SE = AQ_SE[(AQ_SE['pm25_log'] > 0.5) & (AQ_SE['pm25_log'] < 6.291569)]
AQ_new = pd.concat([AQ_cv, AQ_NE, AQ_NW, AQ_SE])
#fit the model
mixed = smf.mixedlm("pm25_log ~ year+month+day+hour+DEWP+TEMP+PRES+Is+Ir", AQ_new, groups = AQ_new["cbwd"], re_formula="~hour+PRES")
mixed_fit = mixed.fit()
#print the summary
print(mixed_fit.summary())
model = mixed_fit
plt.scatter(AQ_new['pm25_log'] - model.resid, model.resid, alpha = 0.5)
plt.title("Residual vs. Fitted")
plt.xlabel("Fitted Values")
plt.ylabel("Residuals")
plt.show()
'''
def add5(column):
updated = column
for i in range(0, len(updated)):
updated[i] = updated[i] + 5
return updated
data = pd.read_csv("output_files/summary.csv")
modelData = data
#Remove string data
del modelData['id']
#del modelData['gender']
del modelData['countryOfOrigin']
modelData['finalGoalAchievement'] = modelData['finalGoalAchievement'].astype(int)
modelData['milestoneAchievement'] = modelData['milestoneAchievement'].astype(int)
modelData['futureWork'] = modelData['futureWork'].astype(int)
modelData['finalGoalAchievementWithoutGoalChange'] = modelData['finalGoalAchievementWithoutGoalChange'].astype(int)
'''
modelData['agitatedCalmChange'] = modelData['agitatedCalmChange'] + 5
modelData['quiescentSurprisedChange'] = modelData['quiescentSurprisedChange'] + 5
modelData['motivationDuringAfterChange'] = modelData['motivationDuringAfterChange'] + 5
modelData['anxiousRelaxedChange'] = modelData['anxiousRelaxedChange'] + 5
modelData['efficacyChange'] = modelData['efficacyChange'] + 5
'''
dependentVariable = "finalGoalAchievementWithoutGoalChange"
mixed = smf.mixedlm(dependentVariable + " ~"
" milestoneAchievement+"
"milestoneAdherence+"
"agreementPercentage+"
"futureWork+"
"priorEfficacy+"
"postEfficacy+"
"efficacyChange+"
"education+"
"fakeNatural+"
"machineHuman+"
"consciousUnconscious+"
"artificialLifelike+"
"rigidElegant+"
"deadAlive+"
"stagnantLively+"
"mechanicalOrganic+"
"inertInteractive+"
"apatheticResponsive+"
"dislikeLike+"
"unfriendlyFriendly+"
"unkindKind+"
"unpleasantPleasant+"
"awfulNice+"
"incompetentCompetent+"
"ignorantKnowledgeable+"
"irresponsibleResponsible+"
"unintelligentIntelligent+"
"foolishSensible+"
"anxiousRelaxedBefore+"
"agitatedCalmBefore+"
"quiescentSurprisedBefore+"
"anxiousRelaxedAfter+"
"agitatedCalmAfter+"
"quiescentSurprisedAfter+"
"anxiousRelaxedChange+"
"agitatedCalmChange+"
"quiescentSurprisedChange+"
"typeOfRelationship+"
"usefulOrNotDiabetes+"
"usefulOrNotObesity+"
"convenience+"
"preference+"
"motivationBefore+"
"motivation+"
"motivationAfter+"
"motivationBeforeDuringChange+"
"motivationDuringAfterChange+"
"engagement+"
"autonomy+"
"positiveNegative+"
"similarSystem+"
"duration+"
"numberOfSessions+"
"understandingDiabetes", modelData, groups = modelData["condition"],
re_formula="~firstTime+"
"familyHistoryDiabetes+"
"firstTime+"
"gender+"
"age+"
"diabetes")
mixed_fit = mixed.fit()
#print the summary
print(mixed_fit.summary())
model = mixed_fit
plt.scatter(data[dependentVariable] - model.resid_working, model.resid_working, alpha = 0.5)
plt.title("Residual vs. Fitted for Final Goal Achievement without Goal Change")
plt.xlabel("Fitted Values")
plt.ylabel("Residuals")
plt.show()
|
"""
Command line utility to trigger indexing of bundles from DSS into Azul
"""
import argparse
from collections import (
defaultdict,
)
import fnmatch
import logging
import sys
from typing import (
List,
)
from args import (
AzulArgumentHelpFormatter,
)
from azul import (
config,
require,
)
from azul.azulclient import (
AzulClient,
)
from azul.bigquery_reservation import (
SlotManager,
)
from azul.logging import (
configure_script_logging,
)
from azul.plugins.repository import (
tdr,
)
logger = logging.getLogger(__name__)
defaults = AzulClient()
parser = argparse.ArgumentParser(description=__doc__, formatter_class=AzulArgumentHelpFormatter)
parser.add_argument('--prefix',
metavar='HEX',
default=config.dss_query_prefix,
help='A bundle UUID prefix. This must be a sequence of hexadecimal characters. Only bundles whose '
'UUID starts with the given prefix will be indexed. If --partition-prefix-length is given, '
'the prefix of a partition will be appended to the prefix specified with --prefix.')
parser.add_argument('--workers',
metavar='NUM',
dest='num_workers',
default=defaults.num_workers,
type=int,
help='The number of workers that will be sending bundles to the indexer concurrently')
parser.add_argument('--partition-prefix-length',
metavar='NUM',
default=0,
type=int,
help='The length of the bundle UUID prefix by which to partition the set of bundles matching the '
'query. Each query partition is processed independently and remotely by the indexer lambda. '
'The lambda queries the DSS and queues a notification for each matching bundle. If 0 (the '
'default) no partitioning occurs, the DSS is queried locally and the indexer notification '
'endpoint is invoked for each bundle individually and concurrently using worker threads. '
'This is orders of magnitude slower than partitioned indexing.')
parser.add_argument('--catalogs',
nargs='+',
metavar='NAME',
default=[
c for c in config.catalogs
if c not in config.integration_test_catalogs
],
choices=config.catalogs,
help='The names of the catalogs to reindex.')
parser.add_argument('--sources',
default=config.reindex_sources,
nargs='+',
help='Limit remote reindexing to a subset of the configured sources. '
'Supports shell-style wildcards to match multiple sources per argument. '
'Must be * for local reindexing, i.e. if --partition-prefix-length is not given.')
parser.add_argument('--delete',
default=False,
action='store_true',
help='Delete all Elasticsearch indices in the current deployment. '
'Implies --create when combined with --index.')
parser.add_argument('--index',
default=False,
action='store_true',
help='Index all matching metadata in the configured repository. '
'Implies --create when combined with --delete.')
parser.add_argument('--create',
default=False,
action='store_true',
help='Create all Elasticsearch indices in the current deployment. '
'Implied when --delete and --index are given.')
parser.add_argument('--purge',
default=False,
action='store_true',
help='Purge the queues before taking any action on the indices.')
parser.add_argument('--nowait', '--no-wait',
dest='wait',
default=True,
action='store_false',
help="Don't wait for queues to empty before exiting script.")
parser.add_argument('--verbose',
default=False,
action='store_true',
help='Enable verbose logging')
parser.add_argument('--no-slots',
dest='manage_slots',
default=True,
action='store_false',
help='Suppress management of BigQuery slot commitments.')
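# Typical invocations (illustrative; the script name, catalog and source values are placeholders):
#   python reindex.py --delete --index --purge
#   python reindex.py --index --catalogs dcp2 --partition-prefix-length 2 --sources 'tdr:*'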
def main(argv: List[str]):
args = parser.parse_args(argv)
if args.verbose:
config.debug = 1
configure_script_logging(logger)
azul = AzulClient(num_workers=args.num_workers)
source_globs = set(args.sources)
if args.partition_prefix_length:
sources_by_catalog = defaultdict(set)
globs_matched = set()
for catalog in args.catalogs:
sources = azul.catalog_sources(catalog)
for source_glob in source_globs:
matches = fnmatch.filter(sources, source_glob)
if matches:
globs_matched.add(source_glob)
logger.debug('Source glob %r matched sources %r in catalog %r',
source_glob, matches, catalog)
sources_by_catalog[catalog].update(matches)
unmatched = source_globs - globs_matched
if unmatched:
logger.warning('Source(s) not found in any catalog: %r', unmatched)
require(any(sources_by_catalog.values()),
'No valid sources specified for any catalog')
else:
if source_globs == {'*'}:
sources_by_catalog = {
catalog: azul.catalog_sources(catalog)
for catalog in args.catalogs
}
else:
parser.error('Cannot specify sources when performing a local reindex')
assert False
azul.reset_indexer(args.catalogs,
purge_queues=args.purge,
delete_indices=args.delete,
create_indices=args.create or args.index and args.delete)
if args.index:
logger.info('Queuing notifications for reindexing ...')
slot_manager = None
num_notifications = 0
for catalog, sources in sources_by_catalog.items():
if sources:
if (
args.manage_slots
and slot_manager is None
and isinstance(azul.repository_plugin(catalog), tdr.Plugin)
):
slot_manager = SlotManager()
slot_manager.ensure_slots_active()
if args.partition_prefix_length:
azul.remote_reindex(catalog, args.prefix, args.partition_prefix_length, sources)
num_notifications = None
else:
num_notifications += azul.reindex(catalog, args.prefix)
else:
logger.info('Skipping catalog %r (no matching sources)', catalog)
if args.wait:
if num_notifications == 0:
logger.warning('No notifications for prefix %r and catalogs %r were sent',
args.prefix, args.catalogs)
else:
azul.wait_for_indexer()
if __name__ == "__main__":
main(sys.argv[1:])
|
#***************************************************************************
# Copyright Jaime Machuca
#***************************************************************************
# Title : sc_SonyQX1.py
#
# Description : This file contains a class to use the Sony QX range of cams
# It finds a camera using SSDP discovery and returns it as an
# object. If a camera is not found it returns an error value
# that should be catched by the application. Initially it will
# have support for triggering the camera, and downloading the
# latest image file. Other functions will be added gradually.
#
# Environment : Python 2.7 Code. Intended to be included in a Mavproxy Module
#
# Responsible : Jaime Machuca
#
# License : CC BY-NC-SA
#
# Editor Used : Xcode 6.1.1 (6A2008a)
#
#****************************************************************************
#****************************************************************************
# HEADER-FILES (Only those that are needed in this file)
#****************************************************************************
# System Header files and Module Headers
import os, sys, time, math, cv2, struct, fcntl
from datetime import datetime
# Module Dependent Headers
import requests, json, socket, StringIO
import xml.etree.ElementTree as ET
# Own Headers
import ssdp
#****************************************************************************
# Constants
#****************************************************************************
# Target Initial Camera Values
targetShutterSpeed = 1600
targetAperture = 120
targetISOValue = "AUTO"
#****************************************************************************
# Class name : SmartCamera_SonyQX
#
# Public Methods : boGetLatestImage
# u32GetImageCounter
# boTakePicture
# boSetExposureMode
# boSetShutterSpeed
# boSetAperture
# boSetISO
# boZoomIn
# boZoomOut
#
# Private Methods : __sFindInterfaceIPAddress
# __sFindCameraURL
# __sMakeCall
# __sSimpleCall
#****************************************************************************
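# Example usage (illustrative; "wlan0" is a placeholder for the network interface the
# camera is reachable on):
#   oCam = SmartCamera_SonyQX(0, "wlan0")
#   if oCam.boValidCameraFound():
#       oCam.boTakePicture()
#       oCam.boGetLatestImage()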
class SmartCamera_SonyQX():
#****************************************************************************
# Method Name : __init__ Class Initializer
#
# Description : Initializes the class
#
# Parameters : u8instance Camera Instance Number
# snetInterface String containing the Network Interface
# Name where we should look for the cam
#
# Return Value : None
#
# Author : Jaime Machuca, Randy Mackay
#
#****************************************************************************
def __init__(self, u8Instance, sNetInterface):
# record instance
self.u8Instance = u8Instance
self.sConfigGroup = "Camera%d" % self.u8Instance
# background image processing variables
self.u32ImgCounter = 0 # num images requested so far
# latest image captured
self.sLatestImageURL = None # String with the URL to the latest image
# latest image downloaded
self.sLatestImageFilename = None #String with the Filename for the last downloaded image
self.sLatestFileName = None #String with the camera file name for the last image taken
self.vehicleLat = 0.0 # Current Vehicle Latitude
self.vehicleLon = 0.0 # Current Vehicle Longitude
self.vehicleHdg = 0.0 # Current Vehicle Heading
self.vehicleAMSL = 0.0 # Current Vehicle Altitude above mean sea level
self.vehicleRoll = 0.0 # Current Vehicle Roll
self.vehiclePitch = 0.0 # Current Vehicle Pitch
# Look Camera and Get URL
self.sCameraURL = self.__sFindCameraURL(sNetInterface)
if self.sCameraURL is None:
print("No QX camera found, failed to open QX camera %d" % self.u8Instance)
else:
self.__openGeoTagLogFile() # open geoTag Log
self.boCameraInitialSetup() # Setup Initial camera parameters
#****************************************************************************
# Method Name : __str__
#
# Description : Returns a human readable string name for the instance
#
# Parameters : none
#
# Return Value : String with object instance name
#
# Author : Randy Mackay
#
#****************************************************************************
# __str__ - print position vector as string
def __str__(self):
return "SmartCameraSonyQX Object for %s" % self.sConfigGroup
#****************************************************************************
# Method Name : boCameraInitialSetup
#
# Description : Sets Initial Camera Parameters
#
# Parameters : None
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def boCameraInitialSetup(self):
print("Setting up Camera Initial Parameters")
# Check if we need to do 'startRecMode'
APIList = self.__sSimpleCall("getAvailableApiList")
# For those cameras which need it
if 'startRecMode' in (APIList['result'])[0]:
print("Need to send startRecMode, sending and waiting 5 sec...")
self.__sSimpleCall("startRecMode")
time.sleep(1)
print("4 sec")
time.sleep(1)
print("3 sec")
time.sleep(1)
print("2 sec")
time.sleep(1)
print("1 sec")
time.sleep(1)
# Set Postview Size to Original size to get the real image filename
sResponse = self.__sSimpleCall("setPostviewImageSize", adictParams=["Original"])
# Set Mode to Shutter Priority if available
SupportedModes = self.__sSimpleCall("getSupportedExposureMode")
if 'Shutter' in (SupportedModes['result'])[0]:
self.boSetExposureMode("Shutter")
#elif 'Manual' in (SupportedModes['result'])[0]:
# self.boSetExposureMode("Manual")
else:
print("Error no Shutter Priority Mode")
# Set Target Shutter Speed
self.boSetShutterSpeed(targetShutterSpeed)
# Set Target ISO Value
self.boSetISO(targetISOValue)
#****************************************************************************
# Method Name : boSet_GPS
#
# Description : Gets the GPS Position from the provided message
#
# Parameters : mGPSMessage GPS Mavlink Message type
#
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def boSet_GPS(self, mGPSMessage):
if mGPSMessage.get_type() == 'GLOBAL_POSITION_INT':
(self.vehicleLat, self.vehicleLon, self.vehicleHdg, self.vehicleAMSL) = (mGPSMessage.lat*1.0e-7, mGPSMessage.lon*1.0e-7, mGPSMessage.hdg*0.01, mGPSMessage.alt*0.001)
#****************************************************************************
# Method Name : boSet_Attitude
#
# Description : Gets the vehicle attitude from the provided message
#
# Parameters : mAttitudeMessage MAVlink Attitude Message type
#
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def boSet_Attitude(self, mAttitudeMessage):
if mAttitudeMessage.get_type() == 'ATTITUDE':
(self.vehicleRoll, self.vehiclePitch) = (math.degrees(mAttitudeMessage.roll), math.degrees(mAttitudeMessage.pitch))
#****************************************************************************
# Method Name : __geoRef_write
#
# Description : Writes GeoReference to file
#
# Parameters : sImageFileName File name of image to be entered into the log
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
# Geo reference log for all the GoPro pictures
def __geoRef_write(self, sImageFileName):
#self.geoRef_writer.write(datetime.now().strftime('%d-%m-%Y %H:%M:%S.%f')[:-3])
self.geoRef_writer.write(sImageFileName)
self.geoRef_writer.write(",%f,%f,%f,%f,%f,%f" % (self.vehicleLat, self.vehicleLon, self.vehicleAMSL, self.vehicleRoll, self.vehiclePitch,self.vehicleHdg))
self.geoRef_writer.write('\n')
self.geoRef_writer.flush()
#****************************************************************************
# Method Name : get_real_Yaw
#
# Description : Helper method to get the real Yaw
#
# Parameters : yaw Vehicle Yaw
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def get_real_Yaw(self, yaw):
if (yaw < 0):
return yaw+360
return yaw
#****************************************************************************
# Method Name : __writeGeoRefToFile
#
# Description : Writes the Georeference of the image to the log. NOT SURE
# IF IT IS DUPLICATED FOR A GOOD REASON.
#
# Parameters : sImageFileName1 Image file name
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def __writeGeoRefToFile(self, sImageFileName1):
self.__geoRef_write(sImageFileName1)
#****************************************************************************
# Method Name : __openGeoTagLogFile
#
# Description : Checks for existing log files and creates a new Log file
# with an incremented index number
#
# Parameters : None
#
# Return Value : None
#
# Author : Jaime Machuca
#
#****************************************************************************
def __openGeoTagLogFile(self):
#Open GeoTag Log File
i = 0
while os.path.exists('/sdcard/log/geoRef%s.log' % i):
print('checking /sdcard/log/geoRef%s.log' % i)
i += 1
self.geoRef_writer = open('/sdcard/log/geoRef%s.log' % i, 'w', 0)
self.geoRef_writer.write('Filename, Latitude, Longitude, Alt (AMSL), Roll, Pitch, Yaw\n')
print('Opened GeoTag Log File with Filename: geoRef%s.log' % i)
#****************************************************************************
# Method Name : __sFindInterfaceIPAddress
#
# Description : Gets the IP Address of the interface name requested
#
# Parameters : sInterfaceName
#
# Return Value : String with the IP Address for the requested interface
#
# Author : Jaime Machuca
#
#****************************************************************************
def __sFindInterfaceIPAddress(self,sInterfaceName):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', sInterfaceName[:15])
)[20:24])
#****************************************************************************
# Method Name : __sMakeCall
#
# Description : Sends a json encoded command to the QX camera URL
#
# Parameters : sService
# adictPayload
#
# Return Value : JSON encoded string with camera response
#
# Author : Andrew Tridgell, Jaime Machuca
#
#****************************************************************************
def __sMakeCall(self, sService, adictPayload):
sURL = "%s/%s" % (self.sCameraURL, sService)
adictHeaders = {"content-type": "application/json"}
sData = json.dumps(adictPayload)
sResponse = requests.post(sURL,
data=sData,
headers=adictHeaders).json()
return sResponse
#****************************************************************************
# Method Name : __sSimpleCall
#
# Description : Articulates a camera service command to send to the QX
# camera
#
# Parameters : sMethod, command name as stated in Sony's API documentation
# sTarget, API Service type
# adictParams, command specific parameters (see Sony's API Documentation)
# u8Id, ??
# sVersion, API version for the command (see Sony's API Documentation)
#
# Return Value : JSON encoded string with camera response
#
# Author : Andrew Tridgell, Jaime Machuca
#
#****************************************************************************
def __sSimpleCall(self, sMethod, sTarget="camera", adictParams=[], u8Id=1, sVersion="1.0"):
print("Calling %s" % sMethod)
return self.__sMakeCall(sTarget,
{ "method" : sMethod,
"params" : adictParams,
"id" : u8Id,
"version" : sVersion })
#****************************************************************************
# Method Name : __sFindCameraURL
#
# Description : Sends an SSDP request to look for a QX camera on the
# specified network interface
#
# Parameters : sInterface, String with the network interface name
#
# Return Value : String containing the URL for sending commands to the
# Camera
#
# Author : Andrew Tridgell, Jaime Machuca
#
#****************************************************************************
def __sFindCameraURL(self, sInterface):
sSSDPString = "urn:schemas-sony-com:service:ScalarWebAPI:1"
sInterfaceIP = self.__sFindInterfaceIPAddress(sInterface)
print ("Interface IP Address: %s" % sInterfaceIP)
sRet = ssdp.discover(sSSDPString, if_ip=sInterfaceIP)
if len(sRet) == 0:
return None
sDMS_URL = sRet[0].location
print("Fetching DMS from %s" % sDMS_URL)
xmlReq = requests.request('GET', sDMS_URL)
xmlTree = ET.ElementTree(file=StringIO.StringIO(xmlReq.content))
for xmlElem in xmlTree.iter():
if xmlElem.tag == '{urn:schemas-sony-com:av}X_ScalarWebAPI_ActionList_URL':
print("Found camera at %s" % xmlElem.text)
return xmlElem.text
return None
#****************************************************************************
# Method Name : boValidCameraFound
#
# Description : Returns whether or not a camera has been found. This
# should be used to try to find the camera again, or
# destroy the object.
#
# Parameters : none
#
# Return Value : True if camera has been found
# False if no camera has been found
#
# Author : Jaime Machuca
#
#****************************************************************************
def boValidCameraFound(self):
print ("Checking URL at %s" % self.sCameraURL)
if self.sCameraURL is None:
return False
return True
#****************************************************************************
# Method Name : boGetLatestImage
#
# Description : Downloads the latest image taken by the camera and then
# saves it to a file name composed of the camera instance
# and image number.
#
# Parameters : none
#
# Return Value : True if it was successful
# False if no image was downloaded
#
# Author : Jaime Machuca
#
#****************************************************************************
def boGetLatestImage(self):
self.sLatestImageFilename = '%s_image_%s.jpg' % (self.sConfigGroup,self.u32ImgCounter)
print ("Downloading, ",self.sLatestImageFilename)
imgReq = requests.request('GET', self.sLatestImageURL)
if imgReq is not None:
open(self.sLatestImageFilename, 'w').write(imgReq.content)
return True
return False
#****************************************************************************
# Method Name : sGetLatestImageFilename
#
# Description : Returns the filename of the last image downloaded from
# the camera
#
# Parameters : none
#
# Return Value : String containing the image file name
#
# Author : Jaime Machuca
#
#****************************************************************************
def sGetLatestImageFilename(self):
return self.sLatestImageFilename
#****************************************************************************
# Method Name : u32GetImageCounter
#
# Description : Returns the number of images taken
#
# Parameters : none
#
# Return Value : Integer with the number of images
#
# Author : Jaime Machuca
#
#****************************************************************************
def u32GetImageCounter(self):
return self.u32ImgCounter
#****************************************************************************
# Method Name : boZoomIn
#
# Description : Commands the camera to do a Zoom In step
#
# Parameters : None
#
# Return Value : True if successful
# False if an error was received
#
# Author : Jaime Machuca
#
#****************************************************************************
def boZoomIn(self):
# Send command to set Exposure Mode
sResponse = self.__sSimpleCall("actZoom", adictParams=["in","1shot"])
# Check response for a successful result
if 'result' in sResponse:
print ("Zoomed in")
return True
# In case of an error, return false
print ("Failed to Zoom")
return False
#****************************************************************************
# Method Name : boZoomOut
#
# Description : Commands the camera to do a Zoom Out step
#
# Parameters : None
#
# Return Value : True if successful
# False if an error was received
#
# Author : Jaime Machuca
#
#****************************************************************************
def boZoomOut(self):
# Send command to set Exposure Mode
sResponse = self.__sSimpleCall("actZoom", adictParams=["out","1shot"])
# Check response for a successful result
if 'result' in sResponse:
print ("Zoomed out")
return True
# In case of an error, return false
print ("Failed to Zoom")
return False
#****************************************************************************
# Method Name : boSetExposureMode
#
# Description : Commands the camera to set a specific ShootingMode
#
# Parameters : Exposure Mode String
# Program Auto, Aperture, Shutter, Manual, Intelligent Auto, Superior Auto
#
# Return Value : True if successful
# False if an error was received
#
# Author : Jaime Machuca
#
#****************************************************************************
def boSetExposureMode(self,sExposureMode):
# Send command to set Exposure Mode
sResponse = self.__sSimpleCall("setExposureMode", adictParams=[sExposureMode])
# Check response for a successful result
if 'result' in sResponse:
time.sleep(0.25)
sResponse = self.__sSimpleCall("getExposureMode")
if sExposureMode not in sResponse["result"]:
print ("Failed to set Exposure Mode, current value: %s" %sResponse["result"])
return False
print ("Exposure Mode set to %s" % sExposureMode)
return True
# In case of an error, return false
print ("Failed to set Exposure Mode")
return False
#****************************************************************************
# Method Name : boSetShutterSpeed
#
# Description : Commands the camera to set the Shutter Speed
#
# Parameters : Integer with the shutter speed divisor
# i.e. 1/1000 = 1000
# NOTE: This will only work for shutter speeds smaller than 1 sec
#
# Return Value : True if successful
# False if an error was received
#
# Author : Jaime Machuca
#
#****************************************************************************
def boSetShutterSpeed(self,u16ShutterSpeed):
# Create Shutter Speed String
sShutterSpeed = "1/%s" % str(u16ShutterSpeed)
# Send command to set Exposure Mode
sResponse = self.__sSimpleCall("setShutterSpeed", adictParams=[sShutterSpeed])
# Check response for a successful result
if 'result' in sResponse:
time.sleep(0.25)
sResponse = self.__sSimpleCall("getShutterSpeed")
if sShutterSpeed not in sResponse["result"]:
print ("Failed to set Shutter Speed, current value: %s" %sResponse["result"])
return False
print ("Shutter Speed set to %s" % sShutterSpeed)
return True
# In case of an error, return false
print ("Failed to set Shutter Speed")
return False
#****************************************************************************
# Method Name : boSetAperture
#
# Description : Commands the camera to set a lens Apperture
#
# Parameters : F number * 10
# i.e. F 2.8 = 28
#
# Return Value : True if successful
# False if an error was received
#
# Author : Jaime Machuca
#
#****************************************************************************
def boSetAperture(self,u8Aperture):
# Create Aperture String (cast one of the numbers to float to get a float)
fFvalue = u8Aperture / float(10)
sFValue = str(fFvalue)
# Send command to set Exposure Mode
sResponse = self.__sSimpleCall("setFNumber", adictParams=[sFValue])
# Check response for a successful result
if 'result' in sResponse:
time.sleep(0.25)
sResponse = self.__sSimpleCall("getFNumber")
if sFValue not in sResponse["result"]:
print ("Failed to set aperture, current value: %s" %sResponse["result"])
return False
print ("Aperture set to %s" % sFValue)
return True
# In case of an error, return false
print ("Failed to set aperture")
return False
#****************************************************************************
# Method Name : boSetISO
#
# Description : Commands the camera to set an ISO number
#
# Parameters : ISO Value
# 80, 100, 1000, 3200, etc...
#
# Return Value : True if successful
# False if an error was received
#
# Author : Jaime Machuca
#
#****************************************************************************
def boSetISO(self,u16ISO):
# Create ISO String
sISO = str(u16ISO)
# Send command to set Exposure Mode
sResponse = self.__sSimpleCall("setIsoSpeedRate", adictParams=[sISO])
# Check response for a successful result
if 'result' in sResponse:
sResponse = self.__sSimpleCall("getIsoSpeedRate")
if sISO not in sResponse["result"]:
print ("Failed to Set ISO, current value: %s" %sResponse["result"])
return False
print ("ISO set to %s" % sISO)
return True
# In case of an error, return false
print ("Failed to Set ISO")
return False
#****************************************************************************
# Method Name : __boAddGeotagToLog
#
# Description : Adds an entry to the log file with the name of the image
# and geoposition and orientation of the shot.
#
# Parameters : Image file name, position, orientation
#
# Return Value : True if successful
# False if no URL was received for the image
#
# Author : Jaime Machuca
#
#****************************************************************************
def __boAddGeotagToLog(self, sImageFileName):
self.__writeGeoRefToFile(sImageFileName)
return True
#****************************************************************************
# Method Name : boTakePicture
#
# Description : Commands the camera to take a picture
#
# Parameters : none
#
# Return Value : True if successful
# False if no URL was received for the image
#
# Author : Jaime Machuca
#
#****************************************************************************
def boTakePicture(self):
# Send command to take picture to camera
sResponse = self.__sSimpleCall("actTakePicture")
# Check response for a successful result and save latest image URL
if 'result' in sResponse:
self.sLatestImageURL = sResponse['result'][0][0]
start = self.sLatestImageURL.find('DSC')
end = self.sLatestImageURL.find('JPG', start) + 3
self.sLatestFileName = self.sLatestImageURL[start:end]
print("image URL: %s" % self.sLatestImageURL)
print("image Name: %s" % self.sLatestFileName)
self.__boAddGeotagToLog(self.sLatestFileName)
self.u32ImgCounter = self.u32ImgCounter+1
return True
# In case of an error, return false
return False
#****************************************************************************
#
# Stuff Needed for testing and compatibility with current code.
#
#****************************************************************************
def take_picture(self):
return self.boTakePicture()
def get_latest_image(self):
self.boGetLatestImage()
# this reads the image from the filename, parameter is 1 color, 0 BW, -1 unchanged
return cv2.imread(self.sLatestImageFilename,1)
# main - tests SmartCamera_SonyQX class
def main(self):
while True:
# send request to image capture for image
if self.take_picture():
# display image
cv2.imshow ('image_display', self.get_latest_image())
else:
print("no image")
# check for ESC key being pressed
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
# take a rest for a bit
time.sleep(0.01)
# run test run from the command line
if __name__ == "__main__":
sc_SonyQX1_0 = SmartCameraSonyQX1(0)
sc_SonyQX1_0.main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from djshop.apps.offers.models import BundleOffer
from django import forms
# Bundle offer form
class BundleOfferForm(forms.ModelForm):
class Meta:
model = BundleOffer
fields = ["name", "description", "product", "bundle_product_units", "paid_product_units"]
# Delete bundle offer confirmation form
class DeleteBundleOfferForm(forms.Form):
confirmed = forms.BooleanField(label=u"Confirm you want to delete this offer")
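# A minimal usage sketch (hypothetical view code, not part of this module),
# showing how BundleOfferForm would typically be bound and saved:
#
#   form = BundleOfferForm(request.POST or None)
#   if form.is_valid():
#       form.save()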
|
import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
from Actor import Actor
from Critic import Critic
from Noise import OUActionNoise, OUNoise
from Buffer import Buffer
problem="MountainCarContinuous-v0"
env = gym.make(problem)
num_states = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
print("Size of State Space -> {}".format(num_states))
print("Size of Action Space -> {}".format(num_actions))
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print("Max Value of Action -> {}".format(upper_bound))
print("Min Value of Action -> {}".format(lower_bound))
#Size of State Space -> 2
#Size of Action Space -> 1
#Max Value of Action -> 1.0
#Min Value of Action -> -1.0
# Returns an action sampled from the actor, with exploration noise added
def get_policy_action(current_state, actor_model, noise_object):
# get sampled actions
actions = actor_model(current_state)
pactions = actions
noise = noise_object.sample()
# Add noise to action
actions = actions.numpy() + noise
pureactions = pactions.numpy()
# We make sure action is within bounds
pure_actions = np.clip(pureactions, lower_bound, upper_bound)
legal_action = np.clip(actions, lower_bound, upper_bound)
return [np.squeeze(legal_action)], [np.squeeze(pure_actions)]
std_dev = 0.25
exploration_mu = 0
exploration_theta = 0.05
exploration_sigma = std_dev
#ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
ou_noise = OUNoise(num_actions, exploration_mu, exploration_theta, exploration_sigma)
actor_model = Actor(num_states, upper_bound)
actor_model = actor_model.get_model()
critic_model = Critic(num_states, num_actions)
critic_model = critic_model.get_model()
target_actor_model = Actor(num_states, upper_bound)
target_actor_model = target_actor_model.get_model()
target_critic_model = Critic(num_states, num_actions)
target_critic_model = target_critic_model.get_model()
# Making the weights equal initially
target_actor_model.set_weights(actor_model.get_weights())
target_critic_model.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 500
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
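# For reference: Buffer.update_target (called in the training loop below) is
# assumed to apply the standard DDPG soft (Polyak) update with this tau. A
# minimal sketch of that rule, not the actual Buffer implementation, would be:
#
#   def soft_update(target_model, model, tau):
#       new_weights = [tau * w + (1.0 - tau) * tw
#                      for w, tw in zip(model.get_weights(), target_model.get_weights())]
#       target_model.set_weights(new_weights)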
buffer = Buffer(num_states, num_actions, 50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# loop over all episodes
for ep in range(total_episodes):
prev_state = env.reset()
episodic_reward = 0
while True:
# render environment
env.render()
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
# get a noisy action (for exploration)
action, pureaction = get_policy_action(tf_prev_state, actor_model, ou_noise)
# take a step
state, reward, done, info = env.step(action)
buffer.remember_experience((prev_state, action, reward, state))
episodic_reward += reward
# process and learn
buffer.process_batch(gamma, critic_model, target_critic_model, actor_model, target_actor_model, critic_optimizer, actor_optimizer)
buffer.update_target(tau, critic_model, target_critic_model, actor_model, target_actor_model)
if done:
break
prev_state = state
ep_reward_list.append(episodic_reward)
#Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
avg_reward_list.append(avg_reward)
actor_model.save_weights("car_actor.h5")
critic_model.save_weights("car_critic.h5")
target_actor_model.save_weights("car_target_actor.h5")
target_critic_model.save_weights("car_target_critic.h5")
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()
|
#!/bin/python3
import argparse
import logging
parser = argparse.ArgumentParser(description="Generates plots of optical functions")
parser.add_argument("path to xml", metavar='path', type=str, help="Path to xml file")
parser.add_argument("-v", "--verbose", dest='logging-level', action='store_const', const=logging.DEBUG, default=logging.INFO, help="Verbosity of program, if set, logs from madx will be created")
args = parser.parse_args()
logger = logging.getLogger()
logger.setLevel(getattr(args, "logging-level"))
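# Example invocation (hypothetical file name for this script):
#   python train_approximators.py path/to/training_configuration.xml -v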
import models.train_model as trainer
from ROOT import TObject, TFile
import os
import xml_parser.approximator_training_configuration as xml_parser
def main(path_to_xml_file, path_to_optics):
training_configurations = xml_parser.get_approximator_configurations_from(path_to_xml_file)
for training_configuration in training_configurations:
approximator = trainer.train_from_configuration(training_configuration)
path_to_parametrization = os.path.join(path_to_optics, training_configuration.destination_file_name)
file = TFile.Open(path_to_parametrization, "update")
approximator.Write(training_configuration.approximator_configuration.name_of_approximator, TObject.kOverwrite)
file.Close()
if __name__ == "__main__":
path_to_xml_file = getattr(args, "path to xml")
path_to_optic = os.path.split(path_to_xml_file)[0]
main(path_to_xml_file, path_to_optic)
|
from .actions import Action
from .status import Status
from .chambers import Chamber
|
import requests # Used to make HTTP requests
import json # Used to parse JSON
import os  # Used to read environment variables
from ..internal import fixFilter
API_TAGO = os.environ.get('TAGOIO_API') or 'https://api.tago.io'
class Network:
def __init__(self, acc_token):
self.token = acc_token
self.default_headers = {
'content-type': 'application/json', 'Account-Token': acc_token}
return
def list(self, page=1, fields=['id', 'name'], filter={}, amount=20, orderBy='name,asc'):
params = {
'page': page,
'fields': fields,
'filter': filter,
'amount': amount,
'orderBy': orderBy,
}
params = fixFilter(params, filter)
return requests.get('{api_endpoint}/integration/network'.format(api_endpoint=API_TAGO), headers=self.default_headers, params=params).json()
def info(self, network_id, fields=['id', 'name']):
if network_id is None or network_id == '':
return self.list()
params = {'fields': fields}
return requests.get('{api_endpoint}/integration/network/{network_id}'.format(api_endpoint=API_TAGO, network_id=network_id), params=params, headers=self.default_headers).json()
def create(self, data):
data = data if data else {}
return requests.post('{api_endpoint}/integration/network'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
def edit(self, network_id, data):
data = data if data else {}
return requests.put('{api_endpoint}/integration/network/{network_id}'.format(api_endpoint=API_TAGO, network_id=network_id), headers=self.default_headers, json=data).json()
def delete(self, network_id):
return requests.delete('{api_endpoint}/integration/network/{network_id}'.format(api_endpoint=API_TAGO, network_id=network_id), headers=self.default_headers).json()
def tokenList(self, network_id, page=1, amount=20, filter={}, fields=['name', 'token', 'created_at'], orderBy='created_at,desc'):
params = {
'page': page,
'filter': filter,
'amount': amount,
'orderBy': orderBy,
'fields': fields,
}
params = fixFilter(params, filter)
return requests.get('{api_endpoint}/integration/network/token/{network_id}'.format(api_endpoint=API_TAGO, network_id=network_id), headers=self.default_headers, params=params).json()
def tokenCreate(self, network_id, data):
data = data if data else {}
data['network'] = network_id
return requests.post('{api_endpoint}/integration/network/token'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
def tokenDelete(self, token):
return requests.delete('{api_endpoint}/integration/network/token/{token}'.format(api_endpoint=API_TAGO, token=token), headers=self.default_headers).json()
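# A minimal usage sketch (the account token below is a placeholder, not a real
# credential); every method returns the JSON response parsed into a dict:
#
#   network = Network('my-account-token')
#   print(network.list(page=1, amount=10))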
|
'''
Given a string s, find the longest palindromic substring in s. You may assume
that the maximum length of s is 1000.
(A "palindrome" is a string that reads the same forwards and backwards, e.g.
"level" or "noon".)
'''
class Solution:
def longestPalindrome(self, s):
'''
Method 1: dynamic programming.
dp[i][j] is True when s[i..j] is a palindrome; for substrings longer than
two characters, dp[i][j] = dp[i+1][j-1] and s[i] == s[j].
'''
n = len(s)
dp = [[False] * n for _ in range(n)]
ans = ''
for l in range(n):
for i in range(n):
j = i + l
if j >= n:
break
elif l == 0:
dp[i][j] = True
elif l == 1:
dp[i][j] = (s[i] == s[j])
else:
dp[i][j] = (dp[i + 1][j - 1] and s[i] == s[j])
if dp[i][j] and l + 1 > len(ans):
ans = s[i: j + 1]
return ans
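    # Added sketch (not part of the original solution): an alternative
    # O(n^2)-time, O(1)-extra-space approach that expands around each
    # possible palindrome center.
    def longestPalindromeExpand(self, s):
        def expand(left, right):
            # Grow the window while the characters at both ends match.
            while left >= 0 and right < len(s) and s[left] == s[right]:
                left -= 1
                right += 1
            return s[left + 1: right]
        ans = ''
        for i in range(len(s)):
            odd = expand(i, i)        # odd-length palindromes centered at i
            even = expand(i, i + 1)   # even-length palindromes centered between i and i+1
            ans = max(ans, odd, even, key=len)
        return ans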
if __name__ == '__main__':
n = 'abcdeedcb'
ins = Solution()
final1 = ins.longestPalindrome(n)
# final2 = ins.numTrees2(n)
print(final1)
|
from PySide import QtGui, QtCore
import settings
import utils
class NewDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(NewDialog, self).__init__(parent)
r = QtGui.QDesktopWidget().availableGeometry()
self.setGeometry(r.width()*0.25,
r.height() * 0.25,
r.width() * 0.5,
r.height() * 0.5)
envGroupBox = QtGui.QGroupBox("Environment", self)
layout = QtGui.QVBoxLayout()
self.envComboBox = QtGui.QComboBox(envGroupBox)
self.envComboBox.addItems(self.getEnvironments())
layout.addWidget(self.envComboBox)
envGroupBox.setLayout(layout)
envGroupBox.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Maximum)
self.buffersGroupBox = BufferGroupBox(self)
self.kernels = []
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | \
QtGui.QDialogButtonBox.Cancel)
addKernelBtn = buttonBox.addButton("Add Kernel",
QtGui.QDialogButtonBox.ActionRole)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
addKernelBtn.clicked.connect(self.addKernel)
self.topLayout = QtGui.QVBoxLayout()
self.topLayout.addWidget(envGroupBox)
self.topLayout.addWidget(self.buffersGroupBox)
# Create tab widget
self.tabWidget = QtGui.QTabWidget(self)
tmp = QtGui.QWidget(self.tabWidget)
tmp.setLayout(self.topLayout)
self.tabWidget.addTab(tmp, "&General")
self.addKernel()
layout = QtGui.QVBoxLayout()
layout.addWidget(self.tabWidget)
layout.addWidget(buttonBox)
self.setLayout(layout)
self.setWindowTitle("Configuration")
@staticmethod
def getEnvironments():
return ["OpenCL", "CUDA", "GLSL"]
def addKernel(self):
kernel = KernelGroupBox(len(self.kernels), self)
self.kernels.append(kernel)
self.tabWidget.addTab(kernel, "Kernel%i" % len(self.kernels))
def initFromSettings(self, settings):
idx = self.getEnvironments().index(settings.environment)
self.envComboBox.setCurrentIndex(idx)
for b in settings.buffers:
self.buffersGroupBox.addBuffer()
buffer = self.buffersGroupBox.buffers[-1]
buffer.nameLineEdit.setText(b.name)
idx = ["half", "ubyte", "float"].index(b.type)
buffer.typeComboBox.setCurrentIndex(idx)
buffer.channelsComboBox.setCurrentIndex(b.channels-1)
for i,k in enumerate(settings.kernels):
if i: # First kernel is always added automatically
self.addKernel()
kernel = self.kernels[-1]
kernel.nameLineEdit.setText(k.name)
kernel.codeFileLineEdit.setText(k.code_file)
# In Buffers
kernel.inBuffersComboBox.setCurrentIndex(len(k.inBuffers))
for lineEdit, inb in zip(kernel.inBufferLineEdits, k.inBuffers):
lineEdit.setText(inb.name)
# Out Buffers
kernel.outBuffersComboBox.setCurrentIndex(len(k.outBuffers))
for lineEdit, outb in zip(kernel.outBufferLineEdits, k.outBuffers):
lineEdit.setText(outb.name)
for p in k.params:
param = kernel.addParam()
param.nameLineEdit.setText(p.name)
idx = ["float", "int"].index(p.type)
param.typeComboBox.setCurrentIndex(idx)
param.defaultLineEdit.setText(str(p.default))
param.minLineEdit.setText(str(p.min))
param.maxLineEdit.setText(str(p.max))
def getSettings(self):
s = settings.Settings()
# Environment
s.environment = str(self.envComboBox.currentText())
# Buffers
for b in self.buffersGroupBox.buffers:
s.buffers.append(
settings.Settings.Buffer(
str(b.nameLineEdit.text()),
str(b.typeComboBox.currentText()).strip(),
utils.safeEval(str(b.channelsComboBox.currentText()))))
# Kernels
for k in self.kernels:
sk = settings.Settings.Kernel(
str(k.nameLineEdit.text()).replace(" ", "_"),
str(k.codeFileLineEdit.text()))
# In Buffers
for le in k.inBufferLineEdits:
sk.inBuffers.append(
settings.Settings.Kernel.KernelBuffer(str(le.text()),""))
# Out Buffers
for le in k.outBufferLineEdits:
sk.outBuffers.append(
settings.Settings.Kernel.KernelBuffer(str(le.text()),""))
# Params
for p in k.params:
param = settings.Settings.Param(
str(p.nameLineEdit.text()),
str(p.typeComboBox.currentText()).strip(),
utils.safeEval(p.defaultLineEdit.text()),
utils.safeEval(p.minLineEdit.text()),
utils.safeEval(p.maxLineEdit.text()))
sk.params.append(param)
s.kernels.append(sk)
s.updateCode()
return s
class BufferGroupBox(QtGui.QGroupBox):
class Buffer(object):
def __init__(self, gridLayout, row, parent):
self.nameLineEdit = QtGui.QLineEdit("buffer%i" % row, parent)
self.typeComboBox = QtGui.QComboBox(parent)
self.typeComboBox.addItems(["half\t","ubyte\t","float\t"])
self.channelsComboBox = QtGui.QComboBox(parent)
self.channelsComboBox.addItems(["1", "2", "3", "4"])
self.channelsComboBox.setCurrentIndex(3)
gridLayout.addWidget(self.nameLineEdit, row, 0)
gridLayout.addWidget(self.typeComboBox, row, 1)
gridLayout.addWidget(self.channelsComboBox, row, 2)
def __init__(self, parent):
super(BufferGroupBox,self).__init__(parent)
self.buffers = []
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.addWidget(QtGui.QLabel("Buffer Name",self), 0, 0)
self.gridLayout.addWidget(QtGui.QLabel("Type",self), 0, 1)
self.gridLayout.addWidget(QtGui.QLabel("Channels",self), 0, 2)
addBufferButton = QtGui.QPushButton(" Add Buffer ", self)
addBufferButton.clicked.connect(self.addBuffer)
addBufferButton.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum)
layout = QtGui.QVBoxLayout()
layout.addLayout(self.gridLayout)
layout.addWidget(addBufferButton)
self.setLayout(layout)
layout.addStretch()
def addBuffer(self):
b = BufferGroupBox.Buffer(self.gridLayout, len(self.buffers)+1, self)
self.buffers.append(b)
class KernelGroupBox(QtGui.QGroupBox):
class Parameter(object):
def __init__(self, gridLayout, row, parent):
self.nameLineEdit = QtGui.QLineEdit("param%i" % row, parent)
self.typeComboBox = QtGui.QComboBox(parent)
self.typeComboBox.addItems(["float\t","int\t"])
self.defaultLineEdit = QtGui.QLineEdit("0.0",parent)
self.minLineEdit = QtGui.QLineEdit("0.0",parent)
self.maxLineEdit = QtGui.QLineEdit("10.0",parent)
gridLayout.addWidget(self.nameLineEdit, row, 0)
gridLayout.addWidget(self.typeComboBox, row, 1)
gridLayout.addWidget(self.defaultLineEdit, row, 2)
gridLayout.addWidget(self.minLineEdit, row, 3)
gridLayout.addWidget(self.maxLineEdit, row, 4)
def getData(self):
name = self.nameLineEdit.text()
type = str(self.typeComboBox.currentText()).strip()
defaultVal = self.defaultLineEdit.text()
minVal = self.minLineEdit.text()
maxVal = self.maxLineEdit.text()
if type == "int":
defaultVal = str(utils.safeEval(defaultVal,"int"))
minVal = str(utils.safeEval(minVal,"int"))
maxVal = str(utils.safeEval(maxVal,"int"))
return name, type, defaultVal, minVal, maxVal
def __init__(self, number, parent):
super(KernelGroupBox, self).__init__("Kernel %i" % number, parent)
self.parentDialog = parent
nameLabel = QtGui.QLabel("Name: ", self)
self.nameLineEdit = QtGui.QLineEdit("Untitled%i" % number, self)
topLayout = QtGui.QHBoxLayout()
topLayout.addWidget(nameLabel)
topLayout.addWidget(self.nameLineEdit)
codeFileLabel = QtGui.QLabel("Code: ", self)
self.codeFileLineEdit = QtGui.QLineEdit("")
codeBtn = QtGui.QPushButton("...")
codeBtn.clicked.connect(self.onCodeBtnPress)
midLayout = QtGui.QHBoxLayout()
midLayout.addWidget(codeFileLabel)
midLayout.addWidget(self.codeFileLineEdit)
midLayout.addWidget(codeBtn)
inBuffersGroupBox = QtGui.QGroupBox("Input Buffers", self)
self.inBuffersLayout = QtGui.QVBoxLayout()
inBuffersGroupBox.setLayout(self.inBuffersLayout)
layout = QtGui.QHBoxLayout()
label = QtGui.QLabel("Number of input buffers:", inBuffersGroupBox)
label.setSizePolicy(QtGui.QSizePolicy.Maximum,QtGui.QSizePolicy.Maximum)
self.inBuffersComboBox = QtGui.QComboBox(inBuffersGroupBox)
self.inBuffersComboBox.addItems([str(i) for i in range(6)])
self.inBuffersComboBox.currentIndexChanged.connect(self.setInBuffers)
layout.addWidget(label)
layout.addWidget(self.inBuffersComboBox)
self.inBufferLineEdits = []
self.inBuffersLayout.addLayout(layout)
self.inBuffersLayout.addStretch()
outBuffersGroupBox = QtGui.QGroupBox("Output Buffers", self)
self.outBuffersLayout = QtGui.QVBoxLayout()
outBuffersGroupBox.setLayout(self.outBuffersLayout)
layout = QtGui.QHBoxLayout()
label = QtGui.QLabel("Number of output buffers:", outBuffersGroupBox)
label.setSizePolicy(QtGui.QSizePolicy.Maximum,QtGui.QSizePolicy.Maximum)
self.outBuffersComboBox = QtGui.QComboBox(outBuffersGroupBox)
self.outBuffersComboBox.addItems([str(i) for i in range(6)])
self.outBuffersComboBox.currentIndexChanged.connect(self.setOutBuffers)
layout.addWidget(label)
layout.addWidget(self.outBuffersComboBox)
self.outBufferLineEdits = []
self.outBuffersLayout.addLayout(layout)
self.outBuffersLayout.addStretch()
paramGroupBox = QtGui.QGroupBox("Parameters", self)
self.gridLayout = QtGui.QGridLayout()
paramGroupBox.setLayout(self.gridLayout)
self.gridLayout.addWidget(QtGui.QLabel("Name",self), 0, 0)
self.gridLayout.addWidget(QtGui.QLabel("Type",self), 0, 1)
self.gridLayout.addWidget(QtGui.QLabel("Default",self), 0, 2)
self.gridLayout.addWidget(QtGui.QLabel("Min",self), 0, 3)
self.gridLayout.addWidget(QtGui.QLabel("Max",self), 0, 4)
self.params = []
addParamButton = QtGui.QPushButton(" Add Param ", self)
addParamButton.clicked.connect(self.addParam)
addParamButton.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum)
# Shrink all group boxes
for groupBox in [inBuffersGroupBox, outBuffersGroupBox, paramGroupBox]:
groupBox.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Maximum)
layout = QtGui.QVBoxLayout()
layout.addLayout(topLayout)
layout.addLayout(midLayout)
layout.addWidget(inBuffersGroupBox)
layout.addWidget(outBuffersGroupBox)
layout.addWidget(paramGroupBox)
layout.addWidget(addParamButton)
layout.addStretch()
self.setLayout(layout)
def addInBuffer(self, name):
lineEdit = QtGui.QLineEdit(name)
self.inBuffersLayout.addWidget(lineEdit)
self.inBufferLineEdits.append(lineEdit)
def addOutBuffer(self, name):
lineEdit = QtGui.QLineEdit(name)
self.outBuffersLayout.addWidget(lineEdit)
self.outBufferLineEdits.append(lineEdit)
def setInBuffers(self, idx):
for lineEdit in self.inBufferLineEdits:
self.inBuffersLayout.removeWidget(lineEdit)
self.inBufferLineEdits = []
for i in range(idx):
self.addInBuffer("inBuffer%i" %i)
def setOutBuffers(self, idx):
for lineEdit in self.outBufferLineEdits:
self.outBuffersLayout.removeWidget(lineEdit)
self.outBufferLineEdits = []
for i in range(idx):
self.addOutBuffer("outBuffer%i" %i)
def onCodeBtnPress(self):
suffix = ".cl"
if str(self.parentDialog.envComboBox.currentText()) == "CUDA":
suffix = ".cu"
elif str(self.parentDialog.envComboBox.currentText()) == "GLSL":
suffix = ".glsl"
name = "/" + self.getName() + suffix
title = "Code output file for kernel " + self.getName()
f = QtGui.QFileDialog.getSaveFileName(
None, title, QtCore.QDir.currentPath() + name, " (*)")
if f[0]:
self.codeFileLineEdit.setText(f[0])
def getName(self):
return self.nameLineEdit.text()
def addParam(self):
p = KernelGroupBox.Parameter(self.gridLayout, len(self.params)+1, self)
self.params.append(p)
return p
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-statements
from azure.cli.core.commands import CliCommandType
from azure.cli.command_modules.apim._format import (service_output_format)
from azure.cli.command_modules.apim._client_factory import (cf_service, cf_api, cf_product, cf_nv, cf_apiops,
cf_apirelease, cf_apirevision, cf_apiversionset)
def load_command_table(self, _):
service_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#ApiManagementServiceOperations.{}',
client_factory=cf_service
)
api_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#ApiOperations.{}',
client_factory=cf_api
)
product_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#ProductOperations.{}',
client_factory=cf_product
)
nv_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#NamedValueOperations.{}',
client_factory=cf_nv
)
apiops_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#ApiOperationOperations.{}',
client_factory=cf_apiops
)
apirel_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#ApiReleaseOperations.{}',
client_factory=cf_apirelease
)
apirev_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#ApiRevisionOperations.{}',
client_factory=cf_apirevision
)
apivs_sdk = CliCommandType(
operations_tmpl='azure.mgmt.apimanagement.operations#ApiVersionSetOperations.{}',
client_factory=cf_apiversionset
)
# pylint: disable=line-too-long
with self.command_group('apim', service_sdk) as g:
g.custom_command('create', 'apim_create', supports_no_wait=True, table_transformer=service_output_format)
g.custom_show_command('show', 'apim_get', table_transformer=service_output_format)
g.custom_command('list', 'apim_list', table_transformer=service_output_format)
g.command('delete', 'begin_delete', confirmation=True, supports_no_wait=True)
g.generic_update_command('update', custom_func_name='apim_update', getter_name='get',
setter_name='begin_create_or_update', supports_no_wait=True)
g.custom_command('check-name', 'apim_check_name_availability')
g.custom_command('backup', 'apim_backup', supports_no_wait=True)
g.custom_command('restore', 'apim_restore', supports_no_wait=True)
g.custom_command('apply-network-updates', 'apim_apply_network_configuration_updates', supports_no_wait=True)
g.wait_command('wait')
with self.command_group('apim api', api_sdk) as g:
g.custom_command('import', 'apim_api_import', supports_no_wait=True)
g.custom_command('create', 'apim_api_create', supports_no_wait=True)
g.custom_show_command('show', 'apim_api_get')
g.custom_command('list', 'apim_api_list')
g.custom_command('delete', 'apim_api_delete', confirmation=True, supports_no_wait=True)
g.generic_update_command('update', custom_func_name='apim_api_update',
setter_name='begin_create_or_update', getter_name='get', supports_no_wait=True)
g.wait_command('wait')
with self.command_group('apim product api', api_sdk) as g:
g.custom_command('list', 'apim_product_api_list')
g.custom_command('check', 'apim_product_api_check_association')
g.custom_command('add', 'apim_product_api_add')
g.custom_command('delete', 'apim_product_api_delete')
with self.command_group('apim product', product_sdk) as g:
g.custom_command('list', 'apim_product_list')
g.custom_show_command('show', 'apim_product_show')
g.custom_command('create', 'apim_product_create', supports_no_wait=True)
g.generic_update_command('update', custom_func_name='apim_product_update', supports_no_wait=True)
g.custom_command('delete', 'apim_product_delete', confirmation=True, supports_no_wait=True)
g.wait_command('wait')
with self.command_group('apim nv', nv_sdk) as g:
g.custom_command('create', 'apim_nv_create', supports_no_wait=True)
g.custom_show_command('show', 'apim_nv_get')
g.custom_command('list', 'apim_nv_list')
g.custom_command('delete', 'apim_nv_delete', confirmation=True)
g.custom_command('show-secret', 'apim_nv_show_secret')
g.generic_update_command('update', setter_name='begin_create_or_update', custom_func_name='apim_nv_update')
with self.command_group('apim api operation', apiops_sdk) as g:
g.custom_command('list', 'apim_api_operation_list')
g.custom_show_command('show', 'apim_api_operation_get')
g.custom_command('create', 'apim_api_operation_create')
g.generic_update_command('update', custom_func_name='apim_api_operation_update')
g.custom_command('delete', 'apim_api_operation_delete')
with self.command_group('apim api release', apirel_sdk) as g:
g.custom_command('list', 'apim_api_release_list')
g.custom_show_command('show', 'apim_api_release_show')
g.custom_command('create', 'apim_api_release_create')
g.generic_update_command('update', custom_func_name='apim_api_release_update')
g.custom_command('delete', 'apim_api_release_delete')
with self.command_group('apim api revision', apirev_sdk) as g:
g.custom_command('list', 'apim_api_revision_list')
g.custom_command('create', 'apim_api_revision_create')
with self.command_group('apim api versionset', apivs_sdk) as g:
g.custom_command('list', 'apim_api_vs_list')
g.custom_show_command('show', 'apim_api_vs_show')
g.custom_command('create', 'apim_api_vs_create')
g.generic_update_command('update', custom_func_name='apim_api_vs_update')
g.custom_command('delete', 'apim_api_vs_delete')
|
# Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
EventData, \
CompoundEventData
DUMP_PARTIAL = [
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientObjectBreak',
docs=FunctionDoc(
description='This event is fired before an object breaks.' ,
arguments={
"attacker": """the vehicle/ped/player who is breaking the object """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='attacker',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientObjectDamage',
docs=FunctionDoc(
description='This event is fired before an object gets damaged.' ,
arguments={
"loss": """the health loss caused by the damage. This parameter contains the theoretical loss, which could be less than 0, if you substract it of the current health. If you want to get the real loss, you have to substract the new health of the old health (use a timer for this). """,
"attacker": """the vehicle/ped/player who is damaging the object. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='loss',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='attacker',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientObjectMoveStart',
docs=FunctionDoc(
description='' ,
arguments={
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientObjectMoveStop',
docs=FunctionDoc(
description='' ,
arguments={
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
)
],
)
]
|
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from django.views.generic import FormView, ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.models import User
from django.db.models import Count
import datetime
from itertools import chain
from django.conf import settings
from .models import Stage, Company, Contact, Campaign, Opportunity, Reminder, Report, CallLog, OpportunityStage
# Create your views here.
# The search view: matches contacts, opportunities and notes against the query string
def search(request):
if request.method == 'GET':
if request.GET.get('q'):
contact_results = []
opp_results = []
note_results = []
search_words = "%s" % (request.GET.get('q'))
search_word_list = search_words.split(' ')
for search_word in search_word_list:
print(search_word)
contact_firstname = Contact.objects.filter(first_name__icontains = search_word)
contact_lastname = Contact.objects.filter(last_name__icontains = search_word)
contact_company = Contact.objects.filter(company__name__icontains = search_word)
opp_firstname = Opportunity.objects.filter(contact__first_name__icontains = search_word)
opp_lastname = Opportunity.objects.filter(contact__last_name__icontains = search_word)
opp_stage = Opportunity.objects.filter(stage__name__icontains = search_word)
contact_results = contact_results + list(contact_firstname) + list(contact_lastname) + list(contact_company)
opp_results = opp_results + list(opp_firstname) + list(opp_lastname) + list(opp_stage)
call_note = CallLog.objects.filter(note__icontains = search_word)
reminder_note = Reminder.objects.filter(note__icontains = search_word)
note_results = note_results + list(call_note) + list(reminder_note)
return render_to_response('crm/search_results.html', {'search':search_words, 'contacts': contact_results, 'opps': opp_results, 'notes': note_results}, context_instance=RequestContext(request))
return render_to_response('crm/search_results.html', context_instance=RequestContext(request))
class Dashboard(ListView):
model = Opportunity
template_name = "crm/dashboard.html"
def get_context_data(self, **kwargs):
context = super(Dashboard, self).get_context_data(**kwargs)
#Adding OpportunityStages to the templates' context
context["opportunity_stages"] = OpportunityStage.objects.all().order_by('-time_stamp')
context["reminders"] = Reminder.objects.all().order_by('-date')[:5]
context["stage_by_opp"] = Stage.objects.annotate(opp_count = Count('opportunity'))
context['opportunity_list'] = Opportunity.objects.all().order_by('-create_date')[:5]
return context
## Call Views ##
class ListCallView(ListView):
model = CallLog
paginate_by = 10
class ViewCallView(DetailView):
model = CallLog
class CreateCallView(CreateView):
model = CallLog
fields = ['opportunity', 'note']
def get_success_url(self):
return reverse('crm:dashboard')
def form_valid(self, form):
call = form.save(commit=False)
call.user = self.request.user
call.save()
return super(CreateCallView, self).form_valid(form)
class DeleteCallView(DeleteView):
model = CallLog
def get_success_url(self):
return reverse('crm:dashboard')
####################End######################
## Stage Views ##
class ListStageView(ListView):
model = Stage
paginate_by = 10
class ViewStageView(DetailView):
model = Stage
class CreateStageView(CreateView):
model = Stage
fields = ['name', 'order', 'description', 'value']
def get_success_url(self):
return reverse('crm:dashboard')
class UpdateStageView(UpdateView):
model = Stage
fields = ['name', 'order', 'description', 'value']
def get_success_url(self):
return reverse('crm:dashboard')
class DeleteStageView(DeleteView):
model = Stage
def get_success_url(self):
return reverse('crm:dashboard')
####################End######################
## Company Views ##
class ListCompanyView(ListView):
model = Company
paginate_by = 10
class ViewCompanyView(DetailView):
model = Company
class UpdateCompanyView(UpdateView):
model = Company
fields = ['name', 'website', 'address1', 'address2', 'city', 'state', 'zipcode', 'country', 'phone']
def get_success_url(self):
return reverse('crm:dashboard')
class CreateCompanyView(CreateView):
model = Company
fields = ['name', 'website', 'address1', 'address2', 'city', 'state', 'zipcode', 'country', 'phone']
def get_success_url(self):
return reverse('crm:dashboard')
class DeleteCompanyView(DeleteView):
model = Company
def get_success_url(self):
return reverse('crm:dashboard')
####################End######################
## Contact Views ##
class ListContactView(ListView):
model = Contact
paginate_by = 10
class ViewContactView(DetailView):
model = Contact
class UpdateContactView(UpdateView):
model = Contact
fields = ['company', 'first_name', 'last_name', 'email', 'address1', 'address2', 'city', 'state', 'zipcode', 'country', 'phone']
def get_success_url(self):
return reverse('crm:dashboard')
class CreateContactView(CreateView):
model = Contact
fields = ['company', 'first_name', 'last_name', 'email', 'address1', 'address2', 'city', 'state', 'zipcode', 'country', 'phone']
def get_success_url(self):
return reverse('crm:dashboard')
class DeleteContactView(DeleteView):
model = Contact
def get_success_url(self):
return reverse('crm:dashboard')
####################End######################
## Reminder Views ##
class ListReminderView(ListView):
model = Reminder
paginate_by = 10
class ViewReminderView(DetailView):
model = Reminder
class CreateReminderView(CreateView):
model = Reminder
fields = ['opportunity', 'date', 'note']
def get_success_url(self):
return reverse('crm:dashboard')
class UpdateReminderView(UpdateView):
model = Reminder
fields = ['note', 'completed']
def get_success_url(self):
return reverse('crm:dashboard')
class DeleteReminderView(DeleteView):
model = Reminder
def get_success_url(self):
return reverse('crm:dashboard')
####################End######################
## Campaign Views ##
class ListCampaignView(ListView):
model = Campaign
paginate_by = 10
class ViewCampaignView(DetailView):
model = Campaign
class CreateCampaignView(CreateView):
model = Campaign
fields = ['name', 'description']
def get_success_url(self):
return reverse('crm:dashboard')
class UpdateCampaignView(UpdateView):
model = Campaign
fields = ['name', 'description']
def get_success_url(self):
return reverse('crm:dashboard')
class DeleteCampaignView(DeleteView):
model = Campaign
def get_success_url(self):
return reverse('crm:dashboard')
####################End######################
## Opportunity Views ##
class ListOpportunityView(ListView):
model = Opportunity
paginate_by = 10
class ViewOpportunityView(DetailView):
model = Opportunity
class CreateOpportunityView(CreateView):
model = Opportunity
fields = ['stage', 'company', 'contact', 'value', 'source', 'user']
def get_success_url(self):
return reverse('crm:dashboard')
class UpdateOpportunityView(UpdateView):
model = Opportunity
fields = ['stage', 'company', 'contact', 'value', 'source', 'user']
def get_success_url(self):
return reverse('crm:dashboard')
def form_valid(self, form):
opp = form.save(commit=False)
if opp.stage.value != self.get_object().stage.value:
o = OpportunityStage()
o.opportunity = self.get_object()
o.user = self.request.user
o.save()
return super(UpdateOpportunityView, self).form_valid(form)
class DeleteOpportunityView(DeleteView):
model = Opportunity
def get_success_url(self):
return reverse('crm:dashboard')
##########################################
##OpportunityStage View##
class CreateOpportunityStageView(CreateView):
model = OpportunityStage
fields = ['opportunity','stage']
def get_success_url(self):
return reverse('crm:dashboard')
def form_valid(self, form):
opstage = form.save(commit=False)
opstage.user = self.request.user
opstage.opportunity.stage = opstage.stage
opstage.save()
return super(CreateOpportunityStageView, self).form_valid(form)
class ReportsView(ListView):
model = Report
|
"""A tool to scan a directory tree and generate snippets of an app.yaml file.
When configuring static paths in app.yaml file, there are a few things that may
need to be done manually. Specifically, if there's a desire to have an url like
'directory/' to serve the file 'directory/index.html', there has to be a static
mapping.
At times, it is also necessary to provide mime types explicitly.
This script scans a directory tree and, based on command line flags, expands a
per-file template with the values computed for each file.
"""
import os
import sys
import argparse
import mimetypes
import textwrap
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--prefix", default="handlers:", help="Optional prefix to prepend to the output")
parser.add_argument("-i", "--index", action="append", help="Index files to look for - if not specified, will process all files")
parser.add_argument("-t", "--template", help="Path to a template file to use to generate each entry in the output")
parser.add_argument("-u", "--url-strip", default=[], action="append", help="Prefixes to strip from the path to create an url")
parser.add_argument("-r", "--root-strip", default="", help="Prefixes to strip from the path to create the path of the file in the config")
parser.add_argument("-l", "--login", default="", help="If specifiled, a 'login: <value-specified>' will be added to the generated handlers")
parser.add_argument("dir", help="One or more directories to scan for files", nargs="+")
args = parser.parse_args(argv[1:])
template = textwrap.dedent("""\
url: {urldir}
static_files: {filename}
upload: {filename}""")
if args.template:
template = open(args.template).read().strip()
if args.prefix:
print(args.prefix)
config = {}
for indir in args.dir:
for root, subdirs, subfiles in os.walk(indir):
if args.index:
for index in args.index:
if index in subfiles:
subfiles = [index]
break
else:
continue
for index in subfiles:
filename = os.path.join(root, index)
mimetype, mimeencoding = mimetypes.guess_type(filename)
if mimeencoding is None:
mimeencoding = ""
urlfile = filename
urldir = os.path.dirname(urlfile)
for strip in args.url_strip:
if urlfile.startswith(strip):
urlfile = urlfile[len(strip):]
urldir = os.path.dirname(urlfile)
break
# Why? Say /dir is mapped to /dir/index.html. Relative paths in index.html
# would then resolve against / as the parent directory and break.
# Instead, we need to map /dir/ (with a trailing slash) to /dir/index.html.
urldir = os.path.join(urldir, "")
if filename.startswith(args.root_strip):
filename = filename[len(args.root_strip):]
filename = filename.strip("/")
filedir = os.path.dirname(filename)
expanded = template.format(
filename = filename,
filedir = filedir,
urldir = urldir,
urlfile = urlfile,
mimetype = mimetype,
mimeencoding = mimeencoding
).split("\n")
if args.login:
expanded.append(f"login: {args.login}")
wrapped = "- " + "\n".join(["  " + l for l in expanded])[2:]
print(wrapped)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|