repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
hoffmangroup/cla | claweb/bin/claweb_make_website.py | <filename>claweb/bin/claweb_make_website.py
#!/usr/bin/env python
import multiprocessing
from functools import partial
import sys
import os
import shutil
import argparse
import pandas as pd
from claweb import extra
from claweb.controller import genes
from claweb.controller import groups
from claweb.controller import index
from claweb.controller.comparisons import comparison_list
from claweb.controller.comparisons import comparisons
# Default worker-pool size. NOTE(review): os.cpu_count() may return None on
# some platforms; multiprocessing.Pool accepts None and picks its own default.
N_THREAD = os.cpu_count()
def main(args):
    """Build the whole static website: index, list pages and per-entity pages.

    :param args: parsed CLI namespace with ``config`` and
        ``group_and_comparison`` file paths.
    """
    cfg, gac = extra.load_configs(args.config, args.group_and_comparison)
    # create website base directory; exist_ok avoids the check-then-create race
    os.makedirs(cfg['website']['output'], exist_ok=True)
    # copy JS and CSS files to the website base directory
    pycomp_module_path = os.path.dirname(extra.__file__)
    static_path = os.path.join(pycomp_module_path, 'static')
    static_output_path = os.path.join(cfg['website']['output'], 'static')
    if not os.path.exists(static_output_path):
        shutil.copytree(static_path, static_output_path)
    # create index.html
    index.main(cfg)
    # create subsection main pages
    comparison_list.comparison_list(cfg, gac)
    genes.gene_list(cfg, gac)
    groups.group_list(cfg, gac)
    # create individual web pages.
    # Pools are used as context managers so the worker processes are always
    # terminated and joined (the original leaked one pool per dataset and
    # never closed any of them).
    with multiprocessing.Pool(N_THREAD) as pool:
        make_group = partial(groups.group, cfg, gac)
        group_ids = [group['id'] for group in gac['group_definitions']]
        pool.map(make_group, group_ids)
    for dataset in cfg['datasets']:
        df = pd.read_csv(dataset['summary'], sep='\t')
        # keep only robust (10/10 folds) and accurate (>0.9) results
        df = df[(df.robustness == 10) & (df.accuracy > .9)]
        with multiprocessing.Pool(N_THREAD) as pool:
            make_gene_card = partial(genes.gene_card, cfg, gac, dataset)
            gene_list = list(set(df.gene.tolist()))
            pool.map(make_gene_card, gene_list)
    with multiprocessing.Pool(N_THREAD) as pool:
        make_comp = partial(comparisons.comparisons, cfg, gac)
        comp_ids = [comp['id'] for comp in gac['comparisons']]
        pool.map(make_comp, comp_ids)
def parse_args(args):
    """Parse command-line arguments for the website builder.

    :param args: list of argument strings (e.g. ``sys.argv[1:]``).
    :return: argparse.Namespace with ``config`` and ``group_and_comparison``.
    """
    parser = argparse.ArgumentParser(description='Make website')
    positionals = (
        ('config', 'path to config file'),
        ('group_and_comparison', 'path to group and comparison file'),
    )
    for name, help_text in positionals:
        parser.add_argument(name, type=str, help=help_text)
    return parser.parse_args(args)
if __name__ == '__main__':
    # Parse once and hand the namespace straight to main() — the original
    # parsed the arguments twice and discarded the first result.
    main(parse_args(sys.argv[1:]))
|
hoffmangroup/cla | claweb/claweb/main.py | <filename>claweb/claweb/main.py
__author__ = 'mickael'
import argparse
import yaml
import sys
def run(config_file, comp_file):
    """Load the pipeline configuration and the group/comparison definitions.

    :param config_file: path to the YAML config file.
    :param comp_file: path to the YAML group-and-comparison file.
    :return: tuple ``(cfg, group_and_comparisons)`` of the parsed documents
        (the original parsed them and silently discarded both).
    """
    # safe_load never constructs arbitrary Python objects; yaml.load with
    # no explicit Loader is deprecated and unsafe on untrusted input.
    with open(config_file) as yaml_file:
        cfg = yaml.safe_load(yaml_file)
    with open(comp_file) as yaml_file:
        group_and_comparisons = yaml.safe_load(yaml_file)
    return cfg, group_and_comparisons
if __name__ == '__main__':
    # Both files are mandatory; argparse exits with an error message if
    # either --config_file or --comp_file is missing.
    parser = argparse.ArgumentParser(description='Regularized Random Forest.')
    parser.add_argument('--config_file', type=str, help='full path to the config file', required=True)
    parser.add_argument('--comp_file', type=str, help='full path to the comparison file', required=True)
    args = parser.parse_args(sys.argv[1:])
    run(args.config_file, args.comp_file)
#TODO: handle the website folder creation
hoffmangroup/cla | claweb/claweb/controller/master.py | #!/usr/bin/env python
import argparse
import multiprocessing
import os
import shutil
import sys
from functools import partial
import pandas as pd
from . import genes
from . import groups
from . import index
from .comparisons import comparison_list
from .comparisons import comparisons
from .. import extra
def make_index(cfg):
    """Create the website skeleton: output directory, static assets, index.html.

    :param cfg: configuration in whatever form ``extra.load_config`` accepts
        (it is (re)loaded here so callers may pass a path or a parsed dict —
        TODO confirm against extra.load_config).
    """
    cfg = extra.load_config(cfg)
    # create website base directory; exist_ok avoids the check-then-create race
    os.makedirs(cfg['website']['output'], exist_ok=True)
    # copy JS and CSS files to the website base directory
    pycomp_module_path = os.path.dirname(extra.__file__)
    static_path = os.path.join(pycomp_module_path, 'static')
    static_output_path = os.path.join(cfg['website']['output'], 'static')
    if not os.path.exists(static_output_path):
        shutil.copytree(static_path, static_output_path)
    # create index.html
    index.main(cfg)
def make_genecards(cfg, gac, n_thread):
    """Generate one gene-card page for every robust, accurate gene result.

    :param cfg: configuration (path or dict, normalized by extra.load_config).
    :param gac: group-and-comparison definitions (normalized by extra.load_gac).
    :param n_thread: number of worker processes for page generation.
    """
    cfg = extra.load_config(cfg)
    gac = extra.load_gac(gac)
    make_index(cfg)
    genes.gene_list(cfg, gac)
    for dataset in cfg['datasets']:
        df = pd.read_csv(dataset['summary'], sep='\t')
        # keep only robust (10/10 folds) and accurate (>0.9) results
        df = df[(df.robustness == 10) & (df.accuracy > .9)]
        # the context manager terminates and joins the workers; the original
        # leaked a fresh, never-closed Pool on every dataset iteration
        with multiprocessing.Pool(n_thread) as pool:
            make_gene_card = partial(genes.gene_card, cfg, gac, dataset)
            gene_list = set(df.gene.tolist())
            pool.map(make_gene_card, gene_list)
def make_groups(cfg, gac, n_thread):
    """Generate the group-list page and one page per sample group.

    :param cfg: configuration (path or dict, normalized by extra.load_config).
    :param gac: group-and-comparison definitions (normalized by extra.load_gac).
    :param n_thread: number of worker processes for page generation.
    """
    cfg = extra.load_config(cfg)
    gac = extra.load_gac(gac)
    make_index(cfg)
    groups.group_list(cfg, gac)
    # context manager terminates and joins the workers (the original pool
    # was never closed)
    with multiprocessing.Pool(n_thread) as pool:
        make_group = partial(groups.group, cfg, gac)
        group_ids = [group['id'] for group in gac['group_definitions']]
        pool.map(make_group, group_ids)
def make_comparisons(cfg, gac, n_thread):
    """Generate the comparison-list page and one page per comparison.

    :param cfg: configuration (path or dict, normalized by extra.load_config).
    :param gac: group-and-comparison definitions (normalized by extra.load_gac).
    :param n_thread: number of worker processes for page generation.
    """
    cfg = extra.load_config(cfg)
    gac = extra.load_gac(gac)
    make_index(cfg)
    comparison_list.comparison_list(cfg, gac)
    # context manager terminates and joins the workers (the original pool
    # was never closed)
    with multiprocessing.Pool(n_thread) as pool:
        make_comparison = partial(comparisons.comparisons, cfg, gac)
        comparison_ids = [comp['id'] for comp in gac['comparisons']]
        pool.map(make_comparison, comparison_ids)
def main(config, group_and_comparison, n_thread):
    """Build the full website: index, gene cards, group pages, comparisons.

    :param config: path to the config file.
    :param group_and_comparison: path to the group-and-comparison file.
    :param n_thread: number of worker processes for the parallel sections.

    NOTE(review): every make_* helper re-runs make_index itself, so the
    index is regenerated several times here — redundant but harmless.
    """
    cfg, gac = extra.load_configs(config, group_and_comparison)
    make_index(cfg)
    make_genecards(cfg, gac, n_thread)
    make_groups(cfg, gac, n_thread)
    make_comparisons(cfg, gac, n_thread)
def parse_args(args):
    """Parse CLI arguments and clamp --n_thread to the machine's CPU count.

    :param args: list of argument strings (e.g. ``sys.argv[1:]``).
    :return: argparse.Namespace with config, group_and_comparison, n_thread.
    """
    parser = argparse.ArgumentParser(description='Make website')
    parser.add_argument('config', type=str, help='path to config file.')
    parser.add_argument('group_and_comparison', type=str, help='path to group and comparison file.')
    parser.add_argument('--n_thread', type=int, default=1, help='make genecards with `n` thread.')
    args = parser.parse_args(args)
    # os.cpu_count() may return None (undetermined CPU count); fall back to
    # 1 so min() does not raise a TypeError.
    args.n_thread = min(os.cpu_count() or 1, args.n_thread)
    return args
def cli_make_website(args=None):
    """Console entry point: build the complete website.

    :param args: optional list of argument strings; defaults to
        ``sys.argv[1:]`` evaluated at call time. The original default
        ``args=sys.argv[1:]`` was bound once at import time — the classic
        early-bound-default pitfall.
    """
    args = parse_args(sys.argv[1:] if args is None else args)
    main(args.config, args.group_and_comparison, args.n_thread)
def cli_make_comparisons(args=None):
    """Console entry point: build only the comparison pages.

    :param args: optional list of argument strings; defaults to
        ``sys.argv[1:]`` evaluated at call time (the original bound
        ``sys.argv[1:]`` once at import time).
    """
    args = parse_args(sys.argv[1:] if args is None else args)
    make_comparisons(args.config, args.group_and_comparison, args.n_thread)
def cli_make_genes(args=None):
    """Console entry point: build only the gene-card pages.

    :param args: optional list of argument strings; defaults to
        ``sys.argv[1:]`` evaluated at call time (the original bound
        ``sys.argv[1:]`` once at import time).
    """
    args = parse_args(sys.argv[1:] if args is None else args)
    make_genecards(args.config, args.group_and_comparison, args.n_thread)
if __name__ == '__main__':
    # This module is normally driven through the console-script entry
    # points above; direct execution is a no-op.
    # cli_make_website()
    pass
|
hoffmangroup/cla | claweb/claweb/view/__init__.py | <reponame>hoffmangroup/cla
__author__ = 'mickael'
|
hoffmangroup/cla | claweb/claweb/model/gene_dist.py | __author__ = 'mickael'
import base64
import matplotlib.pyplot as plt
from io import StringIO
import pandas as pd
import seaborn as sns
from ..extra import sort_samples_by_correlation
def plot_gene_distribution(gene, exp_table, group_orders, groups):
    """Render a per-group boxplot of a gene's expression as a base64 PNG.

    :param gene: gene identifier (a row label of ``exp_table``).
    :param exp_table: expression DataFrame indexed by gene.
    :param group_orders: group names in the order they should be plotted.
    :param groups: group-definition dicts with 'name' and 'samples' keys.
    :return: base64-encoded PNG bytes of the figure.
    """
    from io import BytesIO  # PNG data is binary; StringIO cannot hold it

    results = []
    for name in group_orders:
        group = [group for group in groups if group['name'] == name][0]
        try:
            results.append(exp_table.loc[gene, group['samples']].values)
        except KeyError:
            # Gene or samples missing from the table: skip this group.
            # The original swallowed every exception with a bare except and
            # then killed the whole interpreter with exit().
            continue
    fig = plt.figure(figsize=(12, 6))
    g = sns.boxplot(results)
    t = g.set_xticklabels(labels=group_orders, rotation=90)
    io = BytesIO()
    plt.savefig(io, format='png')
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # drop-in replacement and produces identical output.
    my_plot = base64.encodebytes(io.getvalue())
    plt.close(fig)
    return my_plot
def gene_dist(config_file, group_and_comparisons, dataset, gene):
    """Plot the expression distribution of ``gene`` across sample groups.

    :param config_file: parsed config (unused here but kept for interface
        parity with the other model functions).
    :param group_and_comparisons: parsed GAC dict with 'group_definitions'.
    :param dataset: dataset dict with an 'expression_table' path.
    :param gene: gene identifier to plot.
    :return: base64-encoded PNG from plot_gene_distribution.
    """
    groups = group_and_comparisons['group_definitions']
    # NOTE: the original also read dataset['summary'] here and never used
    # the result; that wasted I/O has been removed.
    exp = pd.read_csv(dataset['expression_table'], sep='\t', index_col=0)
    index_order_name = sort_samples_by_correlation(group_and_comparisons)
    return plot_gene_distribution(gene, exp, index_order_name, groups)
|
hoffmangroup/cla | claweb/claweb/f5clonto_helper/parse_20170801.py | <filename>claweb/claweb/f5clonto_helper/parse_20170801.py
#!/usr/bin env python3
import argparse
import sys
from collections import defaultdict
from collections.abc import MutableMapping
from itertools import combinations
from typing import List, Dict
import yaml
from .terms import CLTerm
from .terms import Term
def is_f5_sample(term_id):
    """True when *term_id* is a FANTOM5 sample id ('FF' prefix plus a dash)."""
    has_ff_prefix = term_id.startswith('FF')
    has_dash = '-' in term_id
    return has_ff_prefix and has_dash
class TermDict(MutableMapping):
    """Mapping of ontology term id -> ``Term`` with ontology traversal helpers.

    Wraps a plain dict (``MutableMapping`` fills in the rest of the dict
    API) and adds FANTOM5/Cell-Ontology queries: ancestor lookup, CL-term
    extraction, and collapsing of CL terms that carry identical sample sets.
    """

    def __init__(self, *args, **kw):
        # accepts the same constructor arguments as dict()
        self._storage = dict(*args, **kw)

    def __getitem__(self, key):
        return self._storage[key]

    def __setitem__(self, key, value):
        self._storage[key] = value

    def __delitem__(self, key):
        del self._storage[key]

    def __iter__(self):
        return iter(self._storage)

    def __len__(self):
        return len(self._storage)

    @classmethod
    def from_filename(cls, filename):
        """Parse an OBO-formatted ontology file into a new TermDict.

        Each ``[Term]`` stanza is split into ``[tag, value]`` rows and
        handed to ``Term(rows=...)``; terms are keyed by ``term.term_id``.

        :param filename: path to the ontology (.obo) file.
        :return: a TermDict of all parsed terms.
        """
        storage = {}
        # `with` guarantees the handle is closed even if parsing raises
        # (the original used a bare open()/close() pair that leaked on error)
        with open(filename, 'r') as f:
            while 1:
                line = f.readline()
                rows = []
                if not line:
                    break
                if "[Term]" not in line.strip():
                    continue
                while 1:
                    # stanza lines look like "tag: value"
                    line = f.readline().strip().split(': ')
                    if not line[0]:
                        # a blank line ends the stanza
                        break
                    # re-join values that themselves contain ": "
                    line = [line[0], ": ".join(line[1:])]
                    rows.append(line)
                term = Term(rows=rows)
                storage[term.term_id] = term
        return cls(storage)

    def ancestors(self, term_id, func, s):
        """
        Return all the ancestors of `term_id` accessible through `is_a` where `func` return True.
        :param term_id: get the ancestors of `term_id`.
        :param func: evaluate the parents with this function.
        :param s: set ancestor's term ids.
        :return: s
        """
        for parent_id in self[term_id].is_a:
            if func(parent_id):
                s.add(parent_id)
                s.update(self.ancestors(parent_id, func, s))
        return s

    def get_cl_human_samples(self, human_sample="FF:0000210", tissue_sample='FF:0000004'):
        """Map each CL term id to the FANTOM5 phase-1 human samples below it.

        A sample qualifies when it descends from ``human_sample``, does NOT
        descend from ``tissue_sample``, and is tagged 'phase1'. Each sample
        is attached to the CL term it derives from and to every CL ancestor
        of that term.

        :return: dict of CL term id -> list of sample term ids.
        """
        cl_human_samples = defaultdict(set)
        f5_sample_term_ids = [term_id
                              for term_id in self
                              if is_f5_sample(term_id)]
        for term_id in f5_sample_term_ids:
            ancestors = self.ancestors(term_id, lambda x: x.startswith('FF:'), set())
            # current also need to be checked
            # because samples can derive from CLs
            ancestors.add(term_id)
            if human_sample not in ancestors:
                continue
            if tissue_sample in ancestors:
                continue
            if 'phase1' not in self[term_id].subset:
                continue
            for ancestor in ancestors:
                if not self[ancestor].relationship:
                    continue
                for rel_type, parent in self[ancestor].relationship:
                    if rel_type == 'derives_from' and parent.startswith('CL:'):
                        cl_human_samples[parent].add(term_id)
                        parent_ancestors = self.ancestors(parent, lambda x: x.startswith('CL:'), set())
                        for parent_ancestor in parent_ancestors:
                            cl_human_samples[parent_ancestor].add(term_id)
        # convert set of samples to list of samples
        d = {k: list(v) for k, v in cl_human_samples.items()}
        return d

    def cl_terms(self):
        """Return a dict of CL term id -> ``CLTerm`` with attached samples."""
        term_id_to_cl_term = dict()
        cl_human_samples = self.get_cl_human_samples()
        for term_id, term in self.items():
            if not term_id.startswith("CL:"):
                continue
            human_samples = cl_human_samples.get(term_id, [])
            term_id_to_cl_term[term_id] = CLTerm(term, human_samples)
        return term_id_to_cl_term

    def get_terms_with_same_samples(self) -> List[List[CLTerm]]:
        """Return a list that lists the terms with the exact same non-empty set of samples.
        :return: List[List[CLTerm]]
        """
        res: List[List[CLTerm]] = []
        # keep track of the terms known with same
        # sample set as another term to avoid
        # reporting a term twice.
        processed_terms = set()
        # exclude terms without samples to avoid considering them as similar.
        terms_with_samples = [term
                              for term in self.cl_terms().values()
                              if term.has_sample]
        # for each term, check every following term
        for i, term in enumerate(terms_with_samples):
            # skip term if we already know that it is similar to another term.
            if term in processed_terms:
                continue
            # check if the following samples have the same samples
            similar_terms = [comp_term
                             for comp_term in terms_with_samples[i+1:]
                             if comp_term.has_same_sample_set(term)]
            # add current term to list of similar samples
            # and update `res`
            if similar_terms:
                similar_terms.append(term)
                processed_terms.update(similar_terms)
                res.append(similar_terms)
        return res

    def get_oldest_parent(self, term_ids):
        """Return ``(index, term_id)`` of the first id whose CL ancestors
        contain none of the other ids.

        NOTE(review): returns None implicitly when every id has another id
        among its ancestors — callers must tolerate that.
        """
        for i, term_id in enumerate(term_ids):
            ancestors = self.ancestors(term_id, lambda x: x.startswith('CL:'), set())
            has_term_in_ancestors = any(_id in ancestors for _id in term_ids)
            if not has_term_in_ancestors:
                return i, term_id

    def get_deepest_child_from_term_ids(self, term_ids: List[str]) -> str:
        """Deepest child is a synonym for most specialized cell type.
        Return the term_id of the cell type that has the most term_id from terms_ids in it's ancestors.
        example: A - B - C - D
                          \\
                           E
        D has 3 ancestors, E has 2 ancestors. D is the deepest child of A.
        :param term_ids: list of strings containing term_ids (["CL:...", "CL:..."], ...)
        """
        deepest_id = term_ids[0]
        deepest_score = 0
        for term_id in term_ids:
            ancestors = self.ancestors(term_id, lambda x: x.startswith('CL:'), set())
            # score = how many of the candidate ids are above this one
            score = sum(1 for _id in term_ids if _id in ancestors)
            if score > deepest_score:
                deepest_score = score
                deepest_id = term_id
        return deepest_id

    def collapse_cl_terms(self):
        """ Collapse CL terms with the same set of non-empty sample.
        Update the source and the target of the edges that refer to a
        collapsed CL term.
        XXX: use https://github.com/dhimmel/obonet
        to manipulate the cell ontology.
        Or, store nodes and edges separately.
        :return: Dict[str, CLTerm]
        """
        res: Dict[str, CLTerm] = self.cl_terms()
        list_of_similar_terms: List[List[CLTerm]] = self.get_terms_with_same_samples()
        cl_to_new_term = dict()
        # merge similar terms
        for similar_terms in list_of_similar_terms:
            # merge terms
            merged_term = CLTerm.from_clterms(similar_terms)
            # index new term and delete the old ones
            res[merged_term.term_id] = merged_term
            for term in similar_terms:
                cl_to_new_term[term.term_id] = merged_term
                del res[term.term_id]
        # update the is_a and relationships of all terms
        for term in res.values():
            term.update_is_a(cl_to_new_term)
            term.update_relationship(cl_to_new_term)
        return res
def main(input_cl_ontology, output_group_and_comparison):
    """Build the group-and-comparison YAML file from the FANTOM5 ontology.

    :param input_cl_ontology: path to the FANTOM5 cell-ontology file.
    :param output_group_and_comparison: path of the YAML file to write.
    """
    main_terms = TermDict.from_filename(input_cl_ontology)
    collapsed = main_terms.collapse_cl_terms().values()
    # only keep cell types backed by more than 10 samples
    terms = [term for term in collapsed if term.nb_of_sample > 10]
    groups = []
    for term in terms:
        term_ids = term.term_id.split(term.merge_separator)
        deepest_child = main_terms.get_deepest_child_from_term_ids(term_ids)
        groups.append({
            'id': term.term_id,
            'name': term.name,
            'print_name': main_terms[deepest_child].name,
            'print_id': deepest_child,
            'samples': sorted(sample.replace('FF:', '') for sample in term.samples),
        })
    # one comparison for every pair of groups with disjoint sample sets
    comparisons = [
        {'group1': first.term_id, 'group2': second.term_id}
        for first, second in combinations(terms, 2)
        if not first.samples_in_common(second)
    ]
    for position, comparison in enumerate(comparisons):
        comparison['id'] = position
    yaml_dict = {'group_definitions': groups, 'comparisons': comparisons}
    with open(output_group_and_comparison, 'w') as yaml_file:
        yaml_file.write(yaml.dump(yaml_dict, default_flow_style=False))
def parse_args(argv=None):
    """Parse CLI arguments.

    :param argv: optional list of argument strings; with the default None,
        argparse falls back to ``sys.argv[1:]`` itself, so the original
        no-argument call sites behave identically. The explicit parameter
        makes the parser testable.
    :return: argparse.Namespace with input_fantom5_ontology, output_gac_file.
    """
    parser = argparse.ArgumentParser(description='Make group and comparison file from the FANTOM5 cell ontology ')
    parser.add_argument('input_fantom5_ontology', type=str,
                        help='full path to the FANTOM5 cell ontology', )
    parser.add_argument('output_gac_file', type=str, help='output filepath')
    args = parser.parse_args(argv)
    return args
def cli_make_gac():
    """Console entry point: parse CLI arguments and build the GAC file."""
    cli_args = parse_args()
    main(cli_args.input_fantom5_ontology, cli_args.output_gac_file)
if __name__ == '__main__':
    # same behavior as the installed console script
    cli_make_gac()
|
hoffmangroup/cla | claweb/claweb/model/groups.py | """
Created on Oct 7, 2013
@author: mmendez
"""
import pandas as pd
import os
def group(config_file, group_and_comparisons, group_id):
    """Collect per-dataset gene counts for one sample group.

    :param config_file: parsed config dict with a 'datasets' list; each
        dataset has 'name' and 'summary' (TSV path) keys.
    :param group_and_comparisons: parsed GAC dict with 'group_definitions'.
    :param group_id: id of the group to summarize.
    :return: dict with the group's id/name/print_id/print_name, a 'header'
        of [dataset name, number of comparisons] pairs, and 'rows' of
        (gene, count) tuples, blank-padded where datasets differ in length.
    """
    header = []
    genes_counts = []
    # renamed from `group` to avoid shadowing this function's own name
    group_def = [definition
                 for definition in group_and_comparisons['group_definitions']
                 if group_id == definition['id']][0]
    for dataset in config_file['datasets']:
        df = pd.read_csv(dataset['summary'], sep='\t')
        # robust (10/10) and accurate (>0.9) results involving this group
        df = df[(df.robustness == 10) & (df.accuracy > .9) & ((df.cl1 == group_id) | (df.cl2 == group_id))]
        header.append([dataset['name'], df.id.value_counts().size])
        # Series.iteritems() was removed in pandas 2.0; items() is the
        # long-supported equivalent.
        gene_count = list(df.gene.value_counts().items())
        genes_counts.append(gene_count)
    df = pd.DataFrame(genes_counts).T
    # pad missing cells with ('', ''); DataFrame.map replaced applymap in
    # pandas 2.1, fall back for older versions
    fill = lambda x: ('', '') if pd.isnull(x) else x
    table = df.map(fill) if hasattr(df, 'map') else df.applymap(fill)
    return {'id': group_def['id'],
            'name': group_def['name'],
            'print_id': group_def['print_id'],
            'print_name': group_def['print_name'],
            'header': header,
            'rows': table.values.tolist(),
            }
def group_list(config_file, group_and_comparisons):
    """Summarize, per group, how many comparisons and genes were found.

    Groups without a single comparison in any dataset are omitted; the
    result is sorted by print_name.
    """
    datasets = []
    for dataset in config_file['datasets']:
        frame = pd.read_csv(dataset['summary'], sep='\t')
        # robust (10/10) and accurate (>0.9) results only
        datasets.append(frame[(frame.robustness == 10) & (frame.accuracy > .9)])
    groups = []
    for definition in group_and_comparisons['group_definitions']:
        comp_counts = []
        gene_counts = []
        for frame in datasets:
            involved = frame[(frame.cl1 == definition['id']) | (frame.cl2 == definition['id'])]
            comp_counts.append(involved.id.value_counts().size)
            gene_counts.append(involved.gene.value_counts().size)
        if not max(comp_counts):
            # this group appears in no comparison at all: leave it out
            continue
        page_link = os.path.join(config_file['website']['url'], 'groups',
                                 definition['print_id'].replace(":", "_") + '.html')
        groups.append({
            'name': definition['name'],
            'print_name': definition['print_name'],
            'link': page_link,
            'nb_of_comp': max(comp_counts),
            'gene_count': gene_counts,
        })
    return sorted(groups, key=lambda entry: entry['print_name'])
|
hoffmangroup/cla | claweb/bin/claweb_make_website_gene.py | <gh_stars>0
#!/usr/bin/env python
import sys
import os
import shutil
import argparse
import pandas as pd
from claweb import extra
from claweb.controller import genes
from claweb.controller import groups
from claweb.controller import index
from claweb.controller.comparisons import comparison_list
from claweb.controller.comparisons import comparisons
def main(args):
    """(Re)generate the gene card of a single gene for every dataset.

    :param args: parsed namespace with config, group_and_comparison and gene.
    :raises ValueError: when the gene has no robust, accurate result in a
        dataset (the original used ``assert``, which vanishes under
        ``python -O``).
    """
    cfg, gac = extra.load_configs(args.config, args.group_and_comparison)
    # create website base directory; exist_ok avoids the check-then-create race
    os.makedirs(cfg['website']['output'], exist_ok=True)
    # refresh the JS and CSS files in the website base directory
    pycomp_module_path = os.path.dirname(extra.__file__)
    static_path = os.path.join(pycomp_module_path, 'static')
    static_output_path = os.path.join(cfg['website']['output'], 'static')
    if os.path.exists(static_output_path):
        shutil.rmtree(static_output_path)
    shutil.copytree(static_path, static_output_path)
    # regenerate only the requested gene card (index and list pages are
    # handled by claweb_make_website)
    for dataset in cfg['datasets']:
        df = pd.read_csv(dataset['summary'], sep='\t')
        # robust (10/10 folds) and accurate (>0.9) results only
        df = df[(df.robustness == 10) & (df.accuracy > .9)]
        if args.gene not in df.gene.unique():
            raise ValueError('gene %r has no robust, accurate result in dataset %r'
                             % (args.gene, dataset['name']))
        genes.gene_card(cfg, gac, dataset, args.gene)
def parse_args(args):
    """Parse command-line arguments for the single-gene website builder."""
    parser = argparse.ArgumentParser(description='Make website')
    positionals = (
        ('config', 'path to config file'),
        ('group_and_comparison', 'path to group and comparison file'),
        ('gene', 'Generate gene card for specified gene'),
    )
    for name, help_text in positionals:
        parser.add_argument(name, type=str, help=help_text)
    return parser.parse_args(args)
if __name__ == '__main__':
    # Parse once and hand the namespace straight to main() — the original
    # parsed the arguments twice and discarded the first result.
    main(parse_args(sys.argv[1:]))
|
hoffmangroup/cla | claweb/claweb/controller/index.py | <filename>claweb/claweb/controller/index.py
import os
from ..model import summary
from jinja2 import Environment, FileSystemLoader
def main(config_file):
    """Render index.html from the Jinja2 templates into the output folder.

    :param config_file: parsed config dict with a 'website' section
        ('output' directory and template variables).
    """
    template_folder = os.path.join(os.path.dirname(__file__), '..', 'view')
    environment = Environment(loader=FileSystemLoader(template_folder))
    parent_template = environment.get_template('default.tpl')
    n_cell_types = summary.get_number_of_cell_types_with_results(config_file)
    html = parent_template.render(site=config_file['website'],
                                  number_of_cl=n_cell_types,
                                  tpl='index.tpl')
    target = os.path.join(config_file['website']['output'], "index.html")
    # write bytes so the encoding is explicit and platform-independent
    with open(target, "wb") as f:
        f.write(html.encode("utf-8"))
    print('index.html generated')
|
hoffmangroup/cla | claweb/claweb/f5clonto_helper/make_ontoviewer_coords.py | import argparse
import json
import sys
from . cytoscape_utils import get_node_attr_from_cytoscape
def main(cytoscape_file, output_graph):
    """Convert a Cytoscape export into an ontoviewer JSON graph.

    Nodes are colored by name ('sac'/'stuff accumulating cell' -> orange,
    'mc'/'motile cell' -> green, otherwise black), then every node listed
    in the hand-tuned coordinate tables below gets fixed x/y coordinates
    and a display name.

    :param cytoscape_file: path to the Cytoscape xgmml file.
    :param output_graph: path of the JSON file to write.

    NOTE(review): the numeric keys of the coordinate dicts are Cytoscape
    node ids for one specific export — regenerating the xgmml presumably
    changes them; confirm before reuse.
    """
    nodes, edges = get_node_attr_from_cytoscape(cytoscape_file)
    for node in nodes:
        if nodes[node]["name"] == "sac":
            nodes[node]["color"] = "orange"
            nodes[node]["type"] = "short_node"
        elif nodes[node]["name"] == "mc":
            nodes[node]["color"] = "green"
            nodes[node]["type"] = "short_node"
        else:
            if nodes[node]["name"] == "stuff accumulating cell":
                nodes[node]["color"] = "orange"
            elif nodes[node]["name"] == "motile cell":
                nodes[node]["color"] = "green"
            else:
                nodes[node]["color"] = "black"
            nodes[node]["type"] = "chart_node"
    ########## HEMAT ##########
    anchor_x, anchor_y = (0, 1350)
    x_dist, y_dist = (220, 220)
    cols = [0, x_dist * 1, x_dist * 2, x_dist * 3.2, x_dist * 4.3]
    cols = [anchor_x + c for c in cols]
    rows = [y_dist * 0, y_dist * 1, y_dist * 2, y_dist * 3,
            y_dist * 4, y_dist * 5, y_dist * 6]
    rows = [anchor_y - r for r in rows]
    nodes_coord_hemat = {
        29796: {'name': 'hematopoietic\ncell', 'x': (cols[3] - cols[0]) / 2,
                'y': rows[0] + y_dist * 0.5},
        29795: {'name': 'blood\ncell', 'x': cols[0], 'y': rows[1]},
        29800: {'name': 'granulocyte', 'x': cols[0], 'y': rows[3]},
        29833: {'name': 'myeloid\ncell', 'x': cols[1], 'y': rows[1]},
        29801: {'name': 'myeloid\nleukocyte', 'x': cols[1], 'y': rows[2]},
        29828: {'name': 'monocyte', 'x': cols[2], 'y': rows[3]},
        29835: {'name': 'CD14-positive\nmonocyte', 'x': cols[2], 'y': rows[4]},
        29850: {'name': 'classical\nmonocyte', 'x': cols[2], 'y': rows[5]},
        29782: {'name': 'mc**', 'x': cols[2] + x_dist * .2, 'y': rows[1] + y_dist * .5},
        29825: {'name': 'leukocyte', 'x': cols[2], 'y': rows[1]},
        29827: {'name': 'nongranular\nleukocyte', 'x': cols[3], 'y': rows[2]},
        29824: {'name': 'dendritic\ncell', 'x': cols[2], 'y': rows[2]},
        29849: {'name': 'phagocyte', 'x': (cols[1] - cols[0]) / 2, 'y': (rows[4] + rows[5]) / 2},
        29781: {'name': 'mc**', 'x': cols[0], 'y': rows[4] + y_dist * .2},
        29778: {'name': 'sac*', 'x': cols[1], 'y': rows[4] + y_dist * .2},
        29843: {'name': 'hematopoietic\nprecursor\ncell', 'x': cols[3], 'y': rows[1]},
        29826: {'name': 'nucleate\ncell', 'x': cols[4], 'y': rows[2]},
        29799: {'name': 'lymphocyte', 'x': cols[4], 'y': rows[3]},
        29818: {'name': 'lymphocyte\nof B lineage', 'x': cols[3], 'y': rows[4]},
        29817: {'name': 'B cell', 'x': cols[3], 'y': rows[5]},
        29798: {'name': 'T cell', 'x': cols[4], 'y': rows[4]},
        29830: {'name': 'alpha-beta\nT cell', 'x': cols[4], 'y': rows[5]},
        29829: {'name': 'CD8-positive,\nalpha-beta\nT cell', 'x': cols[4], 'y': rows[6]}, }
    ########## EPITH ##########
    anchor_x, anchor_y = (0, 2500)
    x_dist, y_dist = (235, 250)
    cols = [x_dist * 0, x_dist * 1, x_dist * 2.2, x_dist * 3.2, x_dist * 4,
            x_dist * 5, x_dist * 6, x_dist * 7, x_dist * 8, x_dist * 9]
    cols = [anchor_x + c for c in cols]
    rows = [anchor_y - 2600, y_dist * 1, y_dist * 2, y_dist * 3, y_dist * 4]
    rows = [anchor_y - r for r in rows]
    nodes_coord_epith = {
        29788: {'name': 'epithelial\ncell', 'x': (cols[1] + cols[8]) / 2, 'y': rows[0]},
        29792: {'name': 'columnar-cuboidal\nepithelial\ncell', 'x': cols[1], 'y': rows[1]},
        29811: {'name': 'neurecto-epithelial\ncell', 'x': cols[1], 'y': rows[2]},
        29810: {'name': 'melanocyte', 'x': (cols[0] + cols[1]) / 2, 'y': rows[3]},
        54910: {'name': 'sac*', 'x': cols[0], 'y': rows[1]},
        29809: {'name': 'pigment\ncell', 'x': cols[0], 'y': rows[2]},
        29832: {'name': 'ecto-epithelial\ncell', 'x': cols[2], 'y': rows[1]},
        29838: {'name': 'general\necto-epithelial\ncell', 'x': cols[2], 'y': rows[2]},
        29790: {'name': 'squamous\nepithelial\ncell', 'x': cols[3], 'y': rows[1]},
        29793: {'name': 'mesothelial\ncell', 'x': cols[3] + x_dist * .6, 'y': rows[3]},
        29789: {'name': 'blood\nvessel\nendothelial\ncell', 'x': cols[3], 'y': rows[4]},
        29794: {'name': 'lining\ncell', 'x': cols[4] + x_dist * .25, 'y': rows[2]},
        29805: {'name': 'meso-epithelial\ncell', 'x': cols[5], 'y': rows[1]},
        29804: {'name': 'endothelial\ncell', 'x': cols[5], 'y': (rows[2] + rows[3]) / 2},
        29791: {'name': 'endothelial\ncell\nof vascular\ntree', 'x': cols[5], 'y': (rows[3] + rows[4]) / 2},
        29845: {'name': 'kidney\nepithelial\ncell', 'x': cols[6], 'y': rows[1]},
        29844: {'name': 'epithelial\ncell of\nnephron', 'x': cols[6], 'y': rows[2]},
        29848: {'name': 'hepatocyte', 'x': cols[7], 'y': rows[1]},
        29797: {'name': 'epithelial\ncell\nof lung', 'x': cols[4], 'y': rows[1]},
        29836: {'name': 'endo-epithelial\ncell', 'x': cols[8], 'y': rows[1]},
        29775: {'name': 'respiratory\nepithelial\ncell', 'x': (cols[7] + cols[8]) / 2, 'y': rows[2]},
        29776: {'name': 'epithelial\ncell of\nalimentary\ncanal', 'x': (cols[8] + cols[9]) / 2, 'y': rows[2]},
    }
    ########## CONNECT ##########
    anchor_x, anchor_y = (1400, 1850)
    x_dist, y_dist = (220, 220)
    cols = [x_dist * 0, x_dist * 1, x_dist * 2, x_dist * 3, x_dist * 4]
    cols = [anchor_x + c for c in cols]
    rows = [y_dist * 0, y_dist * 1, y_dist * 2, y_dist * 3, y_dist * 4.2]
    rows = [anchor_y - r for r in rows]
    nodes_coord_connect = {
        29787: {'name': 'connective\ntissue\ncell', 'x': cols[2], 'y': rows[1]},
        29808: {'name': 'fat\ncell', 'x': cols[0], 'y': rows[2]},
        29786: {'name': 'fibroblast', 'x': cols[1], 'y': rows[2]},
        29842: {'name': 'skin\nfibroblast', 'x': cols[0], 'y': rows[3]},
        29840: {'name': 'preadipocyte', 'x': cols[1], 'y': rows[3]},
        29820: {'name': 'stromal\ncell', 'x': cols[2], 'y': rows[2]},
        29812: {'name': 'secretory\ncell', 'x': cols[3], 'y': rows[2]},
        29819: {'name': 'extracellular\nmatrix\nsecreting\ncell', 'x': cols[2], 'y': rows[3]},
        29847: {'name': 'GAG\nsecreting\ncell', 'x': cols[2], 'y': rows[4]},
        29784: {'name': 'stem\ncell', 'x': cols[4], 'y': rows[0]},
        29807: {'name': 'multi\nfate\nstem\ncell', 'x': cols[4], 'y': rows[1]},
        29806: {'name': 'mesenchymal\ncell', 'x': cols[4], 'y': rows[2]},
        29780: {'name': 'mc**', 'x': cols[3] + x_dist * .3, 'y': rows[1] - y_dist * .3},
        29777: {'name': 'sac*', 'x': cols[0], 'y': rows[1] - y_dist * .3},
    }
    ########## MUSCLE ##########
    anchor_x, anchor_y = (1300, 800)
    x_dist, y_dist = (220, 220)
    nodes_coord_muscle = {
        29815: {'name': 'electrically\nresponsive\ncell', 'x': anchor_x + x_dist * .5, 'y': anchor_y},
        29813: {'name': 'contractile\ncell', 'x': anchor_x - x_dist * .5, 'y': anchor_y},
        29814: {'name': 'muscle\ncell', 'x': anchor_x, 'y': anchor_y - y_dist * 1},
        29823: {'name': 'smooth\nmuscle\ncell', 'x': anchor_x, 'y': anchor_y - y_dist * 2},
        29822: {'name': 'vascular\nassociated\nsmooth\nmuscle\ncell', 'x': anchor_x, 'y': anchor_y - y_dist * 3.2},
    }
    ########## REST ##########
    anchor_x, anchor_y = (1600, 550)
    x_dist, y_dist = (220, 220)
    cols = [x_dist * 0, x_dist * 1, x_dist * 2, x_dist * 3]
    cols = [anchor_x + c for c in cols]
    rows = [y_dist * 0, y_dist * 1, y_dist * 2, y_dist * 3, y_dist * 4]
    rows = [anchor_y - r for r in rows]
    nodes_coord = {
        29803: {'name': 'neural\ncell', 'x': cols[0], 'y': rows[0]},
        29802: {'name': 'neuron\nassociated\ncell', 'x': cols[0], 'y': rows[1]},
        29846: {'name': 'glial\ncell', 'x': cols[0], 'y': rows[2]},
        29839: {'name': 'embryonic\ncell', 'x': cols[1], 'y': rows[0]},
        29821: {'name': 'extraembryonic\ncell', 'x': cols[1], 'y': rows[1]},
        29841: {'name': 'cardiocyte', 'x': cols[1], 'y': rows[2]},
        29785: {'name': 'non-terminally\ndifferentiated\ncell', 'x': cols[2], 'y': rows[0]},
        29831: {'name': 'supportive\ncell', 'x': cols[2], 'y': rows[1]},
        29816: {'name': 'cellof skeletal\nmuscle', 'x': cols[2], 'y': rows[2]},
        29834: {'name': 'bone\ncell', 'x': cols[3], 'y': rows[0]},
        29837: {'name': 'bone\nmarrow\ncell', 'x': cols[3], 'y': rows[1]},
    }
    ########## LEGEND ##########
    anchor_x, anchor_y = (0, 2600)
    x_dist, y_dist = (220, 200)
    nodes_coord_legend = {
        29779: {'name': '*stuff\naccumulating\ncell', 'x': anchor_x, 'y': anchor_y},
        29783: {'name': '**motile\ncell', 'x': anchor_x + x_dist, 'y': anchor_y},
    }
    # merge every section into the single coordinate table
    nodes_coord.update(nodes_coord_epith)
    nodes_coord.update(nodes_coord_hemat)
    nodes_coord.update(nodes_coord_muscle)
    nodes_coord.update(nodes_coord_connect)
    nodes_coord.update(nodes_coord_legend)
    # display-name overrides keyed by the node's ';;'-joined shared_name
    nodes_print_name = {
        "kidney cell;;kidney epithelial cell": "kidney\nepithelial\ncell",
        "CD14-positive, CD16-negative classical monocyte;;classical monocyte": "CD14-positive,\nCD16-negative\nclassical\nmonocyte",
        "GAG secreting cell;;carbohydrate secreting cell": "GAG\nsecreting\ncell",
        "endopolyploid cell;;hepatocyte;;metabolising cell;;polyploid cell": "hepatocyte",
        "defensive cell;;phagocyte": "phagocyte",
        "barrier cell;;lining cell": "lining\ncell",
        "non-striated muscle cell;;smooth muscle cell;;visceral muscle cell": "smooth\nmuscle\ncell",
        "glial cell;;glial cell (sensu Vertebrata)": "glial\ncell\n(sensu\nVertebrata)",
        "electrically active cell;;electrically responsive cell": "electrically\nresponsive\ncell",
        "alpha-beta T cell;;mature T cell;;mature alpha-beta T cell": "mature\nalpha-beta\nT cell",
        "multi fate stem cell;;somatic stem cell": "multi\nfate\nstem\ncell",
    }
    # stamp coordinates and print names onto the Cytoscape nodes
    # (node ids are ints here but strings in the Cytoscape dict)
    for node_id in nodes_coord:
        nodes[str(node_id)]["node_print_name"] = nodes_print_name.get(nodes[str(node_id)]["shared_name"],
                                                                      nodes_coord[node_id]["name"])
        nodes[str(node_id)]["y"] = nodes_coord[node_id]["y"]
        nodes[str(node_id)]["x"] = nodes_coord[node_id]["x"]
    ontoviewer_coords = {
        "nodes": nodes,
        "edges": edges
    }
    with open(output_graph, "w") as file_handler:
        json.dump(ontoviewer_coords, file_handler, indent=4)
def parse_args(argv=None):
    """Parse CLI arguments.

    :param argv: optional list of argument strings; with the default None,
        argparse falls back to ``sys.argv[1:]`` itself, so no-argument call
        sites behave exactly as before. The parameter makes this testable.
    :return: argparse.Namespace with cytoscape_file and output_graph.
    """
    parser = argparse.ArgumentParser(description='Make graph file with node location and edges.')
    parser.add_argument('cytoscape_file', type=str, help='path to cytoscape xgmml file.')
    parser.add_argument('output_graph', type=str, help='path to output file.')
    args = parser.parse_args(argv)
    return args
def cli_make_graph_coord():
    """Console entry point: convert a Cytoscape export into ontoviewer JSON."""
    cli_args = parse_args()
    main(cli_args.cytoscape_file, cli_args.output_graph)
if __name__ == "__main__":
    # developer shortcut: run against the checked-in sample export
    # NOTE(review): these relative paths only resolve from the module's
    # own directory — confirm before relying on direct execution.
    CYTOSCAPE_FILE = "../../data/cytoscape_cl_display_config.xgmml"
    OUTPUT_GRAPH = "../../output/ontoviewer_graph.json"
    main(CYTOSCAPE_FILE, OUTPUT_GRAPH)
|
hoffmangroup/cla | claweb/claweb/controller/comparisons/comparison_list.py | <gh_stars>0
__author__ = 'mickael'
import os
from jinja2 import Environment, FileSystemLoader
from ...model import comparisons
def comparison_list(cfg, group_and_comparisons):
    """Render comparison_list.html listing every comparison per group.

    :param cfg: parsed config dict with a 'website' section.
    :param group_and_comparisons: parsed GAC dict.
    """
    template_folder = os.path.join(os.path.dirname(__file__), '..', '..', 'view')
    environment = Environment(loader=FileSystemLoader(template_folder))
    parent_template = environment.get_template('default.tpl')
    group_comparisons = comparisons.comparison_list(cfg, group_and_comparisons)
    html = parent_template.render(groups=group_comparisons,
                                  site=cfg['website'],
                                  tpl='comparison_list.tpl')
    target = os.path.join(cfg['website']['output'], "comparison_list.html")
    # write bytes so the encoding is explicit and platform-independent
    with open(target, "wb") as f:
        f.write(html.encode("utf-8"))
|
hoffmangroup/cla | skrrf/setup.py | from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy
# Cython extension modules: the tree internals are compiled for speed.
exts = [Extension(name='skrrf.tree._tree', sources=['skrrf/tree/_tree.pyx']),
        Extension(name='skrrf.tree._criterion', sources=['skrrf/tree/_criterion.pyx']),
        Extension(name='skrrf.tree._splitter', sources=['skrrf/tree/_splitter.pyx'])]
setup(
    name='skrrf',
    version='1',
    packages=['skrrf', 'skrrf.tree'],
    url='',
    license='',
    author='mmendez12',
    author_email='<EMAIL>',
    description='',
    install_requires=['numpy', 'scikit-learn'],
    # cythonize compiles the .pyx sources; numpy headers are needed at build time
    ext_modules=cythonize(exts),
    include_dirs=[numpy.get_include()]
)
maayanhd/safeTube | scoring_videos/Video_Scoring_System_metadata/Video_Scoring_System_metadata.py | import json
from http.server import BaseHTTPRequestHandler, HTTPServer
import requests
import simplejson
from pyspark.sql import SparkSession
import pickle
import re
# Path of the pickled classifier model. NOTE(review): not referenced in the
# code visible here — presumably loaded elsewhere in this module; confirm.
filename = 'trained_video_classifier.sav'
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """HTTP endpoint of the video scoring service.

    POST a JSON video object (or list of objects); the response body is a
    JSON array of scoring records with id, bracket_count,
    contains_bad_language, is_safe and final_score fields.
    """

    def _set_headers(self):
        # 200 plus a JSON content-type header for every scored response
        self.send_response(200)
        self.send_header('Content-type', 'JSON')
        self.end_headers()

    def do_GET(self):
        # health-check style reply: empty 200
        self.send_response(200)
        self.end_headers()

    def do_POST(self):
        self._set_headers()
        # read exactly Content-Length bytes of the request body
        self.data_string = self.rfile.read(int(self.headers['Content-Length']))
        data = simplejson.loads(self.data_string)
        # a single video object is normalized into a one-element list.
        # NOTE(review): get_scoring_videos_RDD / get_model_decision_RDD are
        # not visible in this part of the module — presumably defined below;
        # confirm their contract.
        scoring_videos_RDD = get_scoring_videos_RDD(data if isinstance(data, list) else [data])
        updated_model_decision_RDD = get_model_decision_RDD(scoring_videos_RDD)
        updated_model_decision_df = updated_model_decision_RDD.toDF(['id', 'bracket_count', 'contains_bad_language',
                                                                     'is_safe', 'final_score'])
        # updated_model_decision_df.show()
        updated_videos_json = updated_model_decision_df.toJSON().collect()
        # assemble one JSON array string from the per-row JSON documents
        result_str = '[' + ','.join(map(str, updated_videos_json)) + ']'
        self.wfile.write(bytes(result_str, 'utf-8'))
def get_spark_session():
    """Create (or fetch the already-cached) local SparkSession for this app."""
    builder = (
        SparkSession.builder
        .master("local")
        .appName("Video Scoring System")
        .config('spark.network.timeout', '60s')
        .config('spark.executor.heartbeatInterval', '75ms')
    )
    # getOrCreate() returns the existing session when one is already running.
    return builder.getOrCreate()
# Shared Spark session/context used by all RDD helpers below.
spark = get_spark_session()
spark_context = spark.sparkContext
def get_videos_id_bracket_count_and_is_containing_bad_words_RDD(videos_collection_json, bad_words_list):
    """Build an RDD of scored rows from raw video dicts.

    :param videos_collection_json: list of dicts with 'id' and 'parsedSubtitles'
    :param bad_words_list: flat list of banned words to search for in subtitles
    :return: cached RDD of (id, bracket_count, contains_bad_language,
        is_safe (always False here), final_score) tuples
    """
    videos_collection_RDD = spark_context.parallelize(videos_collection_json)
    # show_RDD(videos_collection_RDD)
    videos_RDD = videos_collection_RDD.map(lambda x: (x['id'], x['parsedSubtitles']))
    # show_RDD(videos_RDD)
    # get_video_bracket_count_and_are_bad_words_contained(videos_RDD.first()[1], bad_words_list)
    videos_id_bracket_count_RDD = \
        videos_RDD.map(lambda x: (x[0],
                                  get_video_bracket_count_and_are_bad_words_contained(list(x[1]), bad_words_list))) \
            .map(lambda id_and_scoring: (id_and_scoring[0], id_and_scoring[1][0], id_and_scoring[1][1], False,
                                         get_calculated_scoring(id_and_scoring[1][0], id_and_scoring[1][1])))
    # Cached because the result is consumed more than once downstream.
    videos_id_bracket_count_RDD.cache()
    '''returning objects in this format- (id, bracket count, containing bad language, isSafePrediction,
    final_scoring) '''
    # show_RDD(videos_id_bracket_count_RDD)
    return videos_id_bracket_count_RDD
def get_calculated_scoring(bracket_count, contains_bad_language):
    """Map a bracket count and a bad-language flag to a final video score."""
    # Clean videos with a single-digit bracket count score on a 10-down scale.
    if not contains_bad_language and bracket_count <= 9:
        return 10 - (bracket_count % 10)
    # Otherwise the score degrades with the digit count of bracket_count.
    return 5 - (len(str(bracket_count)) % 5)
def get_video_bracket_count_and_are_bad_words_contained(subtitles_lst, bad_wording_list):
    """Count censor-bracket markers and detect banned words in subtitles.

    :param subtitles_lst: list of subtitle lines (may contain None entries,
        may itself be None or empty)
    :param bad_wording_list: words to search for with word boundaries
    :return: (bracket_count, are_bad_words_contained); empty/None input is
        scored as (9, False) so it ends up with the lowest clean score
    """
    # Fix: check for None BEFORE calling len() -- the original evaluated
    # len(subtitles_lst) first and raised TypeError on None input.
    if subtitles_lst:
        filtered_subtitles_lst = [line for line in subtitles_lst if line is not None]
        # "[ __ ]"-style markers denote censored cursing in the transcripts.
        bad_words_count = sum(len(re.findall(r'\[.__.\]', line)) for line in filtered_subtitles_lst)
        # NOTE(review): bad words are interpolated into the regex unescaped;
        # a word containing regex metacharacters would misbehave -- confirm
        # the bad-words dataset is plain alphanumeric before adding re.escape.
        are_bad_words_contained = any(
            re.search(rf'\b{bad_word}\b', line)
            for line in filtered_subtitles_lst
            for bad_word in bad_wording_list
        )
    else:
        # Empty/missing subtitles: force the lowest "clean" score downstream.
        bad_words_count = 9
        are_bad_words_contained = False
    return bad_words_count, are_bad_words_contained
def show_RDD(rdd):
    # Debug helper: materialize the RDD as JSON and print the first 20 rows.
    spark.read.json(rdd).show(20, truncate=False)
def get_videos_collection_resp(get_request_route):
    """GET the full videos collection from the backend API (blocking call)."""
    response = requests.get(get_request_route)
    return response
def get_videos_collection_keys(response):
    """Return the field names of the first video in a collection response."""
    # NOTE(review): raises IndexError when the collection is empty -- confirm
    # callers only use this on non-empty training responses.
    return response.json()[0].keys()
def get_model_decision_RDD(updated_videos_RDD):
    """Apply the pickled classifier to each scored row and adjust final scores.

    Rows are (id, bracket_count, contains_bad_language, is_safe, final_score);
    the classifier overwrites is_safe, and unsafe rows with a score above 5
    get a 5-point penalty.
    """
    # Fix: load the pickle inside a context manager so the file handle is
    # always closed (the original leaked the open file object).
    with open(filename, 'rb') as model_file:
        loaded_classifier = pickle.load(model_file)
    # Our model wasn't accurate enough so we made a small change by subtracting
    # 4 from the bracket count to predict 1 when the counter was under 5 and
    # -1 when the counter was 5 or higher.
    # NOTE(review): 575 is a hard-coded first feature fed to the model --
    # confirm it matches the feature layout used at training time.
    updated_RDD = updated_videos_RDD.map(
        lambda x: (
            x[0], x[1], x[2], True if loaded_classifier.predict(np.array((575, x[1] - 4)).reshape((1, -1))) == 1
            else False, x[4]))
    # Fix: the penalty map's result was previously discarded (bare expression)
    # and its else branch collapsed the row tuple to a scalar; reassign and
    # keep the full tuple unchanged when no penalty applies.
    updated_RDD = updated_RDD.map(
        lambda x: (x[0], x[1], x[2], x[3], x[4] - 5) if not x[3] and x[4] > 5 else x)
    show_RDD(updated_RDD)
    return updated_RDD
def get_scoring_videos_RDD(response_data=None):
    """Score the given video payload against the bad-words dataset.

    :param response_data: list of video dicts ({'id', 'parsedSubtitles'}, ...)
    :return: RDD of (id, bracket_count, contains_bad_language, is_safe,
        final_score) rows
    """
    # For Training the model
    # resp = get_videos_collection_resp('http://localhost:8082/api/v1/youtube/getalltranscripts')
    # videos_keys = get_videos_collection_keys(resp)
    # First CSV column (_c0) holds the banned word itself.
    bad_words_df = spark.read.csv('Bad_Words_Dataset/bad-words.csv')
    bad_words_list = bad_words_df.select('_c0').rdd.map(lambda row: row[0]).collect()
    updated_videos_RDD = get_videos_id_bracket_count_and_is_containing_bad_words_RDD(response_data,
                                                                                     bad_words_list)
    # For training the model
    # updated_videos_RDD = get_videos_id_bracket_count_and_is_containing_bad_words_RDD(resp.json(),
    #                                                                                 bad_words_list)
    return updated_videos_RDD
# -------------------------------Classifier--------------------------------------
from typing import Optional
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from copy import deepcopy
from sklearn.tree import DecisionTreeClassifier
# Option 1 selects the decision-stump (tree) flavour of AdaBoost below.
current_option_number = 1
# Re-created here so the classifier section can run standalone; getOrCreate()
# returns the same underlying session as the one created above.
spark = get_spark_session()
spark_context = spark.sparkContext
def plot_adaboost(_X: np.ndarray,
                  _y: np.ndarray,
                  clf=None,
                  sample_weights: Optional[np.ndarray] = None,
                  annotate: bool = False,
                  ax: Optional[mpl.axes.Axes] = None,
                  base_clf_errors=None,
                  base_clf_stump_weights=None) -> None:
    """ Plot ± samples in 2D, optionally with decision boundary
    :param base_clf_errors: all T errors from the training process
    :param ax: axes for plotting
    :param annotate: when True, label each sample point with its index
    :param clf: The Adaboost generated classifier
    :param _y: labels
    :param _X: samples
    :param sample_weights: weights for training process
    :param base_clf_stump_weights: coefficient of all T base classifiers
    """
    assert set(_y) == {-1, 1}, 'Expecting response labels to be ±1'
    if not ax:
        fig, ax = plt.subplots(figsize=(5, 5), dpi=100)
        fig.set_facecolor('white')
    pad = 1
    x_min, x_max = _X[:, 0].min() - pad, _X[:, 0].max() + pad
    y_min, y_max = _X[:, 1].min() - pad, _X[:, 1].max() + pad
    # NOTE(review): sample_weights is accepted but not rendered -- the
    # original computed marker sizes from it without ever using them (dead
    # locals removed); wire it into a scatter() call if weighted markers
    # are wanted.
    X_pos = _X[_y == 1]
    ax.plot([x1 for x1, x2 in list(X_pos)], [x2 for x1, x2 in list(X_pos)], '+r',
            label='sample group 1')
    X_neg = _X[_y == -1]
    # Fix: the negative group previously plotted x2 on BOTH axes, collapsing
    # every point onto the diagonal.
    ax.plot([x1 for x1, x2 in list(X_neg)], [x2 for x1, x2 in list(X_neg)], 'ob',
            label='sample group 2')
    if base_clf_errors is not None:
        (x_min, x_max), (y_min, y_max) = plot_collection_by_t(np.asarray(list(range(len(base_clf_errors)))),
                                                              np.asarray(base_clf_errors), ax=ax, value_name="error",
                                                              min_sample_val=x_min,
                                                              max_sample_val=x_max,
                                                              min_label_val=y_min, max_label_val=y_max, marker='*',
                                                              color='y', )
    if base_clf_stump_weights is not None:
        plot_collection_by_t(np.asarray(list(range(len(base_clf_stump_weights)))),
                             np.asarray(base_clf_stump_weights), ax=ax, value_name="alpha", min_sample_val=x_min,
                             max_sample_val=x_max,
                             min_label_val=y_min, max_label_val=y_max, marker='^', color='g')
    if annotate:
        # Fix: the original iterated over an undefined name `X`, raising
        # NameError whenever annotate=True.
        for i, (x, y_annotate) in enumerate(_X):
            offset = 0.05
            ax.annotate(f'$x_{i + 1}$', (x + offset, y_annotate - offset))
    ax.set_xlim(x_min + 0.5, x_max - 0.5)
    ax.set_ylim(y_min + 0.5, y_max - 0.5)
    ax.set_xlabel('x / iteration number')
    ax.set_ylabel('$y$')
def plot_collection_by_t(x_values: np.ndarray,
                         y_values: np.ndarray,
                         ax: Optional[mpl.axes.Axes] = None,
                         value_name="y",
                         min_sample_val=0,
                         max_sample_val=0,
                         min_label_val=0,
                         max_label_val=0,
                         marker="+",
                         color="blue"):
    """Plot a per-iteration series (e.g. errors or alphas) on *ax*.

    Axis limits are widened to cover both the series and the caller-supplied
    sample/label extents; the applied ((x_min, x_max), (y_min, y_max)) pair is
    returned so the caller can keep subsequent overlays consistent.
    """
    pad = 1
    x_min, x_max = min(x_values.min(), min_sample_val) - pad, max(x_values.max(), max_sample_val) + pad
    y_min, y_max = min(y_values.min(), min_label_val) - pad, max(y_values.max(), max_label_val) + pad
    # Fix: removed leftover debug statement `print(type(ax))`.
    ax.plot(x_values, y_values, marker + color, label=value_name)
    ax.set_xlim(x_min + 0.5, x_max - 0.5)
    ax.set_ylim(y_min + 0.5, y_max - 0.5)
    ax.legend()
    return (x_min, x_max), (y_min, y_max)
def truncate_adaboost(clf, t: int):
    """Truncate a fitted AdaBoost up to (and including) iteration *t*."""
    assert t > 0, 't must be a positive integer'
    truncated = deepcopy(clf)
    if current_option_number == 1:
        # ------------------------Decision Trees Classifiers only------------------------------------
        # Keep only the first t stumps and their matching weights.
        truncated.stumps = clf.stumps[:t]
        truncated.stump_weights = clf.stump_weights[:t]
        # ------------------------Decision Trees Classifiers only------------------------------------
    return truncated
def get_mapped_transcript_dataset(scoring_videos_json):
    """Turn scored-video JSON rows into (X, y) numpy arrays for training.

    :param scoring_videos_json: JSON strings with 'bracket_count' and
        'final_score' keys
    :return: X of (index, bracket_count) pairs and y of ±1 labels
    """
    # Referring to 1 as suitable for kids and -1 as not suitable (labels)
    X_and_Y_RDD = spark_context.parallelize([json.loads(scored_vid) for scored_vid in scoring_videos_json]) \
        .map(lambda scored_vid: (scored_vid['bracket_count'], 1) if scored_vid['final_score'] > 4
             else (scored_vid['bracket_count'], -1)).zipWithIndex()
    X_and_Y_RDD.cache()
    # Samples are represented by (indexed video, bracket_count) and labels are 1 for suitable and -1 for not suitable
    X_dataset, y_dataset = np.array(X_and_Y_RDD.map(lambda x: (x[1], x[0][0])).collect()), \
                           np.array(X_and_Y_RDD.map(lambda x: x[0][1]).collect())
    return X_dataset, y_dataset
class AdaBoost:
    """ AdaBoost ensemble classifier from scratch """

    def __init__(self):
        # ------------------------Decision Trees Classifiers only------------------------------------
        self.stumps = None          # fitted weak learners, one per boosting round
        self.stump_weights = None   # alpha coefficient of each weak learner
        # ------------------------Decision Trees Classifiers only------------------------------------
        self.errors = None          # weighted training error per round
        self.sample_weights = None  # (iters, n) matrix of per-round sample weights

    @staticmethod
    def _check_X_y(X, y):
        """ Validate assumptions about format of input data"""
        assert set(y) == {-1, 1}, 'Response variable must be ±1'
        return X, y

    def fit(self, X: np.ndarray, y: np.ndarray, iters: int):
        """Fit the ensemble for *iters* boosting rounds and return self.

        :param X: (n, d) sample matrix
        :param y: length-n label vector of ±1
        :param iters: number of weak learners to fit
        """
        # X, y = self._check_X_y(X, y)
        n = X.shape[0]
        # init numpy arrays
        # (fix: removed dead logistic-regression scaffolding -- X_manipulated,
        # y_manipulated and theta were computed but never used, and
        # sample_weights was initialized twice)
        self.sample_weights = np.zeros(shape=(iters, n))
        if current_option_number == 1:
            # ------------------------Decision Trees Classifiers only------------------------------------
            self.stumps = np.zeros(shape=iters, dtype=object)
            self.stump_weights = np.zeros(shape=iters)
            # ------------------------Decision Trees Classifiers only------------------------------------
        self.errors = np.zeros(shape=iters)
        """ initialize weights uniformly """
        self.sample_weights[0] = np.ones(shape=n) / n
        for t in range(iters):
            # fit weak learner on the current sample weighting
            curr_sample_weights = self.sample_weights[t]
            stump = DecisionTreeClassifier(max_depth=1, max_leaf_nodes=2)
            stump = stump.fit(X, y, sample_weight=curr_sample_weights)
            # calculate error and stump weight from weak learner prediction
            stump_pred = stump.predict(X)
            err = curr_sample_weights[(stump_pred != y)].sum()  # / n
            # NOTE(review): err == 0 or err == 1 makes this log() blow up;
            # confirm the data can never be perfectly (mis)classified.
            stump_weight = np.log((1 - err) / err) / 2
            # re-weight samples: boost the misclassified ones
            new_sample_weights = (
                curr_sample_weights * np.exp(-stump_weight * y * stump_pred)
            )
            new_sample_weights /= new_sample_weights.sum()
            # If not final iteration, update sample weights for t+1
            if t + 1 < iters:
                self.sample_weights[t + 1] = new_sample_weights
            # save results of iteration
            self.stumps[t] = stump
            self.stump_weights[t] = stump_weight
            self.errors[t] = err
        return self

    def predict(self, X):
        """ Make predictions using already fitted model """
        stump_preds = np.array([stump.predict(X) for stump in self.stumps])
        return np.sign(np.dot(self.stump_weights, stump_preds))
def plot_staged_adaboost(X, y, clf, errors, alphas, iters=10):
    """ Plot weak learner and cumulative strong learner at each iteration. """
    # larger grid: one row per boosting round, weak learner left, ensemble right
    fig, axes = plt.subplots(figsize=(8, iters * 3),
                             nrows=iters,
                             ncols=2,
                             sharex=True,
                             dpi=45)
    fig.set_facecolor('white')
    _ = fig.suptitle('Decision boundaries by iteration')
    for i in range(iters):
        ax1, ax2 = axes[i]
        clf_i = clf.stumps[i]
        # Plot weak learner fitted at round i
        _ = ax1.set_title(f'Weak learner at t={i + 1}')
        plot_adaboost(X, y, clf_i,
                      sample_weights=clf.sample_weights[i],
                      annotate=False, ax=ax1, base_clf_errors=errors,
                      base_clf_stump_weights=alphas)
        # Plot strong learner: the ensemble truncated to the first i+1 stumps
        trunc_clf = truncate_adaboost(clf, t=i + 1)
        _ = ax2.set_title(f'Strong learner at t={i + 1}')
        plot_adaboost(X, y, clf=trunc_clf,
                      sample_weights=clf.sample_weights[i],
                      annotate=False, ax=ax2, base_clf_errors=errors,
                      base_clf_stump_weights=alphas)
    plt.tight_layout()
    plt.subplots_adjust(top=0.95)
    plt.show()
# run once in a while for training the model on a larger amount of data
def run_adaboost():
    """Train the AdaBoost classifier on scored videos, persist it, and plot.

    Writes the fitted model to `filename` via pickle and returns it.
    """
    # NOTE(review): called with no argument, get_scoring_videos_RDD receives
    # response_data=None -- the commented-out transcript fetch below appears
    # to be the intended training data source; confirm before running.
    scoring_videos_RDD = get_scoring_videos_RDD()
    updated_videos_df = scoring_videos_RDD.toDF(['id', 'bracket_count', 'contains_bad_language',
                                                 'is_safe', 'final_score'])
    updated_videos_json = updated_videos_df.toJSON().collect()
    X, y = get_mapped_transcript_dataset(updated_videos_json)
    AdaBoost_instance = AdaBoost()
    classifier = AdaBoost_instance.fit(X, y, iters=10)
    # save trained model to a file
    pickle.dump(classifier, open(filename, 'wb'))
    loaded_classifier = pickle.load(open(filename, 'rb'))
    errors = AdaBoost_instance.errors
    # Base classifiers coefficient (alpha per boosting round)
    stump_weights = AdaBoost_instance.stump_weights
    plot_adaboost(X, y, classifier, base_clf_errors=errors, base_clf_stump_weights=stump_weights)
    plot_staged_adaboost(X, y, classifier, errors, stump_weights)
    train_err = (classifier.predict(X) != y).mean()
    print(f'Train error: {train_err:.1%}')
    return classifier
# -------------------------------Classifier--------------------------------------
# HTTP server for listening to the DB server and provide services to the DB server (as a client)
# Bind on localhost:8081 and serve scoring requests forever (blocking).
httpd = HTTPServer(('localhost', 8081), SimpleHTTPRequestHandler)
print("server is live\n")
httpd.serve_forever()
|
mathigatti/midi2voice | midi2voice/__init__.py | <gh_stars>100-1000
import os
import sys
import urllib.request
import requests
from .midi2xml import midi2xml
def renderize_voice(lyrics, midi_path, sex="female", tempo=80, out_folder="."):
    """Render sung vocals: build a MusicXML score, then synthesize it via sinsy.

    Writes voice.xml (intermediate score) and voice.wav (result) into
    *out_folder*.
    """
    xml_path = os.path.join(out_folder, "voice.xml")
    wav_path = os.path.join(out_folder, "voice.wav")
    midi2xml(lyrics, midi_path, xml_path, tempo)
    sinsy_request(xml_path, wav_path, sex)
def sinsy_request(xml_file_path, wav_path, sex):
    """Submit a MusicXML file to the sinsy.jp synthesizer and download the wav.

    :param xml_file_path: path of the MusicXML score to upload
    :param wav_path: destination path for the downloaded wav
    :param sex: "male" selects speaker 5, anything else speaker 4
    :raises Exception: when no wav file name can be scraped from the response
    """
    if sex == "male":
        SPKR = 5
    else:
        SPKR = 4
    headers = {'User-Agent': 'Mozilla/5.0'}
    payload = {'SPKR_LANG':'english', 'SPKR':SPKR, 'VIBPOWER':'1', 'F0SHIFT':'0'}
    # Fix: open the upload file in a context manager so the handle is always
    # closed (the original leaked the open file object).
    with open(xml_file_path, 'rb') as score_file:
        files = {'SYNSRC': score_file}
        # Sending post request and saving response as response object
        r = requests.post(url='http://sinsy.sp.nitech.ac.jp/index.php', headers=headers, data=payload, files=files)
    html_response = r.text.split("temp/")
    # Magic scraping of the website to find the name of the wav file generated
    url_file_name = find_wav_name_on_website(html_response)
    if url_file_name is None:
        raise Exception("No wav file found on sinsy.jp")
    else:
        download(url_file_name, wav_path)
def find_wav_name_on_website(htmlResponse):
    """Scrape sinsy.jp response fragments for the generated wav file name.

    :param htmlResponse: fragments of the HTML response (split on "temp/")
    :return: the file stem before ".wav", or None when no fragment matches
    """
    url_file_name = None
    for line in htmlResponse:
        parts = line.split(".")
        # Fix: guard against fragments without a "." -- the original indexed
        # parts[1] unconditionally and raised IndexError on such fragments.
        if len(parts) > 1 and parts[1][:3] == "wav":
            url_file_name = parts[0]
            break
    return url_file_name
def download(url_file_name, wav_path):
urllib.request.urlretrieve("http://sinsy.sp.nitech.ac.jp/temp/" + url_file_name + ".wav", wav_path) |
kdfwow64/yoyo | locations/urls.py | from django.urls import path
from . import views
# Route /<city>/ to the temperature aggregation view; the <city> path
# component is passed to the view as a keyword argument.
urlpatterns = [
    path(
        "<city>/",
        views.RetrieveTemperatureData.as_view(),
        name="retrieve_temperature_data",
    )
]
|
kdfwow64/yoyo | temperature/urls.py | <reponame>kdfwow64/yoyo
from django.conf.urls import url
from django.contrib import admin
from django.urls import include, path
from rest_framework_swagger.views import get_swagger_view
# Swagger UI view for browsing the API documentation.
schema_view = get_swagger_view(title='API Documentation')
# Project-level URL routing: DRF auth, docs, app routes and admin.
urlpatterns = [
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    path('api/documentation/', schema_view),
    path('api/locations/', include('locations.urls')),
    path('api/auth/', include('authentication.urls')),
    path('admin/', admin.site.urls),
]
|
kdfwow64/yoyo | locations/request_object.py | <filename>locations/request_object.py
from dataclasses import field
from locations.utils.convertible import convertibleclass, meta
from locations.utils.validators import validate_range
@convertibleclass
class RetrieveTemperatureDataRequestObject:
    """Retrieve Temperature Data request object."""
    # Dict-key constants used by callers when building the request payload.
    CITY = "city"
    DAYS = "days"
    # Target city name (free text, forwarded to the weather API).
    city: str = field(default=None)
    # Forecast horizon in days; coerced to int and validated to be in 1..10.
    days: int = field(
        default=None, metadata=meta(value_to_field=int, validator=validate_range(1, 10))
    )
|
kdfwow64/yoyo | locations/tests/request_object_tests.py | <gh_stars>0
from django.core.exceptions import ValidationError
from django.test import TestCase
from locations.request_object import RetrieveTemperatureDataRequestObject
from locations.utils.convertible import ConvertibleClassValidationError
class RetrieveTemperatureDataTestCase(TestCase):
    """Validation tests for RetrieveTemperatureDataRequestObject."""

    def test_failure_request_object_with_invalid_days_string(self):
        # Non-numeric "days" must be rejected by the int coercion/validator.
        request_dict = {
            "city": "London",
            "days": "days"
        }
        with self.assertRaises(ConvertibleClassValidationError):
            RetrieveTemperatureDataRequestObject.from_dict(request_dict)

    def test_failure_request_object_with_invalid_days_number(self):
        # Values outside the 1..10 range must be rejected.
        request_dict = {
            "city": "London",
            "days": 55
        }
        with self.assertRaises(ConvertibleClassValidationError):
            RetrieveTemperatureDataRequestObject.from_dict(request_dict)
|
kdfwow64/yoyo | locations/utils/convertible.py | <reponame>kdfwow64/yoyo
import json
import logging
import typing
from dataclasses import dataclass, fields
from datetime import datetime
from decimal import Decimal
from enum import EnumMeta, Enum
from typing import Union
import aenum
from locations.exceptions import DetailedException, ErrorCodes
from locations.utils.json_utils import decamelize, camelize
logger = logging.getLogger(__name__)
__FIELDS = "_____CONVERTIBLE______"
def _is_convertible(_cls):
if hasattr(_cls, __FIELDS):
return True
return False
def _extract_metadata_callable(f, callable_name):
if hasattr(f, "metadata"):
if callable_name in f.metadata and hasattr(
f.metadata[callable_name], "__call__"
):
return f.metadata[callable_name]
return None
def _extract_metadata_boolean(f, boolean_name):
if hasattr(f, "metadata"):
if boolean_name in f.metadata and isinstance(f.metadata[boolean_name], bool):
return f.metadata[boolean_name]
return False
VALIDATOR = "validator"
REQUIRED = "required"
VAL_TO_FIELD = "val_to_field"
FIELD_TO_VAL = "field_to_val"
def meta(
validator=None, required: bool = False, field_to_value=None, value_to_field=None
):
result = {}
if validator is not None:
result[VALIDATOR] = validator
if required is not None:
result[REQUIRED] = required
if value_to_field:
result[VAL_TO_FIELD] = value_to_field
if field_to_value:
result[FIELD_TO_VAL] = field_to_value
return result
def from_dict(
    cls, d: dict, use_validator_field=True, ignored_fields: Union[list, tuple] = None
):
    """Deserialize dict *d* into a new instance of convertible class *cls*.

    :param cls: a class decorated with @convertibleclass
    :param d: source mapping of field name -> raw value
    :param use_validator_field: when True, run each field's `validator`
        metadata callable and enforce `required` fields
    :param ignored_fields: field names copied verbatim without conversion or
        validation; dotted names ("outer.inner") ignore fields of nested objects
    :raises ConvertibleClassValidationError: on any validation/conversion failure
    """
    new_instance = cls()
    # Generating {field: [field_nested_ignore_value_1, field_nested_ignore_value_2]} dictionary to ignore nested fields
    nested_ignore_dict = {}
    if ignored_fields is not None:
        nested_ignore_fields = [field for field in ignored_fields if "." in field]
        for f in nested_ignore_fields:
            ignore_key = f.split(".")[0]
            ignore_value = f.split(f"{ignore_key}.", maxsplit=1)[1]
            if ignore_key not in nested_ignore_dict:
                nested_ignore_dict[ignore_key] = []
            nested_ignore_dict[ignore_key].append(ignore_value)
    for f in fields(cls):
        field_name = f.name
        # Private and UPPER_CASE (constant) fields are never deserialized.
        if field_name.startswith("_"):
            continue
        if field_name[0].isupper():
            continue
        if isinstance(ignored_fields, (list, tuple)) and field_name in ignored_fields:
            setattr(new_instance, field_name, d[field_name])
            continue
        field_type = None
        if hasattr(f, "type"):
            if not isinstance(f.type, str):
                field_type = f.type
            else:
                # Check if string is a Platform Play class name and convert to class type if so
                field_type = globals().get("registered_class", {}).get(f.type)
                if not field_type:
                    raise RuntimeError(
                        "Please don't import annotations from __future__. read more here [https://bugs.python.org/issue34776]"
                    )
        field_validator = _extract_metadata_callable(f, VALIDATOR)
        field_required = _extract_metadata_boolean(f, REQUIRED)
        value_to_field = _extract_metadata_callable(f, VAL_TO_FIELD)
        if field_name in d:
            field_value = d[field_name]
            field_ignored_list = nested_ignore_dict.get(field_name)
            try:
                if field_validator is not None and use_validator_field:
                    try:
                        validated_ok = field_validator(field_value)
                    except Exception as e:
                        raise ConvertibleClassValidationError(
                            "Validation function error for [{}.{}] field with error [{}]".format(
                                cls.__name__, field_name, e.args
                            )
                        )
                    if type(field_validator) is not type and not validated_ok:
                        # If the validator is a simple type e.g. bool, int, field_validator will return the value, and
                        # we must allow False/0/empty string.
                        logger.warning(
                            "Field error [{}.{}]".format(cls.__name__, field_name)
                        )
                        raise DetailedException(
                            400, "Field error [{}.{}]".format(cls.__name__, field_name)
                        )
                if isinstance(field_type, list) and not isinstance(field_value, list):
                    raise ConvertibleClassValidationError(
                        f"[{field_value}] Field type is list but field value {field_name} is not"
                    )
                if is_list_field(field_type, field_value):
                    _handle_list_values(
                        cls,
                        field_name,
                        field_type,
                        field_validator,
                        field_value,
                        new_instance,
                        use_validator_field,
                        value_to_field,
                        field_ignored_list,
                    )
                elif not isinstance(field_type, list):
                    val = _from_single_value(
                        field_type,
                        field_validator,
                        field_value,
                        use_validator_field,
                        field_required,
                        value_to_field,
                        field_ignored_list,
                    )
                    setattr(new_instance, field_name, val)
            except CreateSingleValueWrongTypeError:
                raise ConvertibleClassValidationError(
                    "Field [{}.{}] with value [{}] is not type of [{}]".format(
                        cls.__name__, field_name, field_value, field_type.__name__
                    )
                )
            except ConvertibleClassValidationError as e:
                raise e
            except Exception as e:
                # Any other failure is normalized to a validation error.
                raise ConvertibleClassValidationError(
                    "Field [{}.{}] with value [{}] has error [{}]".format(
                        cls.__name__, field_name, field_value, e.args
                    )
                )
        elif field_required and use_validator_field:
            raise ConvertibleClassValidationError(
                "Field [{}.{}] is mandatory".format(cls.__name__, field_name)
            )
    # Class-level hooks: optional whole-object validation and post-processing.
    if hasattr(cls, "validate"):
        cls.validate(new_instance)
    if hasattr(cls, "post_init"):
        new_instance.post_init()
    return new_instance
class CreateSingleValueWrongTypeError(ValueError):
    """Raised when a single value cannot be deserialized into its declared field type."""
def _from_single_value(
    field_type,
    field_validator,
    field_value,
    use_validator_field,
    field_required,
    value_to_field=None,
    ignored_fields=None,
):
    """Convert one raw value to its declared field type.

    Handles int -> float widening, Decimal conversion, nested convertible
    classes, enums and typing.Any; raises CreateSingleValueWrongTypeError on
    a type mismatch.
    """
    if value_to_field:
        field_value = value_to_field(field_value)
    if isinstance(field_value, int) and field_validator is float:
        # WARN: The opposite is not tolerable and MUST NOT be implemented
        return float(field_value)
    elif (
        isinstance(field_value, float)
        and isinstance(field_type, float)
        and _is_convertible(field_type)
    ):
        return Decimal(str(field_value))
    elif isinstance(field_value, Decimal):
        # NOTE(review): a Decimal whose validator is neither float nor int
        # falls through this branch and the function implicitly returns
        # None -- confirm that is intended.
        if field_validator is float:
            return float(field_value)
        elif field_validator is int:
            return int(field_value)
    elif isinstance(field_value, dict) and _is_convertible(field_type):
        # Nested convertible object: recurse through its own from_dict().
        return field_type.from_dict(
            field_value,
            use_validator_field=use_validator_field,
            ignored_fields=ignored_fields,
        )
    elif isinstance(field_type, EnumMeta):
        try:
            # Int-valued and aenum members are looked up by value, other
            # enums by member name.
            first_enum = [item for item in field_type][0]
            if isinstance(first_enum, int) or isinstance(first_enum, aenum.Enum):
                return field_type(field_value)
            return field_type[field_value]
        except KeyError as e:
            raise CreateSingleValueWrongTypeError from e
    elif field_type == typing.Any:
        return field_value
    else:
        if field_value is None and not field_required:
            return field_value
        if field_type is not None and not isinstance(field_value, field_type):
            raise CreateSingleValueWrongTypeError(
                f"{field_value} is not of type {field_type}"
            )
        return field_value
def _handle_list_values(
    cls,
    field_name,
    field_type,
    field_validator,
    field_value,
    new_instance,
    use_validator_field,
    value_to_field,
    field_ignored_list=None,
):
    """Convert a list-typed field: deserialize each element and set the result.

    Elements of convertible classes recurse through from_dict(); every other
    element type goes through _from_single_value().
    """
    # NOTE(review): value_to_field is applied both to the whole list here and
    # again per-element inside _from_single_value() -- confirm converters are
    # idempotent or only registered for one of the two levels.
    if value_to_field:
        field_value = value_to_field(field_value)
    field_instance_type = _calculate_list_field_instance_type(field_type)
    if isinstance(field_instance_type, str):
        # If str, check if it's a known registered Platform Play class
        # and convert field_instance_type from str to class type
        pp_class = globals().get("registered_class", {}).get(field_instance_type)
        if pp_class:
            field_instance_type = pp_class
    if _is_convertible(field_instance_type):
        elements = [
            field_instance_type.from_dict(item, use_validator_field=use_validator_field)
            for item in field_value
        ]
        setattr(new_instance, field_name, elements)
    else:
        try:
            elements = [
                _from_single_value(
                    field_instance_type,
                    field_validator,
                    item,
                    use_validator_field,
                    True,
                    value_to_field,
                    field_ignored_list,
                )
                for item in field_value
            ]
            setattr(new_instance, field_name, elements)
        except CreateSingleValueWrongTypeError as e:
            raise ValueError(
                "List Field [{}.{}] with value [{}] is not type of [{}]".format(
                    cls.__name__, field_name, field_value, field_instance_type.__name__
                )
            ) from e
        except Exception:
            raise ConvertibleClassValidationError(
                "List Field [{}.{}] with value [{}] is not type of [{}]".format(
                    cls.__name__, field_name, field_value, field_instance_type.__name__
                )
            )
def _calculate_list_field_instance_type(field_type):
    """Extract the element type from a list-typed field annotation.

    Parameterized generics (List[X] / list[X]) carry the element type in
    __args__; a literal-list annotation such as [X] carries it as element 0.
    """
    if _is_from_generic_list(field_type):
        return field_type.__args__[0]
    return field_type[0]
def _is_from_generic_list(field_type):
try:
return field_type.__origin__ == list
except Exception:
return False
def is_list_field(field_type, field_value):
    """Decide whether a field should be deserialized as a list."""
    # Parameterized generic annotations always count as list fields.
    if _is_from_generic_list(field_type):
        return True
    # Otherwise both the annotation and the actual value must be lists.
    type_is_list = isinstance(field_type, list) or field_type == list
    return isinstance(field_value, list) and type_is_list
def to_dict(self, include_none=True, ignored_fields: Union[list, tuple] = None):
    """Serialize a convertible instance into a plain dict.

    :param include_none: when False, drop keys whose value is None
    :param ignored_fields: field names copied verbatim; dotted names
        ("outer.inner") are forwarded to nested objects
    """
    if not hasattr(self, "__dict__"):
        # Primitive leaf (e.g. int/str) reached via recursion: return as-is.
        return self
    result = {}
    # Generating {field: [field_nested_ignore_value_1, field_nested_ignore_value_2]} dictionary to ignore nested fields
    nested_ignore_dict = {}
    if ignored_fields is not None:
        nested_ignore_fields = [field for field in ignored_fields if "." in field]
        for f in nested_ignore_fields:
            ignore_key = f.split(".")[0]
            ignore_value = f.split(f"{ignore_key}.", maxsplit=1)[1]
            if ignore_key not in nested_ignore_dict:
                nested_ignore_dict[ignore_key] = []
            nested_ignore_dict[ignore_key].append(ignore_value)
    for key, val in self.__dict__.items():
        key_ignore_fields = nested_ignore_dict.get(key)
        # Private and UPPER_CASE (constant) attributes are not serialized.
        if key.startswith("_") or key[0].isupper():
            continue
        if isinstance(ignored_fields, (list, tuple)) and key in ignored_fields:
            result[key] = val
            continue
        field = self.__dataclass_fields__.get(key)
        field_to_value = _extract_metadata_callable(field, FIELD_TO_VAL)
        if isinstance(val, (list, tuple)):
            if field_to_value and val:
                element = field_to_value(val)
            else:
                element = [
                    _to_single_value(item, include_none, key_ignore_fields)
                    for item in val
                ]
        else:
            element = _to_single_value(val, include_none, key_ignore_fields)
            if field_to_value and element:
                element = field_to_value(element)
        if include_none:
            result[key] = element
        elif element is not None and val is not None:
            result[key] = element
    return result
def _to_single_value(
    val, include_none: bool, ignored_fields: Union[list, tuple] = None
):
    """Serialize one value: enums to name/int, Decimal to float, dict and
    datetime verbatim, anything else recursively via to_dict()."""
    if isinstance(val, Enum):
        # Int-backed enums serialize as their value, others as the member name.
        first_enum = [item for item in val.__class__][0]
        if isinstance(first_enum, int):
            return int(val)
        return val.name
    elif isinstance(val, Decimal):
        return float(val)
    elif isinstance(val, (dict, datetime)):
        return val
    else:
        return to_dict(val, include_none, ignored_fields)
@classmethod
def from_json(
    cls,
    s,
    *,
    encoding=None,
    _cls=None,
    object_hook=None,
    parse_float=None,
    parse_int=None,
    parse_constant=None,
    object_pairs_hook=None,
    **kw,
):
    """Deserialize a JSON string into an instance of *cls* via from_dict().

    The json.loads keyword arguments are forwarded unchanged; `encoding` is
    kept for backward compatibility but is ignored.
    """
    # Fix: do NOT forward `encoding` -- json.loads() dropped that parameter
    # in Python 3.9, and passing it (even as None) raises TypeError.
    return cls.from_dict(
        json.loads(
            s,
            cls=_cls,
            object_hook=object_hook,
            parse_float=parse_float,
            parse_int=parse_int,
            parse_constant=parse_constant,
            object_pairs_hook=object_pairs_hook,
            **kw,
        )
    )
def to_json(
    obj,
    *,
    include_none=True,
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    cls=None,
    indent=None,
    separators=None,
    default=None,
    sort_keys=False,
    **kw,
):
    """Serialize *obj* to a JSON string via its to_dict() representation.

    :param include_none: forwarded to obj.to_dict(); keep/drop None values
    All remaining keyword arguments are forwarded verbatim to json.dumps().
    """
    payload = obj.to_dict(include_none)
    return json.dumps(
        payload,
        skipkeys=skipkeys,
        ensure_ascii=ensure_ascii,
        check_circular=check_circular,
        allow_nan=allow_nan,
        cls=cls,
        indent=indent,
        separators=separators,
        default=default,
        sort_keys=sort_keys,
        **kw,
    )
# Registry of every @convertibleclass by name; used to resolve string
# (forward-reference) type annotations back to their class objects.
registered_class = {}


def convertibleclass(_cls):
    """Class decorator: register *_cls*, make it a dataclass, and attach the
    to_dict/from_dict/to_json/from_json conversion API."""
    registered_class[_cls.__name__] = _cls
    _cls = dataclass(_cls)
    # Marker attribute checked by _is_convertible().
    setattr(_cls, __FIELDS, _cls.__name__)
    _cls.to_dict = to_dict
    _cls.from_dict = classmethod(from_dict)
    _cls.to_json = to_json
    _cls.from_json = from_json
    return _cls
class ConvertibleClassValidationError(DetailedException):
    """ConvertibleClass validation error."""

    def __init__(self, message=None):
        # Every validation failure surfaces as HTTP 403 with a generic code.
        super().__init__(
            code=ErrorCodes.INVALID_REQUEST,
            debug_message=message or "Invalid Request",
            status_code=403,
        )
class Convertible:
    """Lightweight camelCase <-> snake_case dict conversion mixin."""

    @classmethod
    def from_dict(cls, data: dict):
        """Build an instance from a camelCase dict by snake_casing its keys."""
        _dict = {}
        for k, v in data.items():
            key = decamelize(k)
            _dict[key] = v
        return cls(**_dict)

    def to_dict(self, remove_keys: list = None) -> dict:
        """Serialize the instance to a camelCase dict.

        Extra attributes stored under `kwargs` are flattened into the result;
        any key listed in *remove_keys* is dropped.
        """
        _dict = camelize(self.__dict__)
        kwargs = _dict.pop("kwargs", None)
        if kwargs:
            _dict.update(kwargs)
        if remove_keys:
            for key in remove_keys:
                _dict.pop(key, None)
        return _dict
|
kdfwow64/yoyo | locations/migrations/0001_initial.py | from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Initial migration: creates no models, only anchors the dependency on
    the project's (swappable) user model."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
|
kdfwow64/yoyo | locations/views.py | <reponame>kdfwow64/yoyo<filename>locations/views.py
import requests
from django.conf import settings
from django.http import JsonResponse
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from locations.request_object import RetrieveTemperatureDataRequestObject
def retrieve_temperature_data(data):
    """Aggregate per-day forecast entries into summary statistics.

    :param data: list of forecast-day dicts as returned by weatherapi.com,
        each with a "day" mapping containing maxtemp_c/mintemp_c/avgtemp_c
    :return: dict with maximum, minimum, average and median temperatures
    :raises ValueError: when *data* is empty
    """
    if not data:
        # Fix: the original divided by len(data) and crashed with
        # ZeroDivisionError on an empty forecast.
        raise ValueError("No forecast data to aggregate")
    # Fix: use infinities as initial sentinels -- the old hard-coded ±100
    # bounds silently clipped temperatures outside [-100, 100].
    max_temp = float("-inf")
    min_temp = float("inf")
    total = 0
    for item in data:
        day = item["day"]
        max_temp = max(max_temp, day["maxtemp_c"])
        min_temp = min(min_temp, day["mintemp_c"])
        total += day["avgtemp_c"]
    avg_temp = total / len(data)
    # NOTE(review): "median" is really the midrange ((min+max)/2); preserved
    # because API consumers may rely on the existing key's semantics.
    median_temp = (min_temp + max_temp) / 2
    return {
        "maximum": max_temp,
        "minimum": min_temp,
        "average": avg_temp,
        "median": median_temp,
    }
class RetrieveTemperatureData(APIView):
    """GET endpoint returning temperature statistics for a city over N days."""
    permission_classes = [IsAuthenticated]

    @staticmethod
    def get(request, city):
        # `days` is mandatory; its 1..10 range is enforced by the request
        # object's validator during from_dict().
        if "days" not in request.query_params:
            raise Exception("[days] parameter is missing")
        request_obj = RetrieveTemperatureDataRequestObject.from_dict(
            {
                RetrieveTemperatureDataRequestObject.CITY: city,
                RetrieveTemperatureDataRequestObject.DAYS: request.query_params.get(
                    "days"
                ),
            }
        )
        # Forward the validated parameters to weatherapi.com.
        response = requests.get(
            f"https://api.weatherapi.com/v1/forecast.json?key={settings.WEATHER_API_KEY}&q={request_obj.city}&aqi=no&days={request_obj.days}"
        )
        if response.status_code != 200:
            # Surface the upstream API's own error message.
            raise Exception(response.json()["error"]["message"])
        rsp = retrieve_temperature_data(response.json()["forecast"]["forecastday"])
        return JsonResponse(rsp, safe=False)
|
kdfwow64/yoyo | locations/exceptions.py | import logging
class ErrorCodes:
    """Application-level error codes returned alongside HTTP errors."""
    INVALID_REQUEST = 100001
class DetailedException(Exception):
    """Application error carrying an error code, a message and an HTTP status."""

    # Default HTTP status used when the constructor receives none.
    status_code = 400

    def __init__(
        self,
        code,
        debug_message,
        status_code=None,
        payload=None,
        log_level: int = logging.ERROR,
    ):
        """Store the error details; *payload* is merged into to_dict() output."""
        Exception.__init__(self)
        self.code = code
        self.debug_message = debug_message
        # Only shadow the class-level default when a status was supplied.
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload
        self.log_level = log_level

    def to_dict(self):
        """Return a serializable dict with the payload plus code and message."""
        result = dict(self.payload or ())
        result["code"] = self.code
        result["message"] = self.debug_message
        return result

    def __str__(self):
        # Fall back to the dict form when there is no debug message.
        return self.debug_message or str(self.to_dict())
|
kdfwow64/yoyo | locations/utils/validators.py | <filename>locations/utils/validators.py
from typing import Union
def _value_to_int(value: Union[int, str]) -> int:
"""String value to int."""
try:
return int(value)
except ValueError as error:
raise Exception("The value is not integer")
def validate_range(min_: Union[int, float] = None, max_: Union[int, float] = None):
    """Build a validator checking that a value lies within the given bounds.

    :param min_: inclusive lower bound, or None for unbounded below
    :param max_: inclusive upper bound, or None for unbounded above
    :return: a callable raising on violation and returning True on success
    """
    def validator(value):
        value = _value_to_int(value)
        if min_ is not None and max_ is not None:
            if not (min_ <= value <= max_):
                raise Exception(f"value must be between {min_} and {max_}")
        elif min_ is not None:
            if value < min_:
                raise Exception(f"value must be above or equal to {min_}")
        elif max_ is not None:
            if max_ < value:
                raise Exception(f"value must be below or equal to {max_}")
        else:
            # Both bounds missing is a programming error, detected on first use.
            raise TypeError(
                "At least one of arguments should be provided: 'min_number', 'max_number'"
            )
        return True

    return validator
|
indykish/chef-repo | cookbooks/megam_ganglia/templates/default/rabbitmq.py | <filename>cookbooks/megam_ganglia/templates/default/rabbitmq.py
#!/usr/bin/python2.4
import sys
import os
#import simplejson as json
import json
import urllib2
import time
from string import Template
import itertools
import threading
# ---------------------------------------------------------------------------
# Module-level shared state for the ganglia rabbitmq metric module.
# (`global` at module scope is a no-op but kept for documentation of intent.)
# ---------------------------------------------------------------------------
global url, descriptors, last_update, vhost, username, password, url_template, result, result_dict, keyToPath
INTERVAL = 10  # minimum seconds between management-API refreshes
descriptors = list()  # ganglia metric descriptors, filled by metric_init()
username, password = "<PASSWORD>", "<PASSWORD>"
stats = {}
keyToPath = {}  # metric name -> dotted-path template into the API JSON
last_update = None  # timestamp of the last successful refresh
# Cached API responses keyed by (stat, vhost); populated by refreshStats().
compiled_results = {"nodes" : None, "queues" : None, "connections" : None}
### CONFIGURATION SECTION ###
STATS = ['nodes', 'queues']
# QUEUE METRICS #
keyToPath['rmq_messages_ready'] = "%s.messages_ready"
keyToPath['rmq_messages_unacknowledged'] = "%s.messages_unacknowledged"
keyToPath['rmq_backing_queue_ack_egress_rate'] = "%s.backing_queue_status.avg_ack_egress_rate"
keyToPath['rmq_backing_queue_ack_ingress_rate'] = "%s.backing_queue_status.avg_ack_ingress_rate"
keyToPath['rmq_backing_queue_egress_rate'] = "%s.backing_queue_status.avg_egress_rate"
keyToPath['rmq_backing_queue_ingress_rate'] = "%s.backing_queue_status.avg_ingress_rate"
keyToPath['rmq_backing_queue_mirror_senders'] = "%s.backing_queue_status.mirror_senders"
keyToPath['rmq_memory'] = "%s.memory"
keyToPath['rmq_consumers'] = "%s.consumers"
keyToPath['rmq_messages'] = "%s.messages"
QUEUE_METRICS = ['rmq_messages_ready',
                 'rmq_messages_unacknowledged',
                 'rmq_backing_queue_ack_egress_rate',
                 'rmq_backing_queue_ack_ingress_rate',
                 'rmq_backing_queue_egress_rate',
                 'rmq_backing_queue_ingress_rate',
                 'rmq_backing_queue_mirror_senders',
                 'rmq_memory',
                 'rmq_consumers',
                 'rmq_messages']
# NODE METRICS #
keyToPath['rmq_disk_free'] = "%s.disk_free"
keyToPath['rmq_disk_free_alarm'] = "%s.disk_free_alarm"
keyToPath['rmq_fd_used'] = "%s.fd_used"  # (duplicate assignment removed)
keyToPath['rmq_mem_used'] = "%s.mem_used"
keyToPath['rmq_proc_used'] = "%s.proc_used"
keyToPath['rmq_sockets_used'] = "%s.sockets_used"
keyToPath['rmq_mem_alarm'] = "%s.mem_alarm" #Boolean
keyToPath['rmq_mem_binary'] = "%s.mem_binary"
keyToPath['rmq_mem_code'] = "%s.mem_code"
keyToPath['rmq_mem_proc_used'] = "%s.mem_proc_used"
keyToPath['rmq_running'] = "%s.running" #Boolean
NODE_METRICS = ['rmq_disk_free', 'rmq_mem_used', 'rmq_disk_free_alarm', 'rmq_running', 'rmq_proc_used', 'rmq_mem_proc_used', 'rmq_fd_used', 'rmq_mem_alarm', 'rmq_mem_code', 'rmq_mem_binary', 'rmq_sockets_used']
def metric_cleanup():
    # Ganglia module hook; nothing to tear down. (Shadowed by the second
    # metric_cleanup defined later in this file -- the later one wins.)
    pass
def dig_it_up(obj,path):
    """Walk a dotted *path* (e.g. "queue.messages_ready") into nested dicts
    of *obj*; return the value found, or False on any lookup failure.
    """
    try:
        path = path.split('.')
        return reduce(lambda x,y:x[y],path,obj)
    except:
        # Broad except: any missing key (or non-subscriptable value) yields False.
        print "Exception"
        return False
def refreshStats(stats = ('nodes', 'queues'), vhosts = ['/']):
    """Fetch stats from the RabbitMQ management API, at most once per
    INTERVAL seconds, and cache them in compiled_results keyed by
    (stat, vhost). Returns the (possibly stale) compiled_results dict.

    NOTE(review): vhosts uses a mutable default argument; it is never
    mutated here, so behaviour is unaffected -- confirm before changing.
    """
    global url_template
    global last_update, url, compiled_results
    now = time.time()
    if not last_update:
        diff = INTERVAL
    else:
        diff = now - last_update
    if diff >= INTERVAL or not last_update:
        print "Fetching Results after %d seconds" % INTERVAL
        last_update = now
        for stat in stats:
            for vhost in vhosts:
                # NOTE(review): ('nodes') is a string, not a tuple, so this is
                # a substring test; it happens to behave correctly for the
                # 'nodes'/'queues' values used here.
                if stat in ('nodes'):
                    vhost = '/'
                result_dict = {}
                urlstring = url_template.safe_substitute(stats = stat, vhost = vhost)
                print urlstring
                result = json.load(urllib2.urlopen(urlstring))
                # Rearrange results so entry is held in a dict keyed by name - queue name, host name, etc.
                if stat in ("queues", "nodes", "exchanges"):
                    for entry in result:
                        name = entry['name']
                        result_dict[name] = entry
                compiled_results[(stat, vhost)] = result_dict
    return compiled_results
def validatedResult(value):
    """Return *value* coerced to float, or None when it is a boolean.

    Fixes a latent NameError: the original called isInstance(), which does
    not exist; the builtin is isinstance().
    """
    if not isinstance(value, bool):
        return float(value)
    else:
        return None
def list_queues(vhost):
    """Return the queue names cached in compiled_results for *vhost*."""
    # Reading a module-level name needs no `global` declaration.
    return compiled_results[('queues', vhost)].keys()
def list_nodes():
    """Return the node names cached in compiled_results (default vhost)."""
    # Reading a module-level name needs no `global` declaration.
    return compiled_results[('nodes', '/')].keys()
def getQueueStat(name):
    """Ganglia callback: resolve a queue metric encoded as
    "<stat>.<queue name>#<vhost>" against the cached API results.
    Booleans are mapped to 1/0; the result is returned as float.
    """
    # Refresh (rate-limited inside refreshStats); vhosts is set by metric_init.
    refreshStats(stats = STATS, vhosts = vhosts)
    #Split a name like "rmq_backing_queue_ack_egress_rate.access"
    #handle queue names with . in them
    print name
    split_name, vhost = name.split("#")
    split_name = split_name.split(".")
    stat_name = split_name[0]
    # Re-join the remainder so queue names containing '.' stay intact.
    queue_name = ".".join(split_name[1:])
    # Run refreshStats to get the result object
    result = compiled_results[('queues', vhost)]
    value = dig_it_up(result, keyToPath[stat_name] % queue_name)
    print name, value
    #Convert Booleans
    if value is True:
        value = 1
    elif value is False:
        value = 0
    return float(value)
def getNodeStat(name):
    """Ganglia callback: resolve a node metric encoded as
    "<stat>.<node name>#<vhost>" against the cached API results.
    Booleans are mapped to 1/0; the result is returned as float.
    """
    refreshStats(stats = STATS, vhosts = vhosts)
    #Split a name like "rmq_backing_queue_ack_egress_rate.access"
    stat_name = name.split(".")[0]
    # NOTE(review): unlike getQueueStat, this assumes the node name contains
    # no '.' -- node names like "rabbit@host" satisfy that here.
    node_name, vhost = name.split(".")[1].split("#")
    result = compiled_results[('nodes', '/')]
    value = dig_it_up(result, keyToPath[stat_name] % node_name)
    print name,value
    #Convert Booleans
    if value is True:
        value = 1
    elif value is False:
        value = 0
    return float(value)
def product(*args, **kwds):
    """Cartesian product: pure-Python fallback for itertools.product.

    product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
    Only the 'repeat' keyword is honoured; other keywords are ignored.
    """
    # list(map(...)) keeps this working on Python 3, where map() returns an
    # iterator that cannot be multiplied by an int.
    pools = list(map(tuple, args)) * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        result = [x+[y] for x in result for y in pool]
    for prod in result:
        yield tuple(prod)
def metric_init(params):
    ''' Create the metric definition object '''
    global descriptors, stats, vhost, username, password, urlstring, url_template, compiled_results, STATS, vhosts
    print 'received the following params:'
    #Set this globally so we can refresh stats
    if 'host' not in params:
        # No host given: assume a local default broker with guest credentials.
        params['host'], params['vhost'],params['username'],params['password'],params['port'] = "localhost", "/", "guest", "guest", "15672"
    # Set the vhosts as a list split from params
    vhosts = params['vhost'].split(',')
    username, password = params['username'], params['password']
    host = params['host']
    port = params['port']
    # $stats/$vhost are Template placeholders filled per request.
    url = 'http://%s:%s/api/$stats/$vhost' % (host,port)
    base_url = 'http://%s:%s/api' % (host,port)
    # Install a global opener so later urllib2.urlopen() calls authenticate.
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, base_url, username, password)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    opener = urllib2.build_opener(handler)
    opener.open(base_url)
    urllib2.install_opener(opener)
    url_template = Template(url)
    print params
    # Prime the cache so the descriptor builders below can list queues/nodes.
    refreshStats(stats = STATS, vhosts = vhosts)
    def metric_handler(name):
        # NOTE(review): metric_handler.timestamp is never initialised, so a
        # first call would raise AttributeError -- confirm this is unused.
        if 15 < time.time() - metric_handler.timestamp:
            metric_handler.timestamp = time.time()
            return refreshStats(stats = STATS, vhosts = vhosts)
    def create_desc(prop):
        # Build a ganglia metric descriptor from defaults, overridden by *prop*.
        d = {
            'name' : 'XXX',
            'call_back' : getQueueStat,
            'time_max' : 60,
            'value_type' : 'uint',
            'units' : 'units',
            'slope' : 'both',
            'format' : '%d',
            'description' : 'XXX',
            'groups' : params["metric_group"],
        }
        for k,v in prop.iteritems():
            d[k] = v
        return d
    def buildQueueDescriptors():
        # One descriptor per (vhost, metric, queue); names are encoded as
        # "<metric>.<queue>#<vhost>" and decoded again in getQueueStat.
        for vhost, metric in product(vhosts, QUEUE_METRICS):
            queues = list_queues(vhost)
            for queue in queues:
                name = "%s.%s#%s" % (metric, queue, vhost)
                print name
                d1 = create_desc({'name': name.encode('ascii','ignore'),
                                  'call_back': getQueueStat,
                                  'value_type': 'float',
                                  'units': 'N',
                                  'slope': 'both',
                                  'format': '%f',
                                  'description': 'Queue_Metric',
                                  'groups' : 'rabbitmq,queue'})
                print d1
                descriptors.append(d1)
    def buildNodeDescriptors():
        # One descriptor per (metric, node); nodes are always on vhost '/'.
        for metric in NODE_METRICS:
            for node in list_nodes():
                name = '%s.%s#%s' % (metric, node, '/')
                print name
                d2 = create_desc({'name': name.encode('ascii','ignore'),
                                  'call_back': getNodeStat,
                                  'value_type': 'float',
                                  'units': 'N',
                                  'slope': 'both',
                                  'format': '%f',
                                  'description': 'Node_Metric',
                                  'groups' : 'rabbitmq,node'})
                print d2
                descriptors.append(d2)
    buildQueueDescriptors()
    buildNodeDescriptors()
    # buildTestNodeStat()
    return descriptors
def metric_cleanup():
    # Ganglia module hook; nothing to tear down for this module.
    pass
if __name__ == "__main__":
    # Manual smoke test against a local broker.
    url = 'http://%s:%s@localhost:15672/api/$stats' % (username, password)
    url_template = Template(url)
    # 'host' is omitted on purpose: metric_init falls back to its defaults.
    parameters = {"vhost":"/", "username":"guest","password":"<PASSWORD>", "metric_group":"rabbitmq"}
    metric_init(parameters)
    # NOTE(review): ('/') is a plain string, not a 1-tuple; iterating it
    # still yields '/', so this works by coincidence.
    result = refreshStats(stats = ('queues', 'nodes'), vhosts = ('/'))
    print '***'*10
    getQueueStat('rmq_backing_queue_ack_egress_rate.nfl_client#/')
    getNodeStat('rmq_disk_free.rmqone@inrmq01d1#/')
    getNodeStat('rmq_mem_used.rmqone@inrmq01d1#/')
|
indykish/chef-repo | cookbooks/megam_ganglia/templates/default/nginx_status.py | ### This script reports nginx status stub metrics to ganglia.
### License to use, modify, and distribute under the GPL
### http://www.gnu.org/licenses/gpl.txt
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
import urllib2
logging.basicConfig(level=logging.ERROR)  # by default only errors are logged
_Worker_Thread = None  # singleton refresher thread, created in metric_init()
class UpdateNginxThread(threading.Thread):
    """Background worker that periodically polls the nginx status-stub page
    and caches parsed metrics/settings behind locks for ganglia callbacks.
    """
    def __init__(self, params):
        threading.Thread.__init__(self)
        self.running = False       # True while run() is executing
        self.shuttingdown = False  # set by shutdown() to stop the loop
        self.refresh_rate = int(params['refresh_rate'])
        self.metrics = {}          # last parsed status-stub counters
        self.settings = {}         # e.g. nginx_server_version
        self.status_url = params['status_url']
        self.nginx_bin = params['nginx_bin']
        self._metrics_lock = threading.Lock()
        self._settings_lock = threading.Lock()
    def shutdown(self):
        """Ask the thread to stop and join it if it was running."""
        self.shuttingdown = True
        if not self.running:
            return
        self.join()
    def run(self):
        # NOTE(review): _Lock is not defined at module level; this `global`
        # is a no-op and appears to be leftover code.
        global _Lock
        self.running = True
        while not self.shuttingdown:
            time.sleep(self.refresh_rate)
            self.refresh_metrics()
        self.running = False
    @staticmethod
    def _get_nginx_status_stub_response(url):
        """Fetch and parse the nginx stub_status page into a counter dict.

        Raises Exception when the page does not match the expected format.
        """
        c = urllib2.urlopen(url, None, 2)
        data = c.read()
        c.close()
        matchActive = re.search(r'Active connections:\s+(\d+)', data)
        matchHistory = re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)', data)
        matchCurrent = re.search(r'Reading:\s*(\d+)\s*Writing:\s*(\d+)\s*'
                                 'Waiting:\s*(\d+)', data)
        if not matchActive or not matchHistory or not matchCurrent:
            raise Exception('Unable to parse {0}' . format(url))
        result = {}
        result['nginx_active_connections'] = int(matchActive.group(1))
        result['nginx_accepts'] = int(matchHistory.group(1))
        result['nginx_handled'] = int(matchHistory.group(2))
        result['nginx_requests'] = int(matchHistory.group(3))
        result['nginx_reading'] = int(matchCurrent.group(1))
        result['nginx_writing'] = int(matchCurrent.group(2))
        result['nginx_waiting'] = int(matchCurrent.group(3))
        return result
    def refresh_metrics(self):
        """Re-fetch the status page and swap in the new metrics dict.

        Returns True on success, False on failure.
        """
        logging.debug('refresh metrics')
        try:
            logging.debug(' opening URL: ' + str(self.status_url))
            data = UpdateNginxThread._get_nginx_status_stub_response(self.status_url)
        except:
            # NOTE(review): traceback.print_exc() returns None, so the second
            # warning logs "None"; if the fetch failed, `data` is also unbound
            # below and the resulting NameError is swallowed by the next
            # except, making this return False -- confirm intended.
            logging.warning('error refreshing metrics')
            logging.warning(traceback.print_exc(file=sys.stdout))
        try:
            self._metrics_lock.acquire()
            self.metrics = {}
            for k, v in data.items():
                self.metrics[k] = v
        except:
            logging.warning('error refreshing metrics')
            logging.warning(traceback.print_exc(file=sys.stdout))
            return False
        finally:
            self._metrics_lock.release()
        if not self.metrics:
            logging.warning('error refreshing metrics')
            return False
        logging.debug('success refreshing metrics')
        logging.debug('metrics: ' + str(self.metrics))
        return True
    def refresh_settings(self):
        """Run `nginx -v` and record the server version string.

        Returns True on success, False on failure.
        """
        logging.debug(' refreshing server settings')
        try:
            # nginx prints its version banner on stderr.
            p = subprocess.Popen(executable=self.nginx_bin, args=[self.nginx_bin, '-v'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
        except:
            logging.warning('error refreshing settings')
            return False
        try:
            self._settings_lock.acquire()
            self.settings = {}
            for line in err.split('\n'):
                if line.startswith('nginx version:'):
                    key = "nginx_server_version"
                else:
                    continue
                logging.debug(' line: ' + str(line))
                line = line.split(': ')
                if len(line) > 1:
                    self.settings[key] = line[1]
        except:
            logging.warning('error refreshing settings')
            return False
        finally:
            self._settings_lock.release()
        logging.debug('success refreshing server settings')
        logging.debug('settings: ' + str(self.settings))
        return True
    def metric_of(self, name):
        """Return the cached value of metric *name*, or 0 when unavailable."""
        logging.debug('getting metric: ' + name)
        try:
            if name in self.metrics:
                try:
                    self._metrics_lock.acquire()
                    logging.debug('metric: %s = %s' % (name, self.metrics[name]))
                    return self.metrics[name]
                finally:
                    self._metrics_lock.release()
        except:
            logging.warning('failed to fetch ' + name)
        return 0
    def setting_of(self, name):
        """Return the cached value of setting *name*, or 0 when unavailable."""
        logging.debug('getting setting: ' + name)
        try:
            if name in self.settings:
                try:
                    self._settings_lock.acquire()
                    logging.debug('setting: %s = %s' % (name, self.settings[name]))
                    return self.settings[name]
                finally:
                    self._settings_lock.release()
        except:
            logging.warning('failed to fetch ' + name)
        return 0
def metric_init(params):
    """Ganglia module entry point: start the refresher thread and return the
    metric descriptor list. Raises if called more than once per process.
    """
    logging.debug('init: ' + str(params))
    global _Worker_Thread
    # Defaults merged into every per-metric description below.
    METRIC_DEFAULTS = {
        'time_max': 60,
        'units': 'connections',
        'groups': 'nginx',
        'slope': 'both',
        'value_type': 'uint',
        'format': '%d',
        'description': '',
        'call_back': metric_of
    }
    descriptions = dict(
        nginx_server_version={
            'value_type': 'string',
            'units': '',
            'format': '%s',
            'slope': 'zero',
            'call_back': setting_of,
            'description': 'Nginx version number'},
        nginx_active_connections={
            'description': 'Total number of active connections'},
        nginx_accepts={
            'slope': 'positive',
            'description': 'Total number of accepted connections'},
        nginx_handled={
            'slope': 'positive',
            'description': 'Total number of handled connections'},
        nginx_requests={
            'slope': 'positive',
            'units': 'requests',
            'description': 'Total number of requests'},
        nginx_reading={
            'description': 'Current connection in the reading state'},
        nginx_writing={
            'description': 'Current connection in the writing state'},
        nginx_waiting={
            'description': 'Current connection in the waiting state'})
    if _Worker_Thread is not None:
        raise Exception('Worker thread already exists')
    _Worker_Thread = UpdateNginxThread(params)
    # Populate caches once synchronously before the background loop starts.
    _Worker_Thread.refresh_metrics()
    _Worker_Thread.refresh_settings()
    _Worker_Thread.start()
    descriptors = []
    # NOTE: iteritems()/iterkeys() are Python 2 only.
    for name, desc in descriptions.iteritems():
        d = desc.copy()
        d['name'] = str(name)
        [ d.setdefault(key, METRIC_DEFAULTS[key]) for key in METRIC_DEFAULTS.iterkeys() ]
        descriptors.append(d)
    return descriptors
def metric_of(name):
    """Module-level ganglia callback: delegate metric lookup to the worker."""
    # Read-only access to a module global needs no `global` declaration.
    return _Worker_Thread.metric_of(name)
def setting_of(name):
    """Module-level ganglia callback: delegate setting lookup to the worker."""
    # Read-only access to a module global needs no `global` declaration.
    return _Worker_Thread.setting_of(name)
def metric_cleanup():
    """Ganglia shutdown hook: stop the refresher thread and flush logging."""
    global _Worker_Thread
    if _Worker_Thread is not None:
        _Worker_Thread.shutdown()
    logging.shutdown()
if __name__ == '__main__':
    # Manual smoke test: build descriptors, fetch each metric once, print it.
    from optparse import OptionParser
    try:
        logging.debug('running from cmd line')
        parser = OptionParser()
        parser.add_option('-u', '--URL', dest='status_url', default='http://localhost/nginx_status', help='URL for Nginx status stub page')
        parser.add_option('--nginx-bin', dest='nginx_bin', default='/usr/sbin/nginx', help='path to nginx')
        parser.add_option('-q', '--quiet', dest='quiet', action='store_true', default=False)
        parser.add_option('-r', '--refresh-rate', dest='refresh_rate', default=15)
        parser.add_option('-d', '--debug', dest='debug', action='store_true', default=False)
        (options, args) = parser.parse_args()
        descriptors = metric_init({
            'status_url': options.status_url,
            'nginx_bin': options.nginx_bin,
            'refresh_rate': options.refresh_rate
        })
        if options.debug:
            from pprint import pprint
            pprint(descriptors)
        for d in descriptors:
            v = d['call_back'](d['name'])
            if not options.quiet:
                print ' {0}: {1} {2} [{3}]' . format(d['name'], v, d['units'], d['description'])
        # NOTE(review): os._exit() terminates immediately and SKIPS the
        # finally: clause, so metric_cleanup() never runs on these paths
        # and the worker thread is abandoned -- confirm intended.
        os._exit(1)
    except KeyboardInterrupt:
        time.sleep(0.2)
        os._exit(1)
    except StandardError:
        # StandardError exists only on Python 2.
        traceback.print_exc()
        os._exit(1)
    finally:
        metric_cleanup()
|
ilizol/kassia | font_reader.py | #!/usr/bin/python
import logging
import os
from pathlib import Path
from typing import Dict
from reportlab import rl_settings
from reportlab.lib import fontfinder
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFError, TTFont
from ruamel.yaml import YAML, YAMLError
from schema import And, Optional, Schema, SchemaError
# Validation schema for a font's classes.yaml: names the font family and
# groups glyph names by behaviour (lyric-bearing, standalone, ligatures...).
font_classes_schema = Schema({
    'family_name': str,
    'takes_lyric': [str],
    'standalone': [str],
    Optional('keep_with_next'): [str],
    Optional('lyric_offsets'): {str: float},
    Optional('accidentals'): [str],
    Optional('martyriae'): [str],
    Optional('tempo_markings'): [str],
    Optional('chronos'): [str],
    Optional('rests'): [str],
    Optional('optional_ligatures'): {str: {And('name'): str, And('component_glyphs'): str}},
    Optional('conditional_neumes'): {str: {And('base_neume'): list, And('component_glyphs'):
                                           list, And('replace_glyph'): str, And('draw_glyph'): str}},
})
# Validation schema for glyphnames.yaml: maps each glyph name to its family,
# codepoint, and optional composition metadata.
font_glyphnames_schema = Schema({
    str: {
        And('family'): str,
        And('codepoint'): str,
        Optional('component_glyphs'): [str],
        Optional('description'): str,
    }
})
def _get_neume_dict(font_folder_path: str) -> Dict:
    """Search folder path for font configs, load them, and return in Dict.

    :param font_folder_path: Path to font.
    """
    configs = {}
    for names_file in Path(font_folder_path).rglob('glyphnames.yaml'):
        font_dir = names_file.parent
        classes_file = Path.joinpath(font_dir, 'classes.yaml')
        configs[font_dir.name] = {
            'glyphnames': _load_font_config(str(names_file), font_glyphnames_schema),
            'classes': _load_font_config(str(classes_file), font_classes_schema),
        }
    return configs
def _load_font_config(filepath: str, validator: Schema) -> Dict:
    """Read, load, and validate a font configuration, and return it as a Dict.

    :param filepath: Path of font config file.
    :param validator: Schema to validate against.
    :return: Font configuration as a dictionary, or None when the file
        cannot be parsed or validated.
    """
    with open(filepath, 'r') as fp:
        try:
            yaml = YAML(typ='safe', pure=True)
            font_config = yaml.load(fp)
            validator.validate(font_config)
        except (IOError, YAMLError, SchemaError) as exc:
            logging.error("Problem reading {} font configuration. {}".format(filepath, exc))
            font_config = None
        # The previous `except Exception as exc: raise exc` clause was a
        # no-op re-raise and has been removed; other exceptions propagate.
    return font_config
def find_and_register_fonts(check_sys_fonts: bool = False) -> Dict:
    """Search for fonts and register them.

    If check_sys_fonts is false, function will only use fonts in local
    /fonts folder.

    :param check_sys_fonts: Whether to search system for fonts.
    :return: Font configuration as a dictionary.
    """
    ff = fontfinder.FontFinder(useCache=False)
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    local_font_dir = os.path.join(str(base_dir), 'kassia/fonts')
    logging.info("Searching {} path for local fonts...".format(local_font_dir))
    ff.addDirectory(local_font_dir, recur=True)
    if check_sys_fonts:
        system_font_dirs = rl_settings.TTFSearchPath
        ff.addDirectories(system_font_dirs)
    try:
        ff.search()
    except Exception as fferror:
        # A bad font file should not abort the scan; log and continue.
        # (The old `except (KeyError, Exception)` was redundant: Exception
        # already covers KeyError.)
        logging.warning("Problem parsing font: {}".format(fferror))
    _register_fonts(ff)
    return _get_neume_dict(local_font_dir)
def _register_fonts(font_finder: fontfinder.FontFinder):
    """Search font_path for TTF's and register them.

    Registers discovered fonts as part of family if multiple weights are found.
    ReportLab usually keeps a cache after searching a directory.
    I have this cache disabled because it doesn't seem to work correctly.
    If only one font in family, use family name as font name, otherwise
    use familyname-fontface.

    :param font_finder: Path to search for fonts.
    """
    for family_name in font_finder.getFamilyNames():
        fonts_in_family = font_finder.getFontsInFamily(family_name)
        for font in fonts_in_family:
            if len(fonts_in_family) == 1:
                # Single face: register under the bare family name.
                # (FontFinder returns family names as bytes, hence decode.)
                try:
                    ttfont = TTFont(family_name.decode("utf-8"), font.fileName)
                    pdfmetrics.registerFont(ttfont)
                    pdfmetrics.registerFontFamily(family_name)
                except TTFError as e:
                    logging.warning("Could not register font {}, {}".format(family_name, e))
                    continue
            elif len(fonts_in_family) > 1:
                # Multiple faces: register each as "<family>-<style>" and map
                # bold/italic flags so style lookups resolve correctly.
                font_name = family_name + "-".encode() + font.styleName
                font_name = font_name.decode("utf-8")
                try:
                    ttfont = TTFont(font_name, font.fileName)
                    pdfmetrics.registerFont(ttfont)
                    addMapping(font.familyName, font.isBold, font.isItalic, font_name)
                except TTFError as e:
                    logging.warning("Could not register font {}, {}".format(family_name, e))
                    continue
def is_registered_font(font_name: str) -> bool:
    """Return whether passed font is registered.

    :param font_name: Name of font to check within registered fonts.
    """
    registered_names = pdfmetrics.getRegisteredFontNames()
    return font_name in registered_names
|
ilizol/kassia | syllable_line.py | <filename>syllable_line.py
from collections.abc import MutableSequence
from typing import List
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import Flowable
from coord import Coord
from lyric import Lyric
from syllable import Syllable
class SyllableLine(Flowable, MutableSequence):
    """This class is a collection of Syllables.

    Acts both as a ReportLab Flowable (drawable line of neumes + lyrics)
    and as a mutable sequence of Syllable objects.
    """
    def __init__(self, leading=0, syllable_spacing=0, *args):
        super().__init__(*args)
        self.list: List[Syllable] = list()
        self.extend(list(args))
        self.leading = leading
        self.syllableSpacing = syllable_spacing
    def wrap(self, *args):
        # Flowable protocol: report our size to the layout engine.
        self.set_size()
        return self.width, self.height
    def draw(self, canvas: Canvas = None):
        """This class is overloaded from Flowable's draw function.

        Note: If a score gets split, platypus will treat the syllableLine
        as a Flowable and call this draw function without any parameters.

        :param canvas: The canvas to draw on. Canvas only gets received as an
            argument when this draw function is called by Score directly.
        """
        if not canvas:
            canvas = self.canv
        for syl in self.list:
            syl.draw(canvas)
        self.draw_dashes(canvas)
        self.draw_extenders(canvas)
    def draw_dashes(self, canvas):
        """Draw dashes connecting lyrics in a line of syllables.

        Loop through syllables in list. Draw dash whenever connector is found.
        Dashes after syllable come after lyric, while dashes on no lyric are
        centered under syllable.

        :param canvas: The canvas to draw extender on.
        """
        starting_lyric = None
        for syl in self.list:
            if syl.contains_connector_type('d'):
                if starting_lyric is None:
                    starting_lyric = syl.lyric
                # Start of word
                if syl.lyric.text is not None:
                    starting_lyric = syl.lyric
                    coord = self._get_initial_dash_position(syl)
                # Middle of word
                else:
                    coord = syl.lyric_pos
                if syl.takes_lyric:
                    # Style the dash like the lyric that started the word.
                    self._draw_dash(canvas,
                                    coord,
                                    starting_lyric.color,
                                    starting_lyric.font_family,
                                    starting_lyric.font_size)
            # End of word
            elif starting_lyric is not None:
                starting_lyric = None
    def draw_extenders(self, canvas):
        """Draw extenders connecting two or more sets of lyrics in a line.

        Loop through syllables in list. Begin extender if necessary.
        When encountering new lyric, end current extender and begin new one.
        Keep adding end of neume pos as end of extender.
        Draw extender if get to end of line.

        :param canvas: The canvas to draw extender on.
        """
        starting_lyric, x1, x2, y1, y2 = None, None, None, None, None
        for i, syl in enumerate(self.list):
            if syl.contains_connector_type('u'):
                # Begin extender if necessary
                if x1 is None:
                    starting_lyric = syl.lyric
                    y1, y2 = (syl.lyric_pos.y, syl.lyric_pos.y)
                    # If starting new line with extender, begin extender at front of neume
                    # Otherwise begin right after current lyric
                    if i == 0 and syl.lyric.text is None:
                        x1 = syl.neume_chunk_pos.x
                    else:
                        x1 = self._get_extender_start_position(syl)
                # If encounter new lyric, end current extender and start new one
                if syl.lyric.text is not None and x2:
                    # If syneches elafron, special case
                    if syl.lyric_offset:
                        x2 = self._get_special_extender_end_position(syl)
                    self._draw_extender(canvas, x1, y1, x2, y2, starting_lyric)
                    starting_lyric = syl.lyric
                    x1 = self._get_extender_start_position(syl)
                # Set extender end to ending position of neume
                x2 = self._get_extender_end_position(syl)
            # If no more extender connection, end and draw it
            elif x1 is not None and x2 is not None:
                if syl.base_neume.name == 'syne' and x1:
                    x2 = self._get_special_extender_end_position(syl)
                self._draw_extender(canvas, x1, y1, x2, y2, starting_lyric)
                x1, x2 = None, None
        # If extender goes to end of line
        if x1 is not None:
            self._draw_extender(canvas, x1, y1, x2, y2, starting_lyric)
    @staticmethod
    def _get_initial_dash_position(syl: Syllable) -> Coord:
        """Return dash position for the passed lyric.

        :param syl: Current syllable.
        :returns: Dash position as Coordinate
        """
        lyric_space_width = pdfmetrics.stringWidth(' ', syl.lyric.font_family, syl.lyric.font_size)
        # Two space-widths of padding after the lyric text.
        return Coord(syl.lyric_pos.x + syl.lyric.width + (lyric_space_width * 2), syl.lyric_pos.y)
    @staticmethod
    def _get_extender_start_position(syl: Syllable) -> float:
        """Return extender starting position (x position), after lyric.

        :param syl: Current syllable.
        """
        lyric_space_width = pdfmetrics.stringWidth(' ', syl.lyric.font_family, syl.lyric.font_size)
        return syl.lyric_pos.x + syl.lyric.width + lyric_space_width
    @staticmethod
    def _get_extender_end_position(syl: Syllable) -> float:
        """Return extender end position (x position), which is at end of neume (neume width).

        :param syl: Current syllable.
        """
        return syl.neume_chunk_pos.x + syl.width
    @staticmethod
    def _get_special_extender_end_position(syl: Syllable) -> float:
        """Return extender end position when special lyric offset.

        :param syl: Current syllable.
        """
        return syl.neume_chunk_pos.x + syl.lyric_offset
    @staticmethod
    def _draw_extender(canvas: Canvas, x1: float, y1: float, x2: float, y2: float, starting_lyric: Lyric):
        """Draw an underscore extender connecting two or more sets of lyrics.

        :param canvas: The canvas to draw extender to.
        :param x1: Start of extender, x coordinate.
        :param y1: Start of extender, y coordinate.
        :param x2: End of extender, x coordinate.
        :param y2: End of extender, y coordinate.
        :param starting_lyric: Lyric which starts the extender.
        """
        if x1 is not None and x2 is not None:
            canvas.setStrokeColor(starting_lyric.color)
            canvas.setFont(starting_lyric.font_family, starting_lyric.font_size)
            canvas.line(x1, y1, x2, y2)
    @staticmethod
    def _draw_dash(canvas: Canvas, dash_coord: Coord, color: str, font_family: str, font_size: int):
        """Draw a set of dashes connecting two or more sets of lyrics.

        :param canvas: The canvas to draw extender to.
        :param dash_coord: Position to draw dash at.
        :param color: Color of dash to draw.
        :param font_family: Font family of dash to draw.
        :param font_size: Font size of dash to draw.
        """
        canvas.setFillColor(color)
        canvas.setFont(font_family, font_size)
        canvas.drawCentredString(dash_coord.x, dash_coord.y, '-')
    def set_size(self):
        """Recompute width/height from the first and last syllables."""
        if self.list:
            width = (self.list[-1].neume_chunk_pos.x + self.list[-1].width) - self.list[0].neume_chunk_pos.x
            self.width = width
            max_syl_height = max(syl.height for syl in self.list)
            self.height = max(max_syl_height, self.leading)
    # MutableSequence protocol -------------------------------------------------
    def __len__(self):
        return len(self.list)
    def __getitem__(self, i):
        return self.list[i]
    def __delitem__(self, i):
        # NOTE(review): set_size() runs BEFORE the mutation here and in
        # __setitem__/insert, so width/height reflect the pre-mutation list
        # until the next wrap() -- confirm this ordering is intended.
        self.set_size()
        del self.list[i]
    def __setitem__(self, i, v):
        self.set_size()
        self.list[i] = v
    def insert(self, i, v):
        self.set_size()
        self.list.insert(i, v)
    def __str__(self):
        return str(self.list)
|
Vahagn-Zaqaryan/fetch-text-python | words.py | #!/usr/bin/env python3
'''Retrieve and print words from a URL.
Usage:
    python3 words.py <URL>
'''
import sys
from urllib.request import urlopen
def fetch_words(url):
    """Fetch a list of words from a URL.

    Args:
        url: The URL of a UTF-8 text document.

    Returns:
        A list of strings containing the words from the document.
    """
    with urlopen(url) as story:
        words = []
        for raw_line in story:
            words.extend(raw_line.decode('utf-8').split())
        return words
def print_item(items):
    """Print every item in *items*, one per line.

    Args:
        items: An iterable series of printable items.
    """
    for printable in items:
        print(printable)
def main(url):
    """Print each word of the text document found at *url*.

    Args:
        url: The URL of a UTF-8 text document.
    """
    print_item(fetch_words(url))
if __name__ == '__main__':
    main(sys.argv[1])  # e.g. python3 words.py http://sixty-north.com/c/t.txt
|
Keith234/python_training | model/contact.py | <reponame>Keith234/python_training
from sys import maxsize
class Contact:
    """Address-book record used by the tests.

    Every field defaults to None. In __eq__, None acts as a wildcard: a
    field only has to match when both sides carry a concrete value.
    """

    # Fields participating in equality comparison (in __repr__ order).
    _COMPARED_FIELDS = ("id", "lastname", "first_name", "address", "home",
                        "mobile", "work", "fax", "email", "email2", "email3")

    def __init__(self, first_name=None, middlename=None, lastname=None, nicknam=None, title=None, company=None,
                 address=None, home=None, mobile=None, work=None, fax=None,
                 email=None, email2=None, email3=None, secondaryphone=None, id=None, all_phones_from_home_page=None,
                 all_emails_from_home_page=None, all_phones_from_db=None, all_emails_from_db=None):
        self.first_name = first_name
        self.middlename = middlename
        self.lastname = lastname
        self.nicknam = nicknam
        self.title = title
        self.company = company
        self.address = address
        self.home = home
        self.mobile = mobile
        self.work = work
        self.fax = fax
        self.email = email
        self.email2 = email2
        self.email3 = email3
        self.secondaryphone = secondaryphone
        self.id = id
        self.all_phones_from_home_page = all_phones_from_home_page
        self.all_emails_from_home_page = all_emails_from_home_page
        self.all_phones_from_db = all_phones_from_db
        self.all_emails_from_db = all_emails_from_db

    def __repr__(self):
        return (f"{self.id} : {self.lastname}, {self.first_name}, {self.address}, "
                f"{self.home}, {self.mobile}, {self.work}, {self.fax}, "
                f"{self.email}, {self.email2}, {self.email3}")

    def __eq__(self, other):
        def fields_match(mine, theirs):
            # None on either side is treated as "unknown" and matches anything.
            return mine is None or theirs is None or mine == theirs
        return all(fields_match(getattr(self, field), getattr(other, field))
                   for field in self._COMPARED_FIELDS)

    def id_or_max(self):
        """Sort key: numeric id, or maxsize so id-less records sort last."""
        return int(self.id) if self.id else maxsize
|
Keith234/python_training | test/test_modify_contact.py | import random
from model.contact import Contact
def test_modify_contact_by_id(app, db, check_ui):
    """Modify a random DB contact through the UI and verify list consistency."""
    app.open_home_page()
    # Ensure there is at least one contact to modify.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="Nana", middlename="Ver", lastname="Ko"))
    old_contacts = db.get_contact_list()
    contact = random.choice(old_contacts)
    modified_contact = Contact(first_name="Dina", middlename="Tor", lastname="Mia", nicknam="Mom",
                               title="TJT", company="JIT", address="45 Street", home="0985477",
                               mobile="0453665363", work="6468964", fax="86543",
                               email="<EMAIL>", email2="<EMAIL>",
                               email3="<EMAIL>")
    # Keep the original id so equality/sorting by id still lines up.
    modified_contact.id = contact.id
    app.contact.modify_contact_by_id(contact.id, modified_contact)
    assert len(old_contacts) == app.contact.count()
    new_contacts = db.get_contact_list()
    old_contacts.remove(contact)
    old_contacts.append(modified_contact)
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        # Optional slower check: the UI list must mirror the database.
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(),
                                                                     key=Contact.id_or_max)
def test_modify_first_contact_name(app):
    """Change the first contact's first name and verify the list is intact."""
    app.open_home_page()
    # Ensure there is at least one contact to modify.
    if app.contact.count() == 0:
        app.contact.create(Contact(first_name="Nana", middlename="Ver", lastname="Ko"))
    old_contacts = app.contact.get_contact_list()
    contact = Contact(first_name="Dana")
    # Preserve the id so the modified record compares equal by id.
    contact.id = old_contacts[0].id
    app.contact.modify_first_contact(contact)
    assert len(old_contacts) == app.contact.count()
    new_contacts = app.contact.get_contact_list()
    old_contacts[0] = contact
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
|
Keith234/python_training | data/contacts.py | from model.contact import Contact
# Parametrized fixture data: two fully-populated Contact records used as
# inputs by the contact tests.
testdata = [
    Contact(first_name="Nick", middlename="Toro", lastname="Delever", nicknam="Op",
            title="Redsa", company="Ytggg", address="htfvv 56 8", home="887654456",
            mobile="876567", work="87654567", fax="7656", secondaryphone="45678",
            email="<EMAIL>", email2="<EMAIL>",
            email3=""),
    Contact(first_name="Dana", middlename="Terasa", lastname="Gedas", nicknam="Ytrsf",
            title="Hyrfg", company="99999999", address="rfscc45f", home="*****",
            mobile="9876544567", work="876567", fax="876567", secondaryphone="09876545",
            email="", email2="",
            email3="<EMAIL>")
]
|
Keith234/python_training | test/test_information_comparison.py | <gh_stars>0
import re
from random import randrange
from model.contact import Contact
def test_information_comparison_between_homepage_and_edit_page(app):
    """Pick a random contact and check the home-page row matches its edit form."""
    contacts = app.contact.get_contact_list()
    idx = randrange(len(contacts))
    # Re-read the list so the row under test reflects the current page state.
    from_home = app.contact.get_contact_list()[idx]
    from_edit = app.contact.get_contact_info_from_edit_page(idx)
    assert from_edit.first_name == from_home.first_name
    assert from_edit.lastname == from_home.lastname
    assert from_edit.address == from_home.address
    assert from_home.all_phones_from_home_page == merge_phones_like_on_home_page(from_edit)
    assert from_home.all_emails_from_home_page == merge_emails_like_on_home_page(from_edit)
def test_information_comparison_between_homepage_and_data_base(app, db):
    """Compare every contact shown on the home page with its database record.

    Checks name, address, and the merged phone/e-mail columns for each
    contact, after sorting both sides by id so rows line up.
    (Fix: removed a dead ``else: pass`` branch; index loop replaced by zip.)
    """
    # Ensure at least one contact exists so the comparison is meaningful.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="Nana", middlename="Ver", lastname="Ko"))
    contacts_from_home_page = app.contact.get_contact_list()
    contacts_from_db = db.get_member_list_as_at_ui()
    assert len(contacts_from_home_page) == len(contacts_from_db)
    home_sorted = sorted(contacts_from_home_page, key=Contact.id_or_max)
    db_sorted = sorted(contacts_from_db, key=Contact.id_or_max)
    for ui_contact, db_contact in zip(home_sorted, db_sorted):
        # The DB may store trailing whitespace that the UI strips.
        assert ui_contact.first_name == db_contact.first_name.strip()
        assert ui_contact.lastname == db_contact.lastname.strip()
        assert ui_contact.address == db_contact.address
        assert ui_contact.all_emails_from_home_page == merge_emails_like_on_home_page(db_contact)
        assert ui_contact.all_phones_from_home_page == merge_phones_like_on_home_page(db_contact)
def clear(s):
    """Strip parentheses, spaces, and dashes from *s* (phone normalisation)."""
    return s.translate({ord(ch): None for ch in "() -"})
def merge_phones_like_on_home_page(contact):
    """Join the contact's phone fields the way the home page renders them:
    one per line, normalised by ``clear``, skipping None and empty values."""
    phones = [contact.home, contact.mobile, contact.work, contact.secondaryphone]
    cleaned = (clear(phone) for phone in phones if phone is not None)
    return "\n".join(value for value in cleaned if value != "")
def merge_emails_like_on_home_page(contact):
    """Join the contact's e-mail fields the way the home page renders them:
    one per line, normalised by ``clear``, skipping None and empty values."""
    emails = [contact.email, contact.email2, contact.email3]
    cleaned = (clear(email) for email in emails if email is not None)
    return "\n".join(value for value in cleaned if value != "")
|
Keith234/python_training | fixture/db.py | import pymysql
from model.contact import Contact
from model.group import Group
class DbFixture:
    """Direct-SQL access to the addressbook database for test verification.

    Rows whose ``deprecated`` timestamp differs from the zero sentinel
    ``'0000-00-00 00:00:00'`` are treated as deleted and excluded.
    (Fix: locals named ``list`` shadowed the builtin; renamed throughout.)
    """

    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # autocommit so each read sees the application's latest writes
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)

    def get_group_list(self):
        """Return every group as a Group model object."""
        groups = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for (group_id, group_name, group_header, group_footer) in cursor:
                groups.append(Group(id=str(group_id), name=group_name, header=group_header, footer=group_footer))
        finally:
            cursor.close()
        return groups

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()

    def get_contact_list(self):
        """Return non-deleted contacts with their basic name/company fields."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute(
                "select id, firstname, middlename, lastname, company from addressbook WHERE deprecated = '0000-00-00 00:00:00'")
            for (contact_id, firstname, middlename, lastname, company) in cursor:
                contacts.append(Contact(id=str(contact_id), first_name=firstname, middlename=middlename,
                                        lastname=lastname, company=company))
        finally:
            cursor.close()
        return contacts

    def get_member_list_with_merged_emails_and_phones(self):
        """Return contacts with phones and e-mails concatenated into single fields."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute(
                "select id, firstname, lastname, address, home, mobile, work, phone2, email, email2, email3 from addressbook where deprecated='0000-00-00 00:00:00'")
            # NOTE(review): the ``phone2`` column is bound to the ``fax``
            # variable; the concatenations assume no field is NULL — confirm.
            for (contact_id, firstname, lastname, address, home, mobile, work, fax, email, email2, email3) in cursor:
                contacts.append(
                    Contact(id=str(contact_id), first_name=firstname, lastname=lastname, address=address,
                            all_phones_from_home_page=home + mobile + work + fax,
                            all_emails_from_home_page=email + email2 + email3))
        finally:
            cursor.close()
        return contacts

    def get_member_list_as_at_ui(self):
        """Return contacts carrying both the individual and the merged
        phone/e-mail fields, mirroring what the UI home page displays."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute(
                "select id, firstname, lastname, address, email, email2, email3, home, mobile, work, phone2 from addressbook where deprecated = '0000-00-00 00:00:00'")
            for (contact_id, firstname, lastname, address, email, email2, email3, home, mobile, work, fax) in cursor:
                contacts.append(
                    Contact(id=str(contact_id), first_name=firstname, lastname=lastname, address=address, home=home,
                            mobile=mobile, work=work, fax=fax, email=email, email2=email2, email3=email3,
                            all_emails_from_db=email + email2 + email3, all_phones_from_db=home + mobile + work + fax))
        finally:
            cursor.close()
        return contacts
|
Keith234/python_training | test/test_add_contact_to_group.py | <reponame>Keith234/python_training
from fixture.orm import ORMFixture
from model.contact import Contact
import random
from model.group import Group
# Module-level ORM fixture; assumes a local addressbook MySQL instance.
db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_add_contact_to_group(app):
    """Put a random contact into a random group and verify membership grows by one."""
    # Seed a contact and a group if the database is empty.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="Nana", middlename="Ver", lastname="Ko"))
    chosen_contact = random.choice(db.get_contact_list())
    app.contact.select_contact_checkbox_by_id(chosen_contact.id)
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    chosen_group = random.choice(db.get_group_list())
    members_before = len(db.get_contacts_in_group(chosen_group))
    app.group.select_group_from_group_dropdown_menu(chosen_group.id)
    app.contact.add_contact_in_group_by_id()
    members_after = len(db.get_contacts_in_group(chosen_group))
    assert members_after == members_before + 1
|
Keith234/python_training | test/test_delete_contact_from_group.py | from fixture.orm import ORMFixture
from model.contact import Contact
import random
from model.group import Group
# Module-level ORM fixture; assumes a local addressbook MySQL instance.
db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_delete_contact_to_group(app):
    """Remove a random contact from a random group; membership must shrink by one."""
    # Seed a contact and a group if the database is empty.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="Nana", middlename="Ver", lastname="Ko"))
    contacts = db.get_contact_list()
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    chosen_group = random.choice(db.get_group_list())
    members = db.get_contacts_in_group(chosen_group)
    if len(members) == 0:
        # Group is empty: add a contact first so there is something to delete.
        recruit = random.choice(contacts)
        app.contact.select_contact_checkbox_by_id(recruit.id)
        app.group.select_group_from_group_dropdown_menu(chosen_group.id)
        app.contact.add_contact_in_group_by_id()
        members = db.get_contacts_in_group(chosen_group)
    members_before = len(db.get_contacts_in_group(chosen_group))
    app.contact.open_group_page_with_members(chosen_group.id)
    victim = random.choice(members)
    app.contact.delete_contact_from_group_by_id(victim.id)
    members_after = len(db.get_contacts_in_group(chosen_group))
    assert members_after == members_before - 1
|
yogeshwarreddy13/local_server | src/csv_to_db_package/crud_operations_db.py | <filename>src/csv_to_db_package/crud_operations_db.py<gh_stars>0
"""
This module contains useful CRUD operation functions that
can be used for database table.
"""
import logging
from mysql.connector import connect, errors
import pymysql
import pandas
import boto3
import os
import csv
import requests
import json
# Log to a file so HTTP traffic does not clutter stdout.
logging.basicConfig(filename='server_info.log', level=logging.INFO,
                    format='%(asctime)s:%(levelname)s:%(message)s')
# Local MySQL connection settings (the password value is a redaction
# placeholder); patched by the test suite via patch.dict.
config = {"host": "localhost",
          "user": "root",
          "database": "csvfile_upload",
          "password": "<PASSWORD>"}
"""The script contains different functions for CRUD operations in database."""
def view_db_data(url):
    """Fetch records from the REST endpoint and render them as an HTML table.

    (Fix: docstring previously documented ``db_name``/``db_table`` params
    that do not exist; removed dead commented-out MySQL code; replaced the
    quadratic ``+=`` string build with a single join.)

    :param url: API endpoint returning JSON shaped like
        ``{"body": [{column: value, ...}, ...]}``.
    :return: HTML ``<table>`` string with Create / "store data to S3"
        header buttons and per-row Delete / Update buttons.
    :raises: re-raises any caught mysql.connector error after logging it.
    """
    try:
        response = requests.get(url)
        payload = json.loads(response.text)
        # Column names come from the first record; all records are assumed
        # to share the same keys.
        columns = tuple(payload["body"][0].keys())
        rows = [tuple(record.values()) for record in payload["body"]]
        parts = ['<table><tr>']
        for col_idx, col_name in enumerate(columns):
            parts.append('<th id="thead_{}">{}</th>'.format(col_idx, col_name))
        parts.append('<td><button type="submit" onclick="redirect_to_create()">'
                     'Create</button></td>')
        parts.append('<td><button type="submit" onclick="redirect_to_S3()">'
                     'store data to S3</button></td>')
        parts.append('</tr>')
        for row_idx, row in enumerate(rows):
            parts.append('<tr id={}>'.format(row_idx))
            for cell in row:
                parts.append('<td class="row-data"> {}</td>'.format(cell))
            parts.append('<td><button type="button" onclick="my_button_click_handler()">'
                         'Delete</button></td>')
            parts.append('<td><button type="submit" onclick="my_update_data()">'
                         'Update</button></td>')
            parts.append('</tr>')
        parts.append('</table>')
        return ''.join(parts)
    # NOTE(review): these handlers predate the switch from MySQL to the
    # REST API and will not catch requests exceptions — consider updating.
    except errors.ProgrammingError as db_e:
        logging.error('%s: %s', db_e.__class__.__name__, db_e)
        raise
    except errors.Error as err:
        logging.error('%s: %s', err.__class__.__name__, err)
        raise
def delete_db_row(url, object_id):
    """Delete one record through the REST API.

    (Fix: docstring previously documented nonexistent ``db_name`` /
    ``db_table`` params; removed dead commented-out MySQL code and the
    unused ``delete_request`` local.)

    :param url: API endpoint handling DELETE requests.
    :param object_id: identifier of the record to remove.
    :return: a confirmation message string.
    :raises: re-raises any caught mysql.connector error after logging it.
    """
    try:
        # NOTE(review): the API appears to accept the raw id as the request
        # body rather than a JSON payload — confirm against the lambda.
        requests.delete(url, data=object_id)
        return "Deleted Successfully"
    except errors.ProgrammingError as db_e:
        logging.error('%s: %s', db_e.__class__.__name__, db_e)
        raise
    except errors.Error as err:
        logging.error('%s: %s', err.__class__.__name__, err)
        raise
def insert_db_row(url, dict_values: dict):
    """Insert one record through the REST API.

    Fetches the current records first to learn the expected column set,
    then POSTs only those keys from *dict_values*.
    (Fix: docstring previously documented nonexistent ``db_name`` /
    ``db_table`` params; removed dead commented-out MySQL code and the
    unused ``post_request`` local.)

    :param url: API endpoint handling GET and POST requests.
    :param dict_values: mapping of column name to value for the new row.
    :return: a confirmation message string.
    :raises KeyError: if *dict_values* is missing one of the API's columns.
    :raises: re-raises any caught mysql.connector error after logging it.
    """
    try:
        current = json.loads(requests.get(url).text)
        columns = tuple(current["body"][0].keys())
        payload = {column: dict_values[column] for column in columns}
        requests.post(url, json=payload)
        return "Successfully Inserted"
    except errors.IntegrityError as in_err:
        logging.error('%s: %s', in_err.__class__.__name__, in_err)
        raise
    except errors.DatabaseError as db_err:
        logging.error('%s: %s', db_err.__class__.__name__, db_err)
        raise
    except errors.Error as err:
        logging.error('%s: %s', err.__class__.__name__, err)
        raise
def update_db_row(url, dict_values: dict, object_id):
    """Update one record through the REST API.

    (Fix: docstring previously documented nonexistent ``db_name`` /
    ``db_table`` params; removed dead commented-out MySQL code and the
    unused ``put_request`` local.)

    :param url: API endpoint handling PUT requests.
    :param dict_values: mapping of column name to new value.
    :param object_id: identifier of the record to update.
    :return: a confirmation message string.
    :raises: re-raises any caught mysql.connector error after logging it.
    """
    try:
        # The lambda expects {"target": {...}, "set": {...}} as the payload.
        payload = {"target": {"objectID": f'{object_id}'}, "set": dict_values}
        requests.put(url, json=payload)
        return "Data updated successfully"
    except errors.IntegrityError as in_err:
        logging.error('%s: %s', in_err.__class__.__name__, in_err)
        raise
    except errors.ProgrammingError as db_e:
        logging.error('%s: %s', db_e.__class__.__name__, db_e)
        raise
    except errors.Error as err:
        logging.error('%s: %s', err.__class__.__name__, err)
        raise
def select_db_row(url, object_id):
    """Build a pre-filled HTML edit form for one record fetched from the API.

    (Fix: when no record matched *object_id*, ``value`` was left unbound
    and the function crashed with a confusing NameError; it now raises a
    descriptive ValueError.  Also removed dead commented-out MySQL code
    and the reuse of loop variable ``i`` for two different loops.)

    :param url: API endpoint returning ``{"body": [...]}`` JSON.
    :param object_id: identifier of the record to edit.
    :return: HTML string of labelled text inputs plus an Update button.
    :raises ValueError: if no record with *object_id* exists.
    :raises: re-raises any caught mysql.connector error after logging it.
    """
    try:
        payload = json.loads(requests.get(url).text)
        columns = tuple(payload["body"][0].keys())
        values = None
        # Last matching record wins, mirroring the original scan order.
        for record in payload["body"]:
            if int(record["objectID"]) == int(object_id):
                values = list(record.values())
        if values is None:
            raise ValueError("no record with objectID={}".format(object_id))
        output = ''
        for idx, column in enumerate(columns):
            output += f'{column} <input name="{column}" type="text" ' \
                      f'value="{values[idx]}"><br>'
        output += '<button type="submit" onclick="redirect_to_viewtable()">Update</button>'
        return output
    except errors.DatabaseError as db_e:
        logging.error('%s: %s', db_e.__class__.__name__, db_e)
        raise
    except errors.Error as err:
        logging.error('%s: %s', err.__class__.__name__, err)
        raise
def upload_to_s3(csv_file="output.csv", bucket="yogesh-bucket", key="myoutput.csv"):
    """Upload a local CSV file to S3.

    (Fix: bucket and key were hard-coded — now parameters with the original
    values as defaults.  ``csv_file`` also gets a default because
    ``do_POST`` calls this function with no arguments, which previously
    raised TypeError; "output.csv" matches the filename the removed
    MySQL-export code used to write — confirm against the caller.)

    :param csv_file: local path of the file to upload.
    :param bucket: destination S3 bucket name.
    :param key: object key to store the file under.
    """
    s3_client = boto3.client('s3')
    s3_client.upload_file(csv_file, bucket, key)
|
yogeshwarreddy13/local_server | create_database.py | """
This module is to create a database
"""
from mysql.connector import connect, Error
# One-shot script: create the fake_db database used by the unit tests.
try:
    connection = connect(host='localhost', user='root',
                         password='<PASSWORD>')
    if connection.is_connected():
        db_cursor = connection.cursor()
        db_cursor.execute("CREATE DATABASE fake_db")
        print("Database created")
except Error as connect_error:
    print("Error while connecting to MySQL", connect_error)
|
yogeshwarreddy13/local_server | test_fakeDB.py | from unittest import TestCase
from unittest.mock import patch
import sys
import os
import mysql.connector
from mysql.connector import errorcode
import csv_to_db_package.crud_operations_db
# Connection settings for the throwaway test database.
MYSQL_HOST = "localhost"
MYSQL_DB = "fake_db"
MYSQL_USER = "root"
MYSQL_PASSWORD = "<PASSWORD>"  # redacted credential placeholder
class MockDB(TestCase):
    """TestCase base class that provisions a throwaway MySQL database.

    ``setUpClass`` drops and recreates ``fake_db`` with a ``test_table``
    matching the CSV schema, then builds a ``patch.dict`` for the
    module-level connection config so code under test talks to the fake DB.

    (Fix: three ``password`` values held the bare redaction placeholder
    ``<PASSWORD>``, which is a syntax error; the ``MYSQL_PASSWORD``
    constant is clearly what was intended.)
    """

    @classmethod
    def setUpClass(cls):
        """Recreate the fake database and table; prepare the config patch."""
        conn = mysql.connector.connect(
            host=MYSQL_HOST,
            user=MYSQL_USER,
            password=MYSQL_PASSWORD
        )
        cursor = conn.cursor(dictionary=True)
        # Drop any leftover database from a previous aborted run.
        try:
            cursor.execute(f"DROP DATABASE {MYSQL_DB}")
            cursor.close()
            print("DB dropped")
        except mysql.connector.Error as err:
            print(f"{MYSQL_DB}{err}")
        cursor = conn.cursor()
        try:
            cursor.execute(
                f"CREATE DATABASE {MYSQL_DB}")
        except mysql.connector.Error as err:
            print(f"Failed creating database: {err}")
            sys.exit(1)
        conn.database = MYSQL_DB
        # Schema mirrors CREATE_TABLE_QUERY in csv_to_db.py.
        query = """CREATE TABLE test_table (objectId int(11) NOT NULL,isHighlight tinyint(1) NOT NULL,
accessionNumber varchar(50) NOT NULL,accessionYear int(11) NOT NULL,
isPublicDomain tinyint(1) NOT NULL,primaryImage varchar(50) DEFAULT NULL,
primaryImageSmall varchar(50) DEFAULT NULL,additionalImages varchar(50) DEFAULT NULL,
department varchar(50) NOT NULL,objectName varchar(50) NOT NULL,
title varchar(50) DEFAULT NULL,culture varchar(50) DEFAULT NULL,period varchar(50) DEFAULT NULL,
dynasty varchar(50) DEFAULT NULL,reign varchar(50) DEFAULT NULL,portfolio varchar(50) DEFAULT NULL,
artistRole varchar(50) DEFAULT NULL,artistPrefix varchar(50) DEFAULT NULL,
artistDisplayName varchar(50) DEFAULT NULL,artistDisplayBio varchar(200) DEFAULT NULL,
artistSuffix varchar(50) DEFAULT NULL,artistAlphaSort varchar(50) DEFAULT NULL,
artistNationality varchar(50) DEFAULT NULL,artistBeginDate varchar(50) DEFAULT NULL,
artistEndDate varchar(50) DEFAULT NULL,artistGender varchar(20) NOT NULL,
artistWikidata_URL varchar(50) DEFAULT NULL,artistULAN_URL varchar(50) DEFAULT NULL,
objectDate varchar(50) DEFAULT NULL,objectBeginDate varchar(50) DEFAULT NULL,
objectEndDate varchar(50) DEFAULT NULL,medium varchar(50) DEFAULT NULL,
dimensions varchar(50) DEFAULT NULL,measurements varchar(50) DEFAULT NULL,
creditLine varchar(50) DEFAULT NULL,geographyType varchar(50) DEFAULT NULL,
city varchar(50) DEFAULT NULL,state varchar(50) DEFAULT NULL,
county varchar(50) DEFAULT NULL,country varchar(50) DEFAULT NULL,
region varchar(50) DEFAULT NULL,subregion varchar(50) DEFAULT NULL,
locale varchar(50) DEFAULT NULL,locus varchar(50) DEFAULT NULL,excavation varchar(50) DEFAULT NULL,
river varchar(50) DEFAULT NULL,classification varchar(50) DEFAULT NULL,rightsAndReproduction varchar(50) DEFAULT NULL,
linkResource varchar(50) DEFAULT NULL,metadataDate varchar(50) DEFAULT NULL,
repository varchar(50) DEFAULT NULL,objectURL varchar(50) DEFAULT NULL,
tags varchar(50) DEFAULT NULL,objectWikidata_URL varchar(50) DEFAULT NULL,
isTimelineWork tinyint(1) NOT NULL,galleryNumber int(11) DEFAULT NULL,constituentID double DEFAULT NULL,
role varchar(50) DEFAULT NULL,name varchar(50) DEFAULT NULL,constituentULAN_URL varchar(50) DEFAULT NULL,
constituentWikidata_URL varchar(50) DEFAULT NULL,gender varchar(20) NOT NULL,PRIMARY KEY (objectId))
"""
        try:
            cursor.execute(query)
            conn.commit()
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
                print("test_table already exists.")
            else:
                print(err.msg)
        else:
            print("OK")
        cursor.close()
        conn.close()
        testconfig = {
            'host': MYSQL_HOST,
            'user': MYSQL_USER,
            'password': MYSQL_PASSWORD,
            'database': MYSQL_DB
        }
        # Subclasses enter this patcher to point the CRUD module at fake_db.
        cls.mock_db_config = patch.dict(csv_to_db_package.crud_operations_db.config, testconfig)

    @classmethod
    def tearDownClass(cls):
        """Drop the fake database created by setUpClass."""
        cnx = mysql.connector.connect(
            host=MYSQL_HOST,
            user=MYSQL_USER,
            password=MYSQL_PASSWORD
        )
        cursor = cnx.cursor()
        # drop test database
        try:
            cursor.execute(f"DROP DATABASE {MYSQL_DB}")
            cnx.commit()
            cursor.close()
        except mysql.connector.Error as err:
            print(f"Database {MYSQL_DB} does not exists. Dropping db failed {err}")
        cnx.close()
yogeshwarreddy13/local_server | src/csv_to_db_package/csv_to_db.py | <gh_stars>0
"""
This module is to upload csv file to DataBase
"""
import logging
import pandas as pd
from mysql.connector import connect, errors
logging.basicConfig(filename="server_info.log", level=logging.INFO,
format='%(asctime)s:%(levelname)s:%(message)s')
# DDL for the upload target table; duplicated in test_fakeDB.py's MockDB.
CREATE_TABLE_QUERY = """CREATE TABLE csvfile_data (objectId int(11) NOT NULL,
isHighlight tinyint(1) NOT NULL,
accessionNumber varchar(50) NOT NULL,accessionYear int(11) NOT NULL,
isPublicDomain tinyint(1) NOT NULL,primaryImage varchar(50) DEFAULT NULL,
primaryImageSmall varchar(50) DEFAULT NULL,additionalImages varchar(50) DEFAULT NULL,
department varchar(50) NOT NULL,objectName varchar(50) NOT NULL,
title varchar(50) DEFAULT NULL,culture varchar(50) DEFAULT NULL,period varchar(50) DEFAULT NULL,
dynasty varchar(50) DEFAULT NULL,reign varchar(50) DEFAULT NULL,portfolio varchar(50) DEFAULT NULL,
artistRole varchar(50) DEFAULT NULL,artistPrefix varchar(50) DEFAULT NULL,
artistDisplayName varchar(50) DEFAULT NULL,artistDisplayBio varchar(200) DEFAULT NULL,
artistSuffix varchar(50) DEFAULT NULL,artistAlphaSort varchar(50) DEFAULT NULL,
artistNationality varchar(50) DEFAULT NULL,artistBeginDate varchar(50) DEFAULT NULL,
artistEndDate varchar(50) DEFAULT NULL,artistGender varchar(20) NOT NULL,
artistWikidata_URL varchar(50) DEFAULT NULL,artistULAN_URL varchar(50) DEFAULT NULL,
objectDate varchar(50) DEFAULT NULL,objectBeginDate varchar(50) DEFAULT NULL,
objectEndDate varchar(50) DEFAULT NULL,medium varchar(50) DEFAULT NULL,
dimensions varchar(50) DEFAULT NULL,measurements varchar(50) DEFAULT NULL,
creditLine varchar(50) DEFAULT NULL,geographyType varchar(50) DEFAULT NULL,
city varchar(50) DEFAULT NULL,state varchar(50) DEFAULT NULL,
county varchar(50) DEFAULT NULL,country varchar(50) DEFAULT NULL,
region varchar(50) DEFAULT NULL,subregion varchar(50) DEFAULT NULL,
locale varchar(50) DEFAULT NULL,locus varchar(50) DEFAULT NULL,excavation varchar(50) DEFAULT NULL,
river varchar(50) DEFAULT NULL,classification varchar(50) DEFAULT NULL,rightsAndReproduction varchar(50) DEFAULT NULL,
linkResource varchar(50) DEFAULT NULL,metadataDate varchar(50) DEFAULT NULL,
repository varchar(50) DEFAULT NULL,objectURL varchar(50) DEFAULT NULL,
tags varchar(50) DEFAULT NULL,objectWikidata_URL varchar(50) DEFAULT NULL,
isTimelineWork tinyint(1) NOT NULL,galleryNumber int(11) DEFAULT NULL,constituentID double DEFAULT NULL,
role varchar(50) DEFAULT NULL,name varchar(50) DEFAULT NULL,constituentULAN_URL varchar(50) DEFAULT NULL,
constituentWikidata_URL varchar(50) DEFAULT NULL,gender varchar(20) NOT NULL,PRIMARY KEY (objectId))
"""
def csv_to_db_func(file_name):
    """Load a CSV file into the ``csvfile_upload.csvfile_data`` table.

    Reads *file_name* with pandas, fills missing values, drops and
    recreates the table, then inserts every row.
    (Fix: the connection was never closed — now released in ``finally``;
    removed a leftover debug ``print(row)``.)

    :param file_name: path of the CSV file uploaded by the user.
    """
    logging.info("Welcome to csv func")
    df_data = pd.read_csv(file_name, index_col=False, delimiter=',')
    # Domain-specific defaults for known columns, then 0 for everything else.
    replacement = {'height_feet': 0.0, 'height_inches': 0.0,
                   'position': "missing", 'weight_pounds': 0.0}
    df_data.fillna(value=replacement, inplace=True)
    df_data.fillna(0, inplace=True)
    conn = None
    try:
        conn = connect(host='localhost',
                       database="csvfile_upload",
                       user='root',
                       password='<PASSWORD>')
        if conn.is_connected():
            cursor = conn.cursor()
            cursor.execute("select database();")
            record = cursor.fetchone()
            logging.info("You're connected to database: %s", record)
            cursor.execute('DROP TABLE IF EXISTS csvfile_data;')
            logging.info("Creating table....")
            cursor.execute(CREATE_TABLE_QUERY)
            logging.info("Table is created....")
            # NOTE(review): values are interpolated, not parameterized —
            # acceptable only because the data comes from a local upload.
            for _, row in df_data.iterrows():
                cursor.execute("INSERT INTO csvfile_upload.csvfile_data VALUES {}"
                               .format(tuple(row)))
            # the connection is not autocommit, so commit explicitly
            conn.commit()
    except errors.DatabaseError as db_e:
        logging.error('%s: %s', db_e.__class__.__name__, db_e)
    except errors.Error as error_e:
        logging.error('%s: %s', error_e.__class__.__name__, error_e)
    finally:
        if conn is not None and conn.is_connected():
            conn.close()
|
yogeshwarreddy13/local_server | local_server.py | <gh_stars>0
#!c:\users\yogeshwar\anaconda3\python.exe
from http.server import HTTPServer, BaseHTTPRequestHandler
import cgi
import logging
import pandas as pd
import json
from src.csv_to_db_package.csv_to_db import csv_to_db_func
from src.csv_to_db_package.crud_operations_db import view_db_data, delete_db_row, insert_db_row,\
update_db_row, select_db_row, upload_to_s3
from mysql.connector import connect, errors
import boto3
# from src.csv_to_db_package.csv_to_db import csv_to_db_func
# REST endpoint backed by the S3-stored JSON data set.
api_endpoint = "https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data"
logging.basicConfig(filename='server_info.log', level=logging.INFO,
                    format='%(asctime)s:%(levelname)s:%(message)s')
# Module-level S3 client shared by all request handlers.
s3 = boto3.client('s3')
def put_object_to_s3(filename):
    """Upload the local file *filename* to the lambda bucket under the same key.

    (Fix: dropped the unused ``result`` local.)

    :param filename: local path, also used as the S3 object key.
    """
    with open(filename, 'rb') as file:
        s3.put_object(Bucket='yogesh-lambda-bucket', Key=filename, Body=file.read())
class RequestHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        """Dispatch GET requests on ``self.path``:

        /uploadCSV   — landing page linking to the upload form
        /new         — multipart file-upload form
        /viewtable   — rendered data table plus row/toolbar JavaScript
        /to_S3       — confirmation page for exporting the data to S3
        /add         — blank form for creating a record
        /update_data — pre-filled form for editing one record

        NOTE(review): the branches are independent ``if``s, so a path that
        matched several suffixes would emit multiple responses — confirm
        the client-side paths keep them mutually exclusive.
        """
        try:
            if self.path.endswith('/uploadCSV'):
                # Landing page.
                self.send_response(200)
                self.send_header('content-type', 'text/html')
                self.end_headers()
                output = ''
                output += '<html><head><meta charset="utf-8"></head><body>'
                output += '<h1>Welcome to csv upload!!</h1>'
                output += '<h3><a href="/uploadCSV/new">Add new file</a></h3>'
                output += '</body></html>'
                self.wfile.write(output.encode())
            if self.path.endswith('/new'):
                # Multipart upload form; POSTs back to /uploadCSV/new.
                self.send_response(200)
                self.send_header('content-type', 'text/html')
                self.end_headers()
                output = ''
                output += '<html><head><meta charset="utf-8"></head><body>'
                output += '<h2>Add new file</h2>'
                output += '<form method="POST" enctype="multipart/form-data" action="/uploadCSV/new">'
                output += '<input name="task" type="file" placeholder="Add new file">'
                output += '<input type="submit" value="Upload">'
                output += '</form>'
                output += '</body></html>'
                self.wfile.write(output.encode())
            if self.path.endswith('/viewtable'):
                # Data table page: jQuery + handlers for the per-row
                # Delete/Update buttons and the toolbar Create/S3 buttons.
                self.send_response(200)
                self.send_header('content-type', 'text/html')
                self.end_headers()
                output = ''
                output += '<html><head><meta charset="utf-8"></head><body>'
                output += '<h2>File uploaded Successfully</h2>'
                output += '<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>'
                output += """<script>
                function my_button_click_handler()
                {
                var rowId = event.target.parentNode.parentNode.id;
                var data = document.getElementById(rowId).querySelectorAll(".row-data");
                var objectID = data[0].innerHTML;
                alert('Button Clicked with row_id =' + rowId + ' objectID =as' + objectID);
                $.ajax(
                {
                type:'POST',
                contentType:'application/json;charset-utf-08',
                dataType:'json',
                url:'http://localhost:8000/delete_data?value='+objectID,
                }
                );
                }
                function my_update_data()
                {
                var rowId = event.target.parentNode.parentNode.id;
                var data = document.getElementById(rowId).querySelectorAll(".row-data");
                var objectID = data[0].innerHTML;
                alert('Button Clicked with row_id =' + rowId + ' objectID =as' + objectID);
                window.location.href="/update_data/objectID="+objectID
                }
                </script>"""
                output += """<script>
                function redirect_to_create()
                {
                window.location.href="/add"
                }
                </script>"""
                output += """<script>
                function redirect_to_S3()
                {
                window.location.href="/to_S3"
                }
                </script>"""
                output += view_db_data("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data")
                output += '</body></html>'
                self.wfile.write(output.encode())
            if self.path.endswith('/to_S3'):
                # Confirmation page whose POST triggers the S3 export.
                self.send_response(200)
                self.send_header('content-type', 'text/html')
                self.end_headers()
                output = ''
                output += '<html><head><meta charset="utf-8"></head><body>'
                output += '<form method="POST" enctype="multipart/form-data" action="/to_S3">'
                output += '<h2>Click to store data in S3</h2>'
                output += '<input type="submit" value="Upload to S3">'
                output += '</form>'
                output += '</body></html>'
                self.wfile.write(output.encode())
            if self.path.endswith('/add'):
                # Blank create form: one text input per table column.
                self.send_response(200)
                self.send_header('content-type', 'text/html')
                self.end_headers()
                output = ''
                output += '<html><head><meta charset="utf-8"></head><body>'
                output += '<form method="POST" enctype="multipart/form-data" action="/add">'
                output += 'objectID: <input name="objectID" type="text"><br><br>'
                output += 'isHighlight: <input name="isHighlight" type="text"><br><br>'
                output += 'accessionNumber: <input name="accessionNumber" type="text"><br><br>'
                output += 'accessionYear: <input name="accessionYear" type="text"><br><br>'
                output += 'isPublicDomain: <input name="isPublicDomain" type="text"><br><br>'
                output += 'primaryImage: <input name="primaryImage" type="text"><br><br>'
                output += 'primaryImageSmall: <input name="primaryImageSmall" type="text"><br><br>'
                output += 'additionalImages: <input name="additionalImages" type="text"><br><br>'
                output += 'department: <input name="department" type="text"><br><br>'
                output += 'objectName: <input name="objectName" type="text"><br><br>'
                output += 'title: <input name="title" type="text"><br><br>'
                output += 'culture: <input name="culture" type="text"><br><br>'
                output += 'period: <input name="period" type="text"><br><br>'
                output += 'dynasty: <input name="dynasty" type="text"><br><br>'
                output += 'reign: <input name="reign" type="text"><br><br>'
                output += 'portfolio: <input name="portfolio" type="text"><br><br>'
                output += 'artistRole: <input name="artistRole" type="text"><br><br>'
                output += 'artistPrefix: <input name="artistPrefix" type="text"><br><br>'
                output += 'artistDisplayName: <input name="artistDisplayName" type="text"><br><br>'
                output += 'artistDisplayBio: <input name="artistDisplayBio" type="text"><br><br>'
                output += 'artistSuffix: <input name="artistSuffix" type="text"><br><br>'
                output += 'artistAlphaSort: <input name="artistAlphaSort" type="text"><br><br>'
                output += 'artistNationality: <input name="artistNationality" type="text"><br><br>'
                output += 'artistBeginDate: <input name="artistBeginDate" type="text"><br><br>'
                output += 'artistEndDate: <input name="artistEndDate" type="text"><br><br>'
                output += 'artistGender: <input name="artistGender" type="text"><br><br>'
                output += 'artistWikidata_URL: <input name="artistWikidata_URL" type="text"><br><br>'
                output += 'artistULAN_URL: <input name="artistULAN_URL" type="text"><br><br>'
                output += 'objectDate: <input name="objectDate" type="text"><br><br>'
                output += 'objectBeginDate: <input name="objectBeginDate" type="text"><br><br>'
                output += 'objectEndDate: <input name="objectEndDate" type="text"><br><br>'
                output += 'medium: <input name="medium" type="text"><br><br>'
                output += 'dimensions: <input name="dimensions" type="text"><br><br>'
                output += 'measurements: <input name="measurements" type="text"><br><br>'
                output += 'creditLine: <input name="creditLine" type="text"><br><br>'
                output += 'geographyType: <input name="geographyType" type="text"><br><br>'
                output += 'city: <input name="city" type="text"><br><br>'
                output += 'state: <input name="state" type="text"><br><br>'
                output += 'county: <input name="county" type="text"><br><br>'
                output += 'country: <input name="country" type="text"><br><br>'
                output += 'region: <input name="region" type="text"><br><br>'
                output += 'subregion: <input name="subregion" type="text"><br><br>'
                output += 'locale: <input name="locale" type="text"><br><br>'
                output += 'locus: <input name="locus" type="text"><br><br>'
                output += 'excavation: <input name="excavation" type="text"><br><br>'
                output += 'river: <input name="river" type="text"><br><br>'
                output += 'classification: <input name="classification" type="text"><br><br>'
                output += 'rightsAndReproduction: <input name="rightsAndReproduction" type="text"><br><br>'
                output += 'linkResource: <input name="linkResource" type="text"><br><br>'
                output += 'metadataDate: <input name="metadataDate" type="text"><br><br>'
                output += 'repository: <input name="repository" type="text"><br><br>'
                output += 'objectURL: <input name="objectURL" type="text"><br><br>'
                output += 'tags: <input name="tags" type="text"><br><br>'
                output += 'objectWikidata_URL: <input name="objectWikidata_URL" type="text"><br><br>'
                output += 'isTimelineWork: <input name="isTimelineWork" type="text"><br><br>'
                output += 'GalleryNumber: <input name="GalleryNumber" type="text"><br><br>'
                output += 'constituentID: <input name="constituentID" type="text"><br><br>'
                output += 'role: <input name="role" type="text"><br><br>'
                output += 'name: <input name="name" type="text"><br><br>'
                output += 'constituentULAN_URL: <input name="constituentULAN_URL" type="text"><br><br>'
                output += 'constituentWikidata_URL: <input name="constituentWikidata_URL" type="text"><br><br>'
                output += 'gender: <input name="gender" type="text"><br><br>'
                output += '<input type="submit" value="Add">'
                output += '</form>'
                output += '</body></html>'
                self.wfile.write(output.encode())
            if self.path.startswith('/update_data'):
                # NOTE(review): the path looks like /update_data/objectID=<n>
                # and the id is extracted by fixed-offset slicing — confirm
                # the offsets against the JS that builds the URL.
                value = str(self.path[22:])[3:]
                self.send_response(200)
                self.send_header('content-type', 'text/html')
                self.end_headers()
                output = ''
                output += '<html><body>'
                output += '<form method="POST" enctype="multipart/form-data" action="/update_data">'
                output += select_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", int(value))
                output += """<script>
                function redirect_to_viewtable()
                {
                window.location.href="/viewtable"
                }
                </script>"""
                output += '</form>'
                output += '</body></html>'
                self.wfile.write(output.encode())
        except PermissionError as per_err:
            logging.error('%s: %s', per_err.__class__.__name__, per_err)
        except TypeError as type_err:
            logging.error('%s: %s', type_err.__class__.__name__, type_err)
        # NOTE(review): broad catch keeps the server alive but hides bugs;
        # the error is at least logged.
        except Exception as err:
            logging.error('%s: %s', err.__class__.__name__, err)
def do_POST(self):
try:
if self.path.endswith('/new'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT-LENGTH'] = content_len
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
file = fields.get('task')[0]
file = file.decode("cp1252")
with open('file.csv', mode='w', encoding='utf-8') as f:
for data in file.split('\r\r'):
f.write(data)
df = pd.read_csv('file.csv')
df.to_json('jsondata.json', orient='records')
put_object_to_s3('jsondata.json')
# csv_to_db_func('file.csv')
# upload_to_s3('file.csv')
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
self.wfile.write(file.encode())
if self.path.startswith('/delete_data'):
value_id = self.path[22:]
delete_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", value_id)
# with open("jsondata.json", "r") as jsonFile:
# data = json.load(jsonFile)
# data = [i for i in data if not (i['objectID'] == int(value_id))]
# with open("jsondata.json", "w") as jsonFile:
# json.dump(data, jsonFile)
# put_object_to_s3('jsondata.json')
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.startswith('/to_S3'):
upload_to_s3()
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.endswith('/add'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
saviour = pdict['boundary']
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT-LENGTH'] = content_len
if ctype == 'multipart/form-data':
guess = self.rfile
fields = cgi.parse_multipart(self.rfile, pdict)
i=0
for key in fields:
if fields[key][0] == '':
i = i+1
if i != 0:
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><head><meta charset="utf-8"></head><body>'
output += '<h2>All fields must be filled</h2>'
output += '<h3><a href="/viewtable">Back to viewtable</a></h3>'
output += '</body></html>'
self.wfile.write(output.encode())
else:
for key in fields:
fields[key] = fields[key][0]
print(fields)
insert_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", fields)
# with open('jsondata.json') as json_file:
# obj_list = json.load(json_file)
# obj_list.append(fields)
# with open('jsondata.json', 'w') as json_file:
# json.dump(obj_list, json_file,
# indent=4,
# separators=(',', ': '))
# put_object_to_s3('jsondata.json')
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.startswith('/update_data'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT-LENGTH'] = content_len
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
# conn = connect(host='localhost',
# database="csvfile_upload",
# user='root',
# password='<PASSWORD>')
#
# if conn.is_connected():
# cursor = conn.cursor()
# query = "SELECT objectId From csvfile_upload.csvfile_data"
# cursor.execute(query)
# primary_keys = cursor.fetchall()
# primary_key = fields['objectId']
# list1 = []
# for i in range(len(primary_keys)):
# list1.append(i)
# list2 = []
# for i in range(len(list1)):
# list2.append(str(primary_keys[i][0]))
# if primary_key[0] in list2:
for key in fields:
fields[key] = fields[key][0]
update_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", fields, int(fields['objectID']))
# with open("jsondata.json", "r") as jsonFile:
# data = json.load(jsonFile)
# for i in data:
# if int(fields['objectId']) == i['objectID']:
# i.update(fields)
# with open("jsondata.json", "w") as jsonFile:
# json.dump(data, jsonFile)
# put_object_to_s3('jsondata.json')
# else:
# self.send_response(200)
# self.send_header('content-type', 'text/html')
# self.end_headers()
#
# output = ''
# output += '<html><head><meta charset="utf-8"></head><body>'
# output += '<h2>You cannot update primary key</h2>'
# output += '<h3><a href="/viewtable">Back to viewtable</a></h3>'
# output += '</body></html>'
#
# self.wfile.write(output.encode())
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
except PermissionError as per_err:
logging.error('%s: %s', per_err.__class__.__name__, per_err)
except TypeError as type_err:
logging.error('%s: %s', type_err.__class__.__name__, type_err)
except Exception as err:
logging.error('%s: %s', err.__class__.__name__, err)
def main():
    """Start the HTTP server on port 8000 and block serving requests forever."""
    listen_port = 8000
    http_server = HTTPServer(('', listen_port), RequestHandler)
    print("Server started on localhost: ", listen_port)
    http_server.serve_forever()


if __name__ == "__main__":
    main()
|
yogeshwarreddy13/local_server | request_to_restapi.py | import requests
import json

# Pull the JSON table from the REST endpoint and print every row as a tuple of its values.
data = requests.get("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data")
data = json.loads(data.text)
list1 = [tuple(row.values()) for row in data["body"]]
print(list1)
yogeshwarreddy13/local_server | test_CRUD.py | from mysql.connector.errors import ProgrammingError, DatabaseError
from csv_to_db_package.crud_operations_db import view_db_data, delete_db_row, insert_db_row, \
update_db_row, select_db_row
from fake_data import value1, value2, value3, value4, value5, value6, value7
from test_fakeDB import MockDB, MYSQL_DB
empty = {}


class TestCrudData(MockDB):
    """Exercise the CRUD helpers against the mocked database, covering both
    the happy paths and the expected failure modes."""

    def test_insert_in_database_successful(self):
        """Checks if data inserted into database table is successfully inserted"""
        with self.mock_db_config:
            insert_db_row(db_name=MYSQL_DB, db_table='test_table', dict_values=value1)
            expected_row = tuple(value1.values())
            self.assertEqual(
                select_db_row(db_name=MYSQL_DB, db_table='test_table', object_id=1),
                expected_row,
            )

    def test_insert_in_db_unsuccessful(self):
        """Checks if inserting wrong values throws exception"""
        with self.mock_db_config:
            for bad_values in (value4, value5, empty):
                with self.assertRaises(DatabaseError):
                    insert_db_row(db_name=MYSQL_DB, db_table='test_table', dict_values=bad_values)

    def test_delete_from_db_successful(self):
        """Checks if data will get deleted from db table by passing the specified id"""
        with self.mock_db_config:
            insert_db_row(db_name=MYSQL_DB, db_table='test_table', dict_values=value7)
            delete_db_row(db_name=MYSQL_DB, db_table='test_table', object_id=9)
            self.assertIsNone(select_db_row(db_name=MYSQL_DB, db_table='test_table', object_id=9))

    def test_delete_from_db_unsuccessful(self):
        """Checks if specified id provided for delete operation is correct or not"""
        with self.mock_db_config:
            for bad_id in ('', [56], {'key': 'value'}):
                with self.assertRaises(ProgrammingError):
                    delete_db_row(db_name=MYSQL_DB, db_table='test_table', object_id=bad_id)

    def test_update_row_of_database_successful(self):
        """Checks if updating the row went successful"""
        with self.mock_db_config:
            insert_db_row(db_name=MYSQL_DB, db_table='test_table', dict_values=value2)
            update_db_row(db_name=MYSQL_DB, db_table='test_table', dict_values=value6, object_id=2)
            self.assertEqual(
                select_db_row(db_name=MYSQL_DB, db_table='test_table', object_id=2),
                tuple(value6.values()),
            )

    def test_update_row_of_db_unsuccessful(self):
        """Checks if updating the data throws exception while providing wrong inputs"""
        with self.mock_db_config:
            for bad_values, object_id, expected_exc in (
                (value3, 6, ProgrammingError),
                (value4, 77, DatabaseError),
                ({}, 100, ProgrammingError),
            ):
                with self.assertRaises(expected_exc):
                    update_db_row(db_name=MYSQL_DB, db_table='test_table',
                                  dict_values=bad_values, object_id=object_id)

    def test_read_from_db_successful(self):
        """Checks if data read from table returns same data which is in db"""
        with self.mock_db_config:
            self.assertIsNotNone(view_db_data(db_name=MYSQL_DB, db_table='test_table'))

    def test_read_from_db_unsuccessful(self):
        """Checks if db name or db table name is given correct or not"""
        with self.mock_db_config:
            for db_name, db_table in ((MYSQL_DB, 'data_table'), (MYSQL_DB, 100), ('data_base', 100)):
                with self.assertRaises(ProgrammingError):
                    view_db_data(db_name=db_name, db_table=db_table)
|
yogeshwarreddy13/local_server | fake_data.py | value1 = {
"objectID": 1,
"isHighlight": 0,
"accessionNumber": "1979.486.1",
"accessionYear": 1979,
"isPublicDomain": 0,
"primaryImage": '0',
"primaryImageSmall": '0',
"additionalImages": "[]",
"department": "The American Wing",
"objectName": "Coin",
"title": "One-dollar Liberty Head Coin",
"culture": '0',
"period": '0.0',
"dynasty": '0',
"reign": '0',
"portfolio": '0',
"artistRole": "Maker",
"artistPrefix": '0',
"artistDisplayName": "<NAME>",
"artistDisplayBio": "American, Delaware County, Pennsylvania 1794\\u20131869 Philadelphia, Pennsylvania",
"artistSuffix": '0',
"artistAlphaSort": "Longacre, <NAME>",
"artistNationality": "American",
"artistBeginDate": "1794",
"artistEndDate": "1869",
"artistGender": '0',
"artistWikidata_URL": "https://www.wikidata.org/wiki/Q3806459",
"artistULAN_URL": "http://vocab.getty.edu/page/ulan/500011409",
"objectDate": "1853",
"objectBeginDate": "1853",
"objectEndDate": "1853",
"medium": "Gold",
"dimensions": "Dimensions unavailable",
"measurements": "null",
"creditLine": "Gift of <NAME>, 1979",
"geographyType": '0',
"city": '0',
"state": '0',
"county": '0',
"country": '0',
"region": '0',
"subregion": '0.0',
"locale": '0.0',
"locus": '0.0',
"excavation": '0.0',
"river": '0.0',
"classification": '0.0',
"rightsAndReproduction": '0.0',
"linkResource": '0.0',
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/1",
"tags": "null",
"objectWikidata_URL": '0',
"isTimelineWork": 0,
"GalleryNumber": 0,
"constituentID": 164292.0,
"role": "Maker",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500011409",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q3806459",
"gender": '0'}
value2 = {
"objectID": 2,
"isHighlight": 0,
"accessionNumber": "1980.264.5",
"accessionYear": 1980,
"isPublicDomain": 0,
"primaryImage": '0',
"primaryImageSmall": '0',
"additionalImages": "[]",
"department": "The American Wing",
"objectName": "Coin",
"title": "Ten-dollar Liberty Head Coin",
"culture": '0',
"period": '0.0',
"dynasty": '0',
"reign": '0',
"portfolio": '0',
"artistRole": "Maker",
"artistPrefix": '0',
"artistDisplayName": "<NAME>",
"artistDisplayBio": "1785\\u20131844",
"artistSuffix": '0',
"artistAlphaSort": "Gobrecht, Christian",
"artistNationality": '0',
"artistBeginDate": "1785",
"artistEndDate": "1844",
"artistGender": '0',
"artistWikidata_URL": "https://www.wikidata.org/wiki/Q5109648",
"artistULAN_URL": "http://vocab.getty.edu/page/ulan/500077295",
"objectDate": "1901",
"objectBeginDate": "1901",
"objectEndDate": "1901",
"medium": "Gold",
"dimensions": "Dimensions unavailable",
"measurements": "null",
"creditLine": "Gift of <NAME>, 1980",
"geographyType": '0',
"city": '0',
"state": '0',
"county": '0',
"country": '0',
"region": '0',
"subregion": '0',
"locale": '0',
"locus": '0',
"excavation": '0',
"river": '0',
"classification": '0',
"rightsAndReproduction": '0',
"linkResource": '0',
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/2",
"tags": "null",
"objectWikidata_URL": '0',
"isTimelineWork": 0,
"GalleryNumber": 0,
"constituentID": 1079.0,
"role": "Maker",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500077295",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q5109648",
"gender": '0'}
value3 = {
"objectID": 3,
"isHighlight": 0,
"accessionNumber": "67.265.9",
"accessionYear": "1967",
"isPublicDomain": 0,
"primaryImage": 0,
"primaryImageSmall": 0,
"additionalImages": "[]",
"department": "The American Wing",
"objectName": "Coin",
"title": "Two-and-a-Half Dollar Coin",
"culture": 0,
"period": 0,
"dynasty": 0,
"reign": 0,
"portfolio": 0,
"artistRole": 0,
"artistPrefix": 0,
"artistDisplayName": 0,
"artistDisplayBio": 0,
"artistSuffix": 0,
"artistAlphaSort": 0,
"artistNationality": 0,
"artistBeginDate": 0,
"artistEndDate": 0,
"artistGender": 0,
"artistWikidata_URL": 0,
"artistULAN_URL": 0,
"objectDate": "1909\\u201327",
"objectBeginDate": 1909,
"objectEndDate": 1927,
"medium": "Gold",
"dimensions": "Diam. 11/16 in. (1.7 cm)",
"measurements": "null",
"creditLine": "Gift of <NAME> Jr., 1967",
"geographyType": 0,
"city": 0,
"state": 0,
"county": 0,
"country": 0,
"region": 0,
"subregion": 0,
"locale": 0,
"locus": 0,
"excavation": 0,
"river": 0,
"classification": 0,
"rightsAndReproduction": 0,
"linkResource": 0,
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/3",
"tags": "null",
"objectWikidata_URL": "",
"isTimelineWork": 0,
"GalleryNumber": 0,
"constituentID": 1079,
"role": "Maker",
"error":"programmingerror",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500077295",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q5109648",
"gender": 0}
value4 = {
"objectID": 15,
"isHighlight": 0,
"accessionNumber": "16.74.49",
"accessionYear": "1916",
"isPublicDomain": 0,
"primaryImage": 0,
"primaryImageSmall": 0,
"additionalImages": "[]",
"department": "The American Wing",
"bug":"programmingerror",
"objectName": "Coin",
"title": "Coin, 1/2 Real",
"culture": "Mexican",
"period": 0,
"dynasty": 0,
"reign": 0,
"portfolio": 0,
"artistRole": 0,
"artistPrefix": 0,
"artistDisplayName": 0,
"artistDisplayBio": 0,
"artistSuffix": 0,
"artistAlphaSort": 0,
"artistNationality": 0,
"artistBeginDate": 0,
"artistEndDate": 0,
"artistGender": 0,
"artistWikidata_URL": 0,
"artistULAN_URL": 0,
"objectDate": "1665\\u20131700",
"objectBeginDate": 1665,
"objectEndDate": 1700,
"medium": "Silver",
"dimensions": "Diam. 1/2 in. (1.3 cm)",
"measurements": "null",
"creditLine": "Gift of Mrs. <NAME>, 1916",
"geographyType": "Made in",
"city": 0,
"state": 0,
"county": 0,
"country": "Mexico",
"region": 0,
"subregion": 0,
"locale": 0,
"locus": 0,
"excavation": 0,
"river": 0,
"classification": 0,
"rightsAndReproduction": 0,
"linkResource": 0,
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/15",
"tags": "null",
"objectWikidata_URL": 0,
"isTimelineWork": 0,
"GalleryNumber": 0,
"constituentID": 1079,
"role": "Maker",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500077295",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q5109648",
"gender": 0}
value5 = {
"objectID": 15,
"isHighlight": 0,
"accessionNumber": "16.74.49",
"accessionYear": "1916",
"isPublicDomain": 0,
"primaryImage": 0,
"primaryImageSmall": 0,
"additionalImages": "[]",
"department": "The American Wing",
"objectName": "Coin",
"title": "Coin, 1/2 Real",
"culture": "Mexican",
"period": 0,
"dynasty": 0,
"reign": 0,
"portfolio": 0,
"artistRole": 0,
"artistPrefix": 0,
"artistDisplayName": 0,
"artistDisplayBio": 0,
"artistSuffix": 0,
"artistAlphaSort": 0,
"artistNationality": 0,
"artistBeginDate": 0,
"artistEndDate": 0,
"artistGender": 0,
"artistWikidata_URL": 0,
"artistULAN_URL": 0,
"objectDate": "1665\\u20131700",
"objectBeginDate": 1665,
"objectEndDate": 1700,
"medium": "Silver",
"dimensions": "Diam. 1/2 in. (1.3 cm)",
"measurements": "null",
"creditLine": "Gift of Mrs. <NAME>, 1916",
"geographyType": "Made in",
"city": 0,
"state": 0,
"county": 0,
"country": "Mexico",
"region": 0,
"subregion": 0,
"locale": 0,
"locus": 0,
"excavation": 0,
"river": 0,
"classification": 0,
"rightsAndReproduction": 0,
"linkResource": 0,
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/15",
"tags": "null",
"objectWikidata_URL": 0,
"isTimelineWork": 0,
"ExtraCloumn":"nothing",
"GalleryNumber": 0,
"constituentID": 1079,
"role": "Maker",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500077295",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q5109648",
"gender": 0}
value6 = {
"objectID": 2,
"isHighlight": 0,
"accessionNumber": "1980.264.5",
"accessionYear": 1980,
"isPublicDomain": 0,
"primaryImage": '0',
"primaryImageSmall": '0',
"additionalImages": "[]",
"department": "The updated Wing",
"objectName": "updated Coin",
"title": "updated title Ten-dollar Liberty Head Coin",
"culture": '0',
"period": '0.0',
"dynasty": '0',
"reign": '0',
"portfolio": '0',
"artistRole": "Maker",
"artistPrefix": '0',
"artistDisplayName": "<NAME>",
"artistDisplayBio": "1785u20131844",
"artistSuffix": '0',
"artistAlphaSort": "Gobrecht, Christian",
"artistNationality": '0',
"artistBeginDate": "1785",
"artistEndDate": "1844",
"artistGender": '0',
"artistWikidata_URL": "https://www.wikidata.org/wiki/Q5109648",
"artistULAN_URL": "http://vocab.getty.edu/page/ulan/500077295",
"objectDate": "1901",
"objectBeginDate": "1901",
"objectEndDate": "1901",
"medium": "Gold",
"dimensions": "Dimensions unavailable",
"measurements": "null",
"creditLine": "Gift of <NAME>, 1980",
"geographyType": '0',
"city": '0',
"state": '0',
"county": '0',
"country": '0',
"region": '0',
"subregion": '0',
"locale": '0',
"locus": '0',
"excavation": '0',
"river": '0',
"classification": '0',
"rightsAndReproduction": '0',
"linkResource": '0',
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/2",
"tags": "null",
"objectWikidata_URL": '0',
"isTimelineWork": 0,
"GalleryNumber": 0,
"constituentID": 1079.0,
"role": "Maker",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500077295",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q5109648",
"gender": '0'}
value7 = {
"objectID": 9,
"isHighlight": 0,
"accessionNumber": "1979.486.1",
"accessionYear": 1979,
"isPublicDomain": 0,
"primaryImage": '0',
"primaryImageSmall": '0',
"additionalImages": "[]",
"department": "The American Wing",
"objectName": "Coin",
"title": "One-dollar Liberty Head Coin",
"culture": '0',
"period": '0.0',
"dynasty": '0',
"reign": '0',
"portfolio": '0',
"artistRole": "Maker",
"artistPrefix": '0',
"artistDisplayName": "<NAME>",
"artistDisplayBio": "American, Delaware County, Pennsylvania 1794\\u20131869 Philadelphia, Pennsylvania",
"artistSuffix": '0',
"artistAlphaSort": "Longacre, <NAME>",
"artistNationality": "American",
"artistBeginDate": "1794",
"artistEndDate": "1869",
"artistGender": '0',
"artistWikidata_URL": "https://www.wikidata.org/wiki/Q3806459",
"artistULAN_URL": "http://vocab.getty.edu/page/ulan/500011409",
"objectDate": "1853",
"objectBeginDate": "1853",
"objectEndDate": "1853",
"medium": "Gold",
"dimensions": "Dimensions unavailable",
"measurements": "null",
"creditLine": "Gift of <NAME>, 1979",
"geographyType": '0',
"city": '0',
"state": '0',
"county": '0',
"country": '0',
"region": '0',
"subregion": '0.0',
"locale": '0.0',
"locus": '0.0',
"excavation": '0.0',
"river": '0.0',
"classification": '0.0',
"rightsAndReproduction": '0.0',
"linkResource": '0.0',
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/1",
"tags": "null",
"objectWikidata_URL": '0',
"isTimelineWork": 0,
"GalleryNumber": 0,
"constituentID": 164292.0,
"role": "Maker",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500011409",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q3806459",
"gender": '0'}
value8 = {
"objectID": 100,
"isHighlight": 0,
"accessionNumber": "1979.486.1",
"accessionYear": 1979,
"isPublicDomain": 0,
"primaryImage": '0',
"primaryImageSmall": '0',
"additionalImages": "[]",
"department": "The American Wing",
"objectName": "Coin",
"title": "One-dollar Liberty Head Coin",
"culture": '0',
"period": '0.0',
"dynasty": '0',
"reign": '0',
"portfolio": '0',
"artistRole": "Maker",
"artistPrefix": '0',
"artistDisplayName": "<NAME>",
"artistDisplayBio": "American, Delaware County, Pennsylvania 1794\\u20131869 Philadelphia, Pennsylvania",
"artistSuffix": '0',
"artistAlphaSort": "Longacre, <NAME>",
"artistNationality": "American",
"artistBeginDate": "1794",
"artistEndDate": "1869",
"artistGender": '0',
"artistWikidata_URL": "https://www.wikidata.org/wiki/Q3806459",
"artistULAN_URL": "http://vocab.getty.edu/page/ulan/500011409",
"objectDate": "1853",
"objectBeginDate": "1853",
"objectEndDate": "1853",
"medium": "Gold",
"dimensions": "Dimensions unavailable",
"measurements": "null",
"creditLine": "Gift of <NAME>, 1979",
"geographyType": '0',
"city": '0',
"state": '0',
"county": '0',
"country": '0',
"region": '0',
"subregion": '0.0',
"locale": '0.0',
"locus": '0.0',
"excavation": '0.0',
"river": '0.0',
"classification": '0.0',
"rightsAndReproduction": '0.0',
"linkResource": '0.0',
"metadataDate": "2021-04-06T04:41:04.967Z",
"repository": "Metropolitan Museum of Art, New York, NY",
"objectURL": "https://www.metmuseum.org/art/collection/search/1",
"tags": "null",
"objectWikidata_URL": '0',
"isTimelineWork": 0,
"GalleryNumber": 0,
"constituentID": 164292.0,
"role": "Maker",
"name": "<NAME>",
"constituentULAN_URL": "http://vocab.getty.edu/page/ulan/500011409",
"constituentWikidata_URL": "https://www.wikidata.org/wiki/Q3806459",
"gender": '0'} |
jve2kor/Titanic_Machine_Learning | Titanic_disaster_v3.py | import pandas as pd
import seaborn as sb
from sklearn import cross_validation
from sklearn import tree
from sklearn import ensemble
from sklearn.cross_validation import StratifiedKFold

# Fix: `sb.set` was a bare attribute access (a no-op); calling it actually applies the style.
sb.set()

file_path_train = "/Users/jvr605/Downloads/kaggle_titanin_dataset/train.csv"
file_path_test = "/Users/jvr605/Downloads/kaggle_titanin_dataset/test.csv"
file_path_results = "/Users/jvr605/Downloads/kaggle_titanin_dataset/titanic_results.csv"

# Categorical encodings shared by the train and test pipelines.
EMBARKED_CODES = {'C': 1, 'S': 2, 'Q': 3}
SEX_CODES = {'female': 1, 'male': 2}


def _prepare(frame):
    """Drop unusable columns, impute Age and numerically encode Embarked/Sex.

    Uses .map() plus plain column assignment instead of the original chained
    indexed assignment (df.Embarked[mask] = v), which triggers
    SettingWithCopyWarning and is not guaranteed to write back to the frame.
    """
    # Cabin is mostly missing; Name and Ticket carry no usable signal here.
    frame = frame.drop(['Cabin', 'Name', 'Ticket'], axis=1)
    frame['Age'] = frame['Age'].fillna(frame['Age'].mean())
    # Unmapped/missing ports become NaN after map() and are imputed as 'S' (2), as before.
    frame['Embarked'] = frame['Embarked'].map(EMBARKED_CODES).fillna(2)
    frame['Sex'] = frame['Sex'].map(SEX_CODES)
    return frame


train_columns = ['Pclass', 'Sex', 'Age',
                 'SibSp', 'Parch', 'Fare', 'Embarked']
test_columns = ['Survived']

df = _prepare(pd.read_csv(file_path_train))

# Inputs for training and the label column to validate against.
X = df.loc[:, train_columns].values
Y = df.loc[:, test_columns].values
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, random_state=0)

clf = ensemble.GradientBoostingClassifier(n_estimators=104)
clf.fit(X_train, Y_train.ravel())  # ravel(): sklearn expects a 1-D label array
print(clf.score(X_test, Y_test))

# Score the Kaggle test set with the same cleaning pipeline.
df_test = _prepare(pd.read_csv(file_path_test))
df_test['Fare'] = df_test['Fare'].fillna(df_test['Fare'].median())
predict_Values = clf.predict(df_test.loc[:, train_columns].values)

# Prepare the submission file to upload into Kaggle.
output = pd.DataFrame({'PassengerId': df_test.loc[:, 'PassengerId'].values,
                       'Survived': predict_Values})
output.to_csv(file_path_results, index=False)
achalpatel/thenewboston-python | src/thenewboston/factories/network_validator.py | from factory import Faker
from .network_node import NetworkNodeFactory
from ..constants.network import ACCOUNT_FILE_HASH_LENGTH, BLOCK_IDENTIFIER_LENGTH, MAX_POINT_VALUE, MIN_POINT_VALUE
from ..models.network_validator import NetworkValidator
class NetworkValidatorFactory(NetworkNodeFactory):
    """Abstract factory adding validator-specific fields on top of NetworkNodeFactory."""

    # Bounded by the network's point-value limits (MIN_POINT_VALUE..MAX_POINT_VALUE).
    daily_confirmation_rate = Faker('pyint', max_value=MAX_POINT_VALUE, min_value=MIN_POINT_VALUE)
    root_account_file = Faker('url')
    # Faked as plain text of the expected lengths; not real hashes/identifiers.
    root_account_file_hash = Faker('text', max_nb_chars=ACCOUNT_FILE_HASH_LENGTH)
    seed_block_identifier = Faker('text', max_nb_chars=BLOCK_IDENTIFIER_LENGTH)

    class Meta:
        model = NetworkValidator
        abstract = True  # never built directly; concrete validator factories subclass it
|
achalpatel/thenewboston-python | tests/helpers.py | <reponame>achalpatel/thenewboston-python
from thenewboston.accounts.manage import create_account
from thenewboston.verify_keys.verify_key import encode_verify_key
def random_encoded_account_number():
    """Generate a fresh account and return its encoded verify key (account number)."""
    _signing_key, account_number = create_account()
    return encode_verify_key(verify_key=account_number)
|
achalpatel/thenewboston-python | src/thenewboston/base_classes/fetch_primary_validator_config.py | <reponame>achalpatel/thenewboston-python
from django.core.exceptions import ValidationError
from django.core.management import CommandParser
from thenewboston.argparser.validators import decimal_validator
from thenewboston.base_classes.initialize_node import InitializeNode
from thenewboston.constants.network import PRIMARY_VALIDATOR
from thenewboston.utils.format import format_address
from thenewboston.utils.network import fetch
from thenewboston.utils.validators import validate_is_real_number
"""
The FetchPrimaryValidatorConfig class contains logic to fetch and validate configuration data from a primary validator.
"""
class FetchPrimaryValidatorConfig(InitializeNode):
    """Command base class that collects connection details (from CLI options or
    interactive prompts), fetches /config from a primary validator, and hands
    the validated result to a subclass hook."""

    def __init__(self, *args, **kwargs):
        """Initialize FetchPrimaryValidatorConfig class"""
        super().__init__(*args, **kwargs)
        # Collected from CLI options (unattended mode) or interactive prompts.
        self.required_input = {
            'ip_address': None,
            'port': None,
            'protocol': None,
            'trust': None
        }

    def add_arguments(self, parser: CommandParser):
        """Additional custom arguments"""
        super().add_arguments(parser)
        # Trust is a percentage, so the CLI validator bounds it to [0, 100].
        parser.add_argument('--trust', type=decimal_validator(min_val=0, max_val=100))

    def get_primary_validator_address(self):
        """Return formatted address of primary validator"""
        return format_address(
            ip_address=self.required_input['ip_address'],
            port=self.required_input['port'],
            protocol=self.required_input['protocol']
        )

    def get_trust(self, value=None):
        """Get trust from user; re-prompt until a decimal in [0, 100] is supplied."""
        valid = False
        while not valid:
            if self.unattended:
                # NOTE(review): in unattended mode an invalid `value` is retried
                # with the same value forever — presumably never hit in practice
                # because --trust is validated by argparse; worth confirming.
                trust = value
            else:
                trust = input('Enter trust (required): ')
            if not trust:
                self._error('trust required')
                continue
            is_valid_decimal, trust = self.validate_and_convert_to_decimal(trust)
            if not is_valid_decimal:
                continue
            try:
                validate_is_real_number(trust)
            except ValidationError:
                self._error('Value must be a real number')
                continue
            if trust < 0:
                self._error('Value can not be less than 0')
                continue
            if trust > 100:
                self._error('Value can not be greater than 100')
                continue
            self.required_input['trust'] = trust
            valid = True

    def fetch_validator_config(self):
        """Return config fetched from the validator's /config endpoint"""
        address = self.get_primary_validator_address()
        url = f'{address}/config'
        results = fetch(url=url, headers={})
        return results

    def handle(self, *args, **options):
        """Run script: retry the connect/validate cycle until it succeeds."""
        connected = False
        while not connected:
            # NOTE(review): this reset omits the 'trust' key that __init__
            # initializes — harmless on the surface since get_trust() re-adds
            # it, but confirm nothing reads 'trust' before then.
            self.required_input = {
                'ip_address': None,
                'port': None,
                'protocol': None
            }
            self.get_ip_address(value=options.get('ip_address'))
            self.get_protocol(value=options.get('protocol'))
            self.get_port(value=options.get('port'))
            try:
                config = self.fetch_validator_config()
                if not self.is_config_valid(config):
                    continue
                self.get_trust(value=options.get('trust'))
                self.handle_primary_validator_config(config)
            except Exception as e:
                # Broad catch keeps the retry loop alive on any failure
                # (network error, bad response shape, subclass hook error).
                self._error('Unable to connect')
                self._error(e)
                continue
            connected = True
        self.stdout.write(self.style.SUCCESS('Success'))

    def handle_primary_validator_config(self, primary_validator_config):
        """Handle primary validator configuration data (subclass hook)"""
        raise NotImplementedError(
            'subclasses of FetchPrimaryValidatorConfig must provide a handle_primary_validator_config() method'
        )

    def is_config_valid(self, config):
        """Validate config response data from the validator"""
        if config.get('node_type') != PRIMARY_VALIDATOR:
            self._error(f'node_type is not {PRIMARY_VALIDATOR}')
            return False
        return True
|
achalpatel/thenewboston-python | setup.py | # -*- coding: utf-8 -*-
import re

from setuptools import find_packages, setup

# Long description comes straight from the README.
with open('README.md', encoding='utf-8') as readme_file:
    long_description = readme_file.read()

# Install requirements are pinned by the checked-in requirements file.
with open('requirements.txt', encoding='utf-8') as requirements_file:
    requirements = requirements_file.read().splitlines()

# Single-source the version string from the package __init__.
with open('src/thenewboston/__init__.py', encoding='utf8') as init_file:
    version = re.search(r'__version__ = \'(.*?)\'', init_file.read()).group(1)

setup(
    install_requires=requirements,
    long_description=long_description,
    long_description_content_type='text/markdown',
    name='thenewboston',
    packages=find_packages(exclude=['tests', 'tests.*']),
    version=version,
)
|
achalpatel/thenewboston-python | src/thenewboston/factories/created_modified.py | from factory import Faker
from factory.django import DjangoModelFactory
from ..models.created_modified import CreatedModified
class CreatedModifiedFactory(DjangoModelFactory):
    """Abstract factory for the CreatedModified base model; fakes its timestamp pair."""

    created_date = Faker('date')
    modified_date = Faker('date')

    class Meta:
        model = CreatedModified
        abstract = True  # base factory only; concrete model factories subclass it
|
achalpatel/thenewboston-python | src/thenewboston/utils/format.py | <reponame>achalpatel/thenewboston-python
def format_address(*, ip_address, port, protocol):
    """Return `protocol://ip_address[:port]`; the port segment is omitted when port is falsy."""
    if port:
        return f'{protocol}://{ip_address}:{port}'
    return f'{protocol}://{ip_address}'
|
achalpatel/thenewboston-python | src/thenewboston/third_party/factory/utils.py | import factory
def build_json(factory_class, **kwargs):
    """Build a plain-dict (JSON-ready) representation of an object via its factory.

    Delegates to ``factory.build`` with ``dict`` as the target class, so the
    factory's declared fields land in a dictionary instead of a model instance.
    """
    return factory.build(dict, FACTORY_CLASS=factory_class, **kwargs)
|
achalpatel/thenewboston-python | src/thenewboston/models/network_node.py | from uuid import uuid4
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from thenewboston.constants.network import MAX_POINT_VALUE, MIN_POINT_VALUE, PROTOCOL_CHOICES, VERIFY_KEY_LENGTH
class NetworkNode(models.Model):
    """Abstract base model for a node on the network."""

    # UUID primary key avoids exposing sequential ids.
    id = models.UUIDField(default=uuid4, editable=False, primary_key=True) # noqa: A003
    account_number = models.CharField(max_length=VERIFY_KEY_LENGTH)
    ip_address = models.GenericIPAddressField()
    # Key identifying the node itself (distinct from its account number).
    node_identifier = models.CharField(max_length=VERIFY_KEY_LENGTH, unique=True)
    # Optional; 65535 is the maximum valid TCP/UDP port.
    port = models.PositiveIntegerField(blank=True, null=True, validators=[MaxValueValidator(65535)])
    protocol = models.CharField(choices=PROTOCOL_CHOICES, max_length=5)
    version = models.CharField(max_length=32)

    # Fees
    default_transaction_fee = models.PositiveBigIntegerField(
        default=MIN_POINT_VALUE,
        validators=[
            MaxValueValidator(MAX_POINT_VALUE),
            MinValueValidator(MIN_POINT_VALUE),
        ]
    )

    class Meta:
        abstract = True
        indexes = [
            models.Index(fields=['ip_address', 'port', 'protocol']),
        ]
        # An address triple may appear only once per concrete node table.
        constraints = [
            models.UniqueConstraint(
                fields=['ip_address', 'port', 'protocol'],
                name='%(app_label)s_%(class)s_unique_ip_port_proto')
        ]
|
achalpatel/thenewboston-python | src/thenewboston/constants/errors.py | <reponame>achalpatel/thenewboston-python
ERROR = 'ERROR'
# Error types
BAD_SIGNATURE = 'BAD_SIGNATURE'
UNKNOWN = 'UNKNOWN'
|
achalpatel/thenewboston-python | src/thenewboston/factories/network_node.py | <reponame>achalpatel/thenewboston-python<gh_stars>100-1000
from factory import Faker
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyChoice
from ..constants.network import MAX_POINT_VALUE, MIN_POINT_VALUE, PROTOCOL_CHOICES, VERIFY_KEY_LENGTH
from ..models.network_node import NetworkNode
class NetworkNodeFactory(DjangoModelFactory):
    """Abstract factory producing NetworkNode test data."""

    account_number = Faker('text', max_nb_chars=VERIFY_KEY_LENGTH)
    default_transaction_fee = Faker('pyint', max_value=MAX_POINT_VALUE, min_value=MIN_POINT_VALUE)
    ip_address = Faker('ipv4')
    node_identifier = Faker('text', max_nb_chars=VERIFY_KEY_LENGTH)
    port = Faker('random_int', min=0, max=65535)
    # NOTE(review): picks index 1 of each (value, label) pair; Django models
    # normally store index 0 (the value) -- confirm PROTOCOL_CHOICES layout.
    protocol = FuzzyChoice([p[1] for p in PROTOCOL_CHOICES])
    version = Faker('text', max_nb_chars=32)

    class Meta:
        model = NetworkNode
        abstract = True
|
achalpatel/thenewboston-python | src/thenewboston/utils/serializers.py | from rest_framework import serializers
def validate_keys(serializer_instance, data):
    """Reject *data* carrying keys the serializer does not declare.

    Raises serializers.ValidationError naming the offending keys; returns
    None when every key is declared on the serializer.
    """
    declared = set(serializer_instance.fields.keys())
    invalid_keys = set(data.keys()) - declared
    if invalid_keys:
        raise serializers.ValidationError(f'Invalid keys {invalid_keys}')
|
achalpatel/thenewboston-python | src/thenewboston/utils/signed_requests.py | from thenewboston.blocks.signatures import generate_signature
from thenewboston.utils.tools import sort_and_encode
from thenewboston.verify_keys.verify_key import encode_verify_key, get_verify_key
def generate_signed_request(*, data, nid_signing_key):
    """Sign *data* with the node-identifier key and wrap it in a request envelope.

    The signature covers the canonical (sorted, compact) encoding of *data*.
    """
    verify_key = get_verify_key(signing_key=nid_signing_key)
    encoded_message = sort_and_encode(data)
    return {
        'message': data,
        'node_identifier': encode_verify_key(verify_key=verify_key),
        'signature': generate_signature(message=encoded_message, signing_key=nid_signing_key),
    }
|
achalpatel/thenewboston-python | src/thenewboston/third_party/pytest/client.py | from rest_framework.status import HTTP_200_OK, HTTP_201_CREATED, HTTP_204_NO_CONTENT
from rest_framework.test import APIClient
class APIError(Exception):
    """Raised when an API response status differs from the expected one.

    Carries the raw response object and the status code that was expected.
    """

    def __init__(self, *args, resp, expected):
        super().__init__(*args)
        self.resp = resp
        self.expected = expected
class UserWrapper:
    """Thin convenience wrapper around DRF's APIClient for tests.

    The plain verb methods (get/post/...) return the raw response; the
    ``*_json`` variants additionally assert the status code and decode JSON.
    """

    def __init__(self, user=None):
        """Creates APIClient and authenticates the user"""
        self.user = user
        self.client = APIClient()
        self.client.force_authenticate(user)

    def _method(self, method, url, data):
        # Dispatch to the APIClient verb by name; always send JSON bodies.
        return getattr(self.client, method)(url, data, format='json')

    @staticmethod
    def check_code(resp, expected):
        # On mismatch raise APIError with a body excerpt (truncated to 1000 chars).
        if resp.status_code != expected:
            msg = '{} {}'.format(resp.status_code, resp.content)[:1000]
            raise APIError(msg, resp=resp, expected=expected)

    def delete(self, url, data=None):
        return self._method('delete', url, data or {})

    def delete_json(self, *args, expected=HTTP_204_NO_CONTENT, **kwargs):
        response = self.delete(*args, **kwargs)
        self.check_code(response, expected)
        # A 204 response must not carry a body.
        assert not response.content, response.content

    def get(self, url, data=None):
        return self._method('get', url, data or {})

    def get_json(self, *args, expected=HTTP_200_OK, **kwargs):
        response = self.get(*args, **kwargs)
        self.check_code(response, expected)
        return response.json()

    def patch(self, url, data):
        return self._method('patch', url, data)

    def patch_json(self, *args, expected=HTTP_200_OK, **kwargs):
        response = self.patch(*args, **kwargs)
        self.check_code(response, expected)
        return response.json()

    def post(self, url, data):
        return self._method('post', url, data)

    def post_json(self, *args, expected=HTTP_201_CREATED, **kwargs):
        response = self.post(*args, **kwargs)
        self.check_code(response, expected)
        return response.json()

    def put(self, url, data):
        return self._method('put', url, data)

    def put_json(self, *args, expected=HTTP_200_OK, **kwargs):
        response = self.put(*args, **kwargs)
        self.check_code(response, expected)
        return response.json()
|
def all_field_names(model):
    """Return the names of every field declared on *model*, relations included."""
    return [field.name for field in model._meta.get_fields()]
def common_field_names(a, b):
    """Return the standard field names shared by models *a* and *b*."""
    return standard_field_names(a) & standard_field_names(b)
def standard_field_names(model):
    """Return the set of plain data-field names on *model*.

    Excludes the primary key, the audit-timestamp columns, and every
    relational field.
    """
    skip = {'id', 'created_date', 'modified_date'}
    names = set()
    for field in model._meta.get_fields():
        if field.is_relation or field.name in skip:
            continue
        names.add(field.name)
    return names
|
achalpatel/thenewboston-python | src/thenewboston/serializers/account_balance.py | from rest_framework import serializers
from thenewboston.constants.network import MAX_POINT_VALUE, VERIFY_KEY_LENGTH
class AccountBalanceSerializer(serializers.Serializer):
    """Serializes a single account-balance entry (account number -> balance)."""

    account_number = serializers.CharField(max_length=VERIFY_KEY_LENGTH)
    balance = serializers.IntegerField(min_value=0, max_value=MAX_POINT_VALUE)

    def create(self, validated_data):
        # Read-only serializer; creation is intentionally unsupported.
        pass

    def update(self, instance, validated_data):
        # Read-only serializer; updates are intentionally unsupported.
        pass
|
achalpatel/thenewboston-python | src/thenewboston/utils/files.py | <reponame>achalpatel/thenewboston-python<filename>src/thenewboston/utils/files.py
import json
from hashlib import sha3_256 as sha3
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
def get_file_hash(file):
    """Return the SHA3-256 hex digest of *file*, read via default_storage.

    Reads in 1 KiB chunks so arbitrarily large files hash in constant memory.
    """
    h = sha3()
    # Fix: the original did `with ... as file`, shadowing the `file` argument;
    # bind the handle to its own name and loop until read() returns b''.
    with default_storage.open(file, 'rb') as stream:
        for chunk in iter(lambda: stream.read(1024), b''):
            h.update(chunk)
    return h.hexdigest()
def read_json(file):
    """Return the parsed contents of a JSON file, or None when it is missing."""
    try:
        with default_storage.open(file, 'r') as stream:
            return json.load(stream)
    except FileNotFoundError:
        return None
def write_json(file, data):
    """Write *data* as JSON to *file* via default_storage, replacing any old copy."""
    try:
        # Storage backends won't overwrite on save(); remove the old file first.
        default_storage.delete(file)
    except NotImplementedError:
        # NOTE(review): some backends don't implement delete; assumes save()
        # still behaves acceptably there -- confirm for the deployed backend.
        pass
    default_storage.save(
        file,
        ContentFile(
            json.dumps(data)
        )
    )
|
achalpatel/thenewboston-python | src/thenewboston/serializers/network_transaction.py | <reponame>achalpatel/thenewboston-python
from rest_framework import serializers
from thenewboston.constants.network import ACCEPTED_FEE_LIST, MAX_POINT_VALUE, MIN_POINT_VALUE, VERIFY_KEY_LENGTH
from thenewboston.utils.serializers import validate_keys
class NetworkTransactionSerializer(serializers.Serializer):
    """Validates a single transaction within a block message."""

    amount = serializers.IntegerField(max_value=MAX_POINT_VALUE, min_value=MIN_POINT_VALUE)
    # Optional: present only on fee transactions.
    fee = serializers.ChoiceField(choices=ACCEPTED_FEE_LIST, required=False)
    recipient = serializers.CharField(max_length=VERIFY_KEY_LENGTH, min_length=VERIFY_KEY_LENGTH)

    def create(self, validated_data):
        # Read-only serializer; creation is intentionally unsupported.
        pass

    def update(self, instance, validated_data):
        # Read-only serializer; updates are intentionally unsupported.
        pass

    def validate(self, data):
        """Check that there are no additional keys included in the data"""
        validate_keys(self, data)
        return data

    @staticmethod
    def validate_amount(amount):
        """Check that amount is not 0"""
        # Zero-amount transactions should simply be omitted from the block.
        if amount == 0:
            raise serializers.ValidationError('Tx amount can not be 0 (Tx should be excluded)')
        return amount

    @staticmethod
    def validate_recipient(recipient):
        """Check that recipient is a valid hexadecimal"""
        try:
            int(recipient, 16)
        except ValueError:
            raise serializers.ValidationError('Recipient must be a valid hexadecimal')
        return recipient
|
achalpatel/thenewboston-python | src/thenewboston/constants/crawl.py | CRAWL_COMMAND_START = 'start'
CRAWL_COMMAND_STOP = 'stop'

# Crawl status values reported by a node.
CRAWL_STATUS_CRAWLING = 'crawling'
CRAWL_STATUS_NOT_CRAWLING = 'not_crawling'
CRAWL_STATUS_STOP_REQUESTED = 'stop_requested'
|
achalpatel/thenewboston-python | src/thenewboston/argparser/validators.py | <filename>src/thenewboston/argparser/validators.py
# -*- coding: utf-8 -*-
import argparse
import decimal
import math
from pathlib import Path
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator, validate_ipv4_address, validate_ipv6_address
def str_length_validator(length: int = None, min_len: int = None, max_len: int = None):
    """Argparse validator factory for strings.

    Args:
        length: exact required length, when given.
        min_len: inclusive lower bound on the length, when given.
        max_len: inclusive upper bound on the length, when given.

    Returns:
        A callable for argparse ``type=`` that returns the value unchanged or
        raises ``argparse.ArgumentTypeError``.
    """
    def inner(value):
        if not isinstance(value, str):
            raise argparse.ArgumentTypeError('Expecting string')
        # Fix: compare bounds against None explicitly -- the original used
        # truthiness, which silently ignored a bound of 0.
        if length is not None and not len(value) == length:
            raise argparse.ArgumentTypeError('String length should be exactly %s chars' % length)
        if min_len is not None and len(value) < min_len:
            raise argparse.ArgumentTypeError('String length should be greater or equal to %s chars' % min_len)
        if max_len is not None and len(value) > max_len:
            raise argparse.ArgumentTypeError('String length should be less or equal to %s chars' % max_len)
        return value
    return inner
def int_validator(min_val: int = None, max_val: int = None):
    """Argparse validator factory for integers.

    Bounds are inclusive and checked with ``is not None`` so that a bound of
    0 is honoured (the original truthiness test skipped zero bounds, letting
    negative values pass a ``min_val=0`` validator).

    Returns:
        A callable for argparse ``type=`` that returns the parsed int or
        raises ``argparse.ArgumentTypeError``.
    """
    def inner(value):
        try:
            value = int(value)
        except ValueError:
            raise argparse.ArgumentTypeError('Value is not a valid integer')
        if min_val is not None and value < min_val:
            raise argparse.ArgumentTypeError('Value can not be less than %s' % min_val)
        if max_val is not None and value > max_val:
            raise argparse.ArgumentTypeError('Value can not be greater than %s' % max_val)
        return value
    return inner
def decimal_validator(min_val=None, max_val=None):
    """Argparse validator factory for decimals.

    Args:
        min_val: inclusive lower bound (int or decimal.Decimal), optional.
        max_val: inclusive upper bound (int or decimal.Decimal), optional.

    NaN and infinities are rejected.  Bounds are compared with ``is not None``
    so a bound of 0 is honoured (the original truthiness test skipped it).
    The original annotation ``[int, decimal.Decimal]`` was a list literal, not
    a type; it has been dropped in favour of this docstring.
    """
    def inner(value):
        try:
            value = decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise argparse.ArgumentTypeError('Value is not a valid decimal')
        if math.isnan(value):
            raise argparse.ArgumentTypeError('NaN is not allowed')
        if math.isinf(value):
            raise argparse.ArgumentTypeError('Infinity is not allowed')
        if min_val is not None and value < min_val:
            raise argparse.ArgumentTypeError(f'Value can not be less than {min_val}')
        if max_val is not None and value > max_val:
            raise argparse.ArgumentTypeError(f'Value can not be greater than {max_val}')
        return value
    return inner
def ipv46_validator():
    """Argparse validator factory accepting IPv4 or IPv6 address strings."""
    def inner(value):
        # Accept the value as soon as either Django validator passes.
        for check in (validate_ipv4_address, validate_ipv6_address):
            try:
                check(value)
                return value
            except ValidationError:
                continue
        raise argparse.ArgumentTypeError('Enter a valid IPv4 or IPv6 address')
    return inner
def url_validator(suffix: str = None):
    """Argparse validator factory for http(s) URLs.

    Args:
        suffix: when given, the URL's path must end with this file suffix
            (e.g. ``'.json'``).
    """
    def inner(value):
        try:
            validator = URLValidator(schemes=['http', 'https'])
            validator(value)
        except ValidationError:
            raise argparse.ArgumentTypeError('Invalid URL')
        # Fix: the original message hard-coded 'JSON file required' even when
        # a different suffix was requested; report the actual suffix.
        if suffix and Path(value).suffix != suffix:
            raise argparse.ArgumentTypeError('%s file required' % suffix)
        return value
    return inner
|
def validate_transaction_exists(*, amount, fee, error, recipient, txs):
    """Raise *error* unless *txs* contains a matching transaction.

    A transaction matches when its fee and recipient are equal and its amount
    is at least *amount*.  *error* is an exception class (or factory) called
    with a detail dict.
    """
    for tx in txs:
        # Same comparison order as before: amount first, then fee, recipient.
        if tx.get('amount') >= amount and tx.get('fee') == fee and tx.get('recipient') == recipient:
            return
    raise error({
        'error_message': 'Tx not found',
        'expected_amount': amount,
        'expected_fee': fee,
        'expected_recipient': recipient
    })
|
achalpatel/thenewboston-python | src/thenewboston/serializers/network_block.py | from rest_framework import serializers
from thenewboston.blocks.signatures import verify_signature
from thenewboston.constants.network import BANK, PRIMARY_VALIDATOR, SIGNATURE_LENGTH, VERIFY_KEY_LENGTH
from thenewboston.serializers.message import MessageSerializer
from thenewboston.utils.serializers import validate_keys
from thenewboston.utils.tools import sort_and_encode
class NetworkBlockSerializer(serializers.Serializer):
    """Validates a signed block: signature, recipient/fee uniqueness, keys."""

    account_number = serializers.CharField(max_length=VERIFY_KEY_LENGTH, min_length=VERIFY_KEY_LENGTH)
    message = MessageSerializer()
    signature = serializers.CharField(max_length=SIGNATURE_LENGTH, min_length=SIGNATURE_LENGTH)

    def create(self, validated_data):
        # Read-only serializer; creation is intentionally unsupported.
        pass

    def update(self, instance, validated_data):
        # Read-only serializer; updates are intentionally unsupported.
        pass

    def validate(self, data):
        """Validate signature, unique Tx recipients, unique Tx fees and account_number not included as a Tx recipient"""
        account_number = data['account_number']
        message = data['message']
        txs = message['txs']
        signature = data['signature']

        # The signature must cover the canonical (sorted, compact) encoding
        # of the message, verified against the block's own account key.
        verify_signature(
            message=sort_and_encode(message),
            signature=signature,
            verify_key=account_number
        )

        # Each recipient may appear at most once per block.
        recipient_list = [tx['recipient'] for tx in txs]
        recipient_set = set(recipient_list)
        if len(recipient_list) != len(recipient_set):
            raise serializers.ValidationError('Tx recipients must be unique')

        # A block may not pay its own account.
        if account_number in recipient_set:
            raise serializers.ValidationError('Block account_number not allowed as Tx recipient')

        # At most one bank fee and one primary validator fee per block.
        bank_fee_exists = False
        primary_validator_fee_exists = False
        for tx in txs:
            fee = tx.get('fee', None)
            if fee is None:
                continue
            if fee == BANK:
                if bank_fee_exists:
                    raise serializers.ValidationError('Multiple bank fees not allowed')
                else:
                    bank_fee_exists = True
            if fee == PRIMARY_VALIDATOR:
                if primary_validator_fee_exists:
                    raise serializers.ValidationError('Multiple primary validator fees not allowed')
                else:
                    primary_validator_fee_exists = True

        validate_keys(self, data)
        return data
|
achalpatel/thenewboston-python | src/thenewboston/serializers/confirmation_block_message.py | <filename>src/thenewboston/serializers/confirmation_block_message.py
from rest_framework import serializers
from thenewboston.constants.network import BLOCK_IDENTIFIER_LENGTH
from thenewboston.serializers.account_balance import AccountBalanceSerializer
from thenewboston.serializers.network_block import NetworkBlockSerializer
class ConfirmationBlockMessageSerializer(serializers.Serializer):
    """Validates the message portion of a confirmation block."""

    block = NetworkBlockSerializer()
    block_identifier = serializers.CharField(max_length=BLOCK_IDENTIFIER_LENGTH)
    # Presumably the account balances after applying the block -- confirm.
    updated_balances = AccountBalanceSerializer(many=True)

    def create(self, validated_data):
        # Read-only serializer; creation is intentionally unsupported.
        pass

    def update(self, instance, validated_data):
        # Read-only serializer; updates are intentionally unsupported.
        pass
|
achalpatel/thenewboston-python | src/thenewboston/constants/clean.py | <gh_stars>100-1000
# Commands accepted by the clean process.
CLEAN_COMMAND_START = 'start'
CLEAN_COMMAND_STOP = 'stop'

# Clean status values reported by a node.
CLEAN_STATUS_CLEANING = 'cleaning'
CLEAN_STATUS_NOT_CLEANING = 'not_cleaning'
CLEAN_STATUS_STOP_REQUESTED = 'stop_requested'
|
achalpatel/thenewboston-python | src/thenewboston/utils/tools.py | <reponame>achalpatel/thenewboston-python
import json
def sort_and_encode(dictionary):
    """Serialize *dictionary* to canonical compact JSON and return UTF-8 bytes.

    Keys are sorted and the separators carry no whitespace, so equal dicts
    always produce identical byte strings (needed for stable hashing/signing).
    """
    canonical = json.dumps(dictionary, separators=(',', ':'), sort_keys=True)
    return canonical.encode('utf-8')
|
achalpatel/thenewboston-python | src/thenewboston/serializers/message.py | <gh_stars>100-1000
from rest_framework import serializers
from thenewboston.constants.network import BALANCE_LOCK_LENGTH
from thenewboston.serializers.network_transaction import NetworkTransactionSerializer
from thenewboston.utils.serializers import validate_keys
class MessageSerializer(serializers.Serializer):
    """Validates the signed portion of a block: balance key plus transactions."""

    balance_key = serializers.CharField(max_length=BALANCE_LOCK_LENGTH)
    txs = NetworkTransactionSerializer(many=True)

    def create(self, validated_data):
        # Read-only serializer; creation is intentionally unsupported.
        pass

    def update(self, instance, validated_data):
        # Read-only serializer; updates are intentionally unsupported.
        pass

    def validate(self, data):
        """Validate Txs exist"""
        # An empty transaction list is not a valid message.
        if not data['txs']:
            raise serializers.ValidationError('Invalid Txs')
        validate_keys(self, data)
        return data
|
achalpatel/thenewboston-python | src/thenewboston/utils/messages.py | from hashlib import sha3_256 as sha3
from thenewboston.utils.tools import sort_and_encode
def get_message_hash(*, message):
    """Return the SHA3-256 hex digest of the canonically encoded *message*."""
    digest = sha3(sort_and_encode(message)).digest()
    return digest.hex()
|
achalpatel/thenewboston-python | src/thenewboston/factories/confirmation_service.py | <filename>src/thenewboston/factories/confirmation_service.py
import pytz
from factory import Faker
from .created_modified import CreatedModifiedFactory
from ..models.confirmation_service import ConfirmationService
class ConfirmationServiceFactory(CreatedModifiedFactory):
    """Factory producing ConfirmationService instances with UTC-aware datetimes."""

    end = Faker('date_time', tzinfo=pytz.utc)
    start = Faker('date_time', tzinfo=pytz.utc)

    class Meta:
        model = ConfirmationService
|
achalpatel/thenewboston-python | src/thenewboston/factories/network_transaction.py | from factory import Faker, Iterator
from factory.django import DjangoModelFactory
from ..constants.network import ACCEPTED_FEE_LIST, MAX_POINT_VALUE, MIN_POINT_VALUE, VERIFY_KEY_LENGTH
from ..models.network_transaction import NetworkTransaction
class NetworkTransactionFactory(DjangoModelFactory):
    """Factory producing NetworkTransaction test data.

    The fee cycles through the accepted fee values plus '' (no fee).
    """

    amount = Faker('pyint', max_value=MAX_POINT_VALUE, min_value=MIN_POINT_VALUE)
    fee = Iterator(ACCEPTED_FEE_LIST + [''])
    recipient = Faker('text', max_nb_chars=VERIFY_KEY_LENGTH)

    class Meta:
        model = NetworkTransaction
|
achalpatel/thenewboston-python | src/thenewboston/models/network_transaction.py | from uuid import uuid4
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from thenewboston.constants.network import ACCEPTED_FEE_CHOICES, MAX_POINT_VALUE, MIN_POINT_VALUE, VERIFY_KEY_LENGTH
class NetworkTransaction(models.Model):
    """Abstract base model for a single transfer within a block."""

    # UUID primary key avoids exposing sequential ids.
    id = models.UUIDField(default=uuid4, editable=False, primary_key=True) # noqa: A003
    amount = models.PositiveBigIntegerField(
        validators=[
            MaxValueValidator(MAX_POINT_VALUE),
            MinValueValidator(MIN_POINT_VALUE),
        ]
    )
    # Blank for ordinary transfers; otherwise one of the accepted fee types.
    fee = models.CharField(blank=True, choices=ACCEPTED_FEE_CHOICES, max_length=17)
    recipient = models.CharField(max_length=VERIFY_KEY_LENGTH)

    class Meta:
        abstract = True
|
achalpatel/thenewboston-python | src/thenewboston/utils/exceptions.py | <gh_stars>100-1000
class NetworkException(Exception):
    """Base exception for network-related failures."""
|
rymurr/q | q/__init__.py | from parser import parse
from unparser import format_bits
from conn import connect
|
rymurr/q | tickerplant.py | <filename>tickerplant.py<gh_stars>0
from gevent import monkey; monkey.patch_all()
import gevent
import socket
import array
import time
import cStringIO
import bitstring
from q.unparser import format_bits
from q.utils import get_header
from q.parser import parse
def foo():
    """Subscribe to the `trade table on a local kdb+ tickerplant and yield rows.

    NOTE(review): Python 2 only (`print` statements, cStringIO, the `async`
    keyword argument to format_bits, which is a reserved word in Python 3.7+).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost',5010))
    login = array.array('b','' + '\x03\x00') #null terminated signed char array (bytes)
    sock.send(login.tostring())
    result = sock.recv(1) #blocking recv
    # Send the subscription request for the `trade table as an async message.
    sock.send(format_bits('.u.sub[`trade;`]', async=True, symbol=False, endianness='be').tobytes())
    while True:
        data=cStringIO.StringIO()
        # First 8 bytes are the k/q IPC header; get_header extracts the size.
        header = sock.recv(8)
        data.write(header)
        data.reset()
        _,size = get_header(bitstring.ConstBitStream(bytes=data.read()))
        print size
        # Keep reading until the whole message body has arrived.
        while True:
            data.write(sock.recv(size))
            if data.tell() < size:
                continue
            else:
                break
        data.reset()
        xxx = bitstring.ConstBitStream(bytes=data.read())
        # Yield the last element of the parsed message -- presumably the
        # payload; confirm against q.parser.parse.
        yield parse(xxx)[-1]


if __name__ == '__main__':
    # Print each update as it arrives from the tickerplant.
    for i in foo():
        print i
|
rymurr/q | q/utils.py | import datetime
import itertools
from protocol import types, inv_types, header_format, MILLIS, Y2KDAYS, NULL, BYTE, INT
class iter_char(object):
    """Iterable yielding one BYTE-formatted value at a time from a bitstream."""

    def __init__(self, bstream, endianness):
        self.bstream = bstream  # bitstring stream positioned at the chars to read
        self.endianness = endianness  # 'le' or 'be', forwarded to format()

    def __iter__(self):
        # Read byte values until the stream is exhausted.
        while self.bstream.pos < self.bstream.len:
            x = self.bstream.read(format(BYTE,self.endianness))
            yield x
def str_convert(bstream, endianness):
    """Read chars from the stream up to the first NUL byte and return them as a str."""
    return ''.join([chr(i) for i in itertools.takewhile(lambda x: x!=0, iter_char(bstream, endianness))])
def format(val_type, endianness):
    """Build a bitstring read-format token (e.g. 'intle:32') for a q type code.

    NOTE(review): shadows the builtin ``format``; kept for module compatibility.
    """
    type_spec = types[val_type]
    return type_spec[0]+endianness+':'+type_spec[1]
def format_list(val_type, endianness, length):
    """Build a bitstring format token for *length* consecutive items of one q type."""
    type_spec = types[val_type]
    return str(length)+'*'+type_spec[0]+endianness+':'+type_spec[1]
def get_header(bstream):
    """Parse the 8-byte k/q IPC message header; return (endianness, size)."""
    endian = bstream.read(8).int  # first byte: 1 means little-endian
    msg_type = bstream.read(8).int  # message-type byte (read but unused here)
    bstream.read(16)  # skip two reserved bytes
    endianness = 'le' if endian == 1 else 'be'
    size = bstream.read(format(INT, endianness))  # total message length
    return endianness, size
def get_date_from_q(i):
    """Convert a q month count (months relative to 2000.01) to a datetime.

    Fix: uses floor division (``//``) so the result is identical under
    Python 2 and 3 -- the original ``/`` relied on Python 2 integer division
    and would produce a float (and a TypeError) on Python 3.
    """
    m = i + 24000  # months since year 0
    year = m // 12
    month = m % 12 + 1
    day = 1  # q months carry no day component; pin to the 1st
    return datetime.datetime(year, month, day)
def get_hour(i):
    """Convert a q time value to ``datetime.time``.

    The unit of *i* is inferred from its magnitude: milliseconds since
    midnight (q time), seconds since midnight (q second), or minutes since
    midnight (q minute).  Values under one hour are ambiguous and treated as
    minutes, matching the original behaviour.

    Fix: uses floor division (``//``) so the arithmetic is identical under
    Python 2 and 3 -- the original ``/`` relied on Python 2 integer division.
    """
    if i // 3600000 > 0:
        # Milliseconds since midnight; the sub-second remainder is passed as
        # the microsecond field (kept from the original).
        seconds = i // 1000
        return datetime.time(seconds // 3600, (seconds // 60) % 60, seconds % 60, i % 1000)
    if i // 3600 > 0:
        # Seconds since midnight.
        return datetime.time(i // 3600, (i // 60) % 60, i % 60, 0)
    # Minutes since midnight.
    return datetime.time(i // 60, i % 60, 0, 0)
def format_raw_list(val_type, length):
    """Return (raw format char, total length) for reading a raw list.

    NOTE(review): type_spec[1] is presumably the per-element width used by
    the protocol table -- confirm against protocol.types.
    """
    type_spec = types[val_type]
    return type_spec[2], length*int(type_spec[1])
|
rymurr/q | q/conn.py | <filename>q/conn.py
'''
TODO sort out send and receive in the cursor function! Lots of mess there
'''
import socket
import array
import cStringIO
from bitstring import ConstBitStream
from parser import parse
from unparser import format_bits
from utils import get_header
def connect(host='localhost', port=5000, user='', password=''):
    '''
    Open and return a new Connection to a q process.
    '''
    return Connection(host, port, user, password)
class Connection(object):
    '''
    connection class for q wrapper
    TODO make it look/act like DB-API 2.0 compliant db interface
    see http://www.python.org/dev/peps/pep-0249/

    NOTE(review): Python 2 only -- `async` is used as a keyword argument
    below (reserved word from Python 3.7) and cStringIO has no Python 3
    counterpart under that name.
    '''
    def __init__(self, host='localhost', port=5000, user='', password = ''):
        # Credentials are sent to q as 'user:password' during the handshake.
        self.host=host
        self.port=port
        self.user=user + ':' + password
        self.sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect()
    def close(self):
        '''
        close socket and end connection
        '''
        self.sock.close()
    def connect(self):
        '''
        make connection or throw an error
        '''
        try:
            self.sock.connect((self.host,self.port))
            login = array.array('b',self.user + '\x03\x00') #null terminated signed char array (bytes)
            self.sock.send(login.tostring())
            result = self.sock.recv(1) #blocking recv
            if not result:
                raise Exception("access denied")
        # NOTE(review): bare except swallows everything, including the
        # "access denied" raised above, and masks the real cause -- consider
        # narrowing to socket.error and preserving the original message.
        except:
            raise Exception ('unable to connect to host')
    def execute(self, query):
        # Send the query and block until a full response is parsed.
        self._send(query)
        return self._receive()
    def _send(self, query):
        # Encode the query as an async big-endian k/q IPC message.
        message = format_bits(query, async=True, symbol=False, endianness='be')
        #print message
        self.last_outgoing=message  # kept for debugging/inspection
        self.sock.send(message.tobytes())
        #self.sock.send(array.array('b',"1+2\0").tostring())#message.bytes)
    def _receive(self):
        """read the response from the server"""
        bytes = self._recv_size()
        val = parse(ConstBitStream(bytes=bytes))
        return val
    def _recv_size(self, size=8192):
        """read size bytes from the socket."""
        data=cStringIO.StringIO()
        recv_size=size
        # Short timeout so the loop ends once the server stops sending.
        # NOTE(review): relies on the timeout instead of the message-size
        # header, so a slow server could truncate the response -- confirm.
        self.sock.settimeout(0.1)
        while True:
            try:
                data.write(self.sock.recv(size))
            except:
                break
            if data.tell() < recv_size:
                break
            else:
                recv_size += size
        data.reset()
        return data.read()
|
rymurr/q | q/tests/test_protocol.py | <reponame>rymurr/q
'''
tests come from examples given here:
http://code.kx.com/wiki/Reference/ipcprotocol
types are found here:
http://www.kx.com/q/d/q1.htm
'''
import pandas
import datetime
import numpy as np
from collections import OrderedDict
from nose.tools import assert_almost_equal, assert_items_equal
from q.parser import parse
from q.unparser import format_bits
def assert_all_equal(input_a, input_b):
    """Return True when the two sequences of arrays are elementwise equal.

    Fix: the original fell off the end on success and returned None, so the
    function could never be used as a true/false predicate.  Callers that
    invoke it as a bare statement are unaffected.
    """
    if len(input_a) != len(input_b):
        return False
    for a, b in zip(input_a, input_b):
        if not (a == b).all():
            return False
    return True
def test_int():
data = 1
bits = b'0x010000000d000000fa01000000'
assert data == parse(format_bits(data))
assert data == parse(bits)
assert bits == format_bits(data).__str__()
def test_int_vector():
data = np.array([1])
bits = b'0x010000001200000006000100000001000000'
assert data == parse(format_bits(data))
assert data == parse(bits)
assert bits == format_bits(data).__str__()
def test_byte_vector():
data = np.array([0,1,2,3,4], dtype=np.int8)
bits = b'0x01000000130000000400050000000001020304'
assert (data == parse(format_bits(data))).all()
assert (data == parse(bits)).all()
assert bits == format_bits(data)
def test_list():
data = [np.array([0,1,2,3,4], dtype=np.int8)]
bits = b'0x01000000190000000000010000000400050000000001020304'
assert_all_equal(data, parse(format_bits(data)))
assert_all_equal(data, parse(bits))
assert bits == format_bits(data)
def test_simple_dict():
data = {'a':2,'b':3}
bits = b'0x0100000021000000630b0002000000610062000600020000000200000003000000'
assert data == parse(format_bits(data))
assert data == parse(bits)
assert bits == format_bits(data).__str__()
def test_ordered_dict():
data = OrderedDict({'a':2,'b':3})
bits = b'0x01000000210000007f0b0102000000610062000600020000000200000003000000'
assert data == parse(format_bits(data))
assert data == parse(bits)
assert bits == format_bits(data).__str__()
def test_dict_vector():
data = {'a':[2], 'b':[3]}
bits = b'0x010000002d000000630b0002000000610062000000020000000600010000000200000006000100000003000000'
assert data == parse(format_bits(data))
assert data == parse(bits)
assert bits == format_bits(data).__str__()
def test_table_simple():
data = pandas.DataFrame({'a':[1,2],'b':[3,4]})
bits = b'0x01000000370000006200630b00020000006100620000000200000006000200000001000000020000000600020000000300000004000000'
assert bits == format_bits(data).__str__()
assert (data == parse(format_bits(data))).all().all()
assert (data.values == parse(bits).values).all()
def test_table_ordered():
data = pandas.DataFrame({'a':[1,2],'b':[3,4]})
bits = b'0x01000000370000006201630b00020000006100620000000200000006030200000001000000020000000600020000000300000004000000'
assert (data.values == parse(bits).values).all()
assert bits == format_bits(data, sort_on='a').__str__()
assert (data == parse(format_bits(data))).all().all()
def test_keyed_table():
data = pandas.DataFrame({'a':[2],'b':[3]}).set_index('a')
bits = b'0x010000003f000000636200630b00010000006100000001000000060001000000020000006200630b0001000000620000000100000006000100000003000000'
assert (data.values == parse(bits).values).all()
assert bits == format_bits(data, with_index=True).__str__()
assert (data == parse(format_bits(data, with_index=True))).all().all()
def test_sorted_keyed_table():
data = pandas.DataFrame([{'a':2,'b':3}]).set_index('a')
bits = b'0x010000003f0000007f6201630b00010000006100000001000000060001000000020000006200630b0001000000620000000100000006000100000003000000'
assert (data.values == parse(bits).values).all()
assert bits == format_bits(data, with_index=True, sort_on='a').__str__()
assert (data == parse(format_bits(data, with_index=True))).all().all()
def test_function():
data = '{x+y}'
bits = b'0x010000001500000064000a00050000007b782b797d'
assert data == parse(bits)
assert bits == format_bits(data, function=True).__str__()
assert data == parse(format_bits(data, function=True))
def test_non_root_function():
data = '.d{x+y}'
bits = b'0x01000000160000006464000a00050000007b782b797d'
assert data == parse(bits)
assert bits == format_bits(data, function=True).__str__()
assert data == parse(format_bits(data, function=True))
def test_bool():
data = False
bits = b'0x010000000a000000ff00'
assert data == parse(bits)
assert bits == format_bits(data).__str__()
assert data == parse(format_bits(data))
def test_bool_vector():
data = [False]
bits = b'0x010000000f00000001000100000000'
assert data == parse(bits)
assert bits == format_bits(data).__str__()
assert data == parse(format_bits(data))
def test_short():
data = 1
bits = b'0x010000000b000000fb0100'
assert data == parse(bits)
assert bits == format_bits(data, short=True).__str__()
assert data == parse(format_bits(data, short=True))
def test_short_vector():
data = [1]
bits = b'0x01000000100000000500010000000100'
assert data == parse(bits)
assert bits == format_bits(data, short=True).__str__()
assert data == parse(format_bits(data, short=True))
def test_long():
data = 1L
bits = b'0x0100000011000000f90100000000000000'
assert data == parse(bits)
assert bits == format_bits(data).__str__()
assert data == parse(format_bits(data))
def test_long_vector():
data = [1L]
bits = b'0x01000000160000000700010000000100000000000000'
assert data == parse(bits)
assert bits == format_bits(data).__str__()
assert data == parse(format_bits(data))
def test_real():
data = 2.3
bits = b'0x010000000d000000f833331340'
assert_almost_equal(data, parse(bits))
assert_almost_equal(data, parse(format_bits(data, short=True)))
assert bits == format_bits(data, short=True).__str__()
def test_real_vector():
data = [2.3]
bits = b'0x010000001200000008000100000033331340'
assert_almost_equal(data[0], parse(bits)[0])
assert_almost_equal(data, parse(format_bits(data, short=True)))
assert bits == format_bits(data, short=True).__str__()
def test_float():
data = 2.3
bits = b'0x0100000011000000f76666666666660240'
assert_almost_equal(data, parse(bits))
assert bits == format_bits(data).__str__()
assert data == parse(format_bits(data))
def test_float_vector():
data = [2.3]
bits = b'0x01000000160000000900010000006666666666660240'
assert_almost_equal(data[0], parse(bits)[0])
assert bits == format_bits(data).__str__()
assert data == parse(format_bits(data))
def test_month():
data = datetime.datetime(2012,6,1)
bits = b'0x010000000d000000f395000000'
assert data == parse(bits)
def test_month_vector():
data = [datetime.datetime(2012,6,1)]
bits = b'0x01000000120000000d000100000095000000'
assert data == parse(bits)
def test_date():
data = datetime.datetime(2012,6,8)
bits = b'0x010000000d000000f2be110000'
assert data == parse(bits)
def test_date_vector():
data = [datetime.datetime(2012,6,8)]
bits = b'0x01000000120000000e0001000000be110000'
assert data == parse(bits)
def test_minute():
data = datetime.time(8,31)
bits = b'0x010000000d000000efff010000'
assert data == parse(bits)
def test_minute_vector():
data = [datetime.time(8,31)]
bits = b'0x0100000012000000110001000000ff010000'
assert data == parse(bits)
def test_second():
data = datetime.time(8,31,53)
bits = b'0x010000000d000000eef9770000'
assert data == parse(bits)
def test_second_vector():
data = [datetime.time(8,31,53)]
bits = b'0x0100000012000000120001000000f9770000'
assert data == parse(bits)
def test_time():
data = datetime.time(8,31,53,981)
bits = b'0x010000000d000000ed7da8d401'
assert data == parse(bits)
def test_time_vector():
data = [datetime.time(8,31,53,981)]
bits = b'0x01000000120000001300010000007da8d401'
assert data == parse(bits)
def test_datetime():
data = datetime.datetime(2012,6,8,8,31,53,981000)
bits = b'0x0100000011000000f1e9941f015bbeb140'
assert data == parse(bits)
def test_datetime_vector():
data = [datetime.datetime(2012,6,8,8,31,53,981000)]
bits = b'0x01000000160000000f0001000000e9941f015bbeb140'
assert data == parse(bits)
def test_nanodatetime():
data = datetime.datetime(2012,6,8,8,31,53,981000)
bits = b'0x0100000011000000f040598f3fd94b7205'
assert data == parse(bits)
def test_nanodatetime_vector():
data = [datetime.datetime(2012,6,8,8,31,53,981000)]
bits = b'0x010000001600000010000100000040598f3fd94b7205'
assert data == parse(bits)
|
rymurr/q | rubbish/translate.py | <reponame>rymurr/q<filename>rubbish/translate.py
import ast
import astunparse
from cStringIO import StringIO
class Transformer(ast.NodeTransformer):
    """AST pass that collapses chained method calls into keyword arguments.

    E.g. an outer call in a chain like ``x.groupby(a).mean(b)`` is folded
    into keywords on the innermost call via nameAndArgsToKeyword.
    """

    def __init__(self):
        super(Transformer, self).__init__()

    def visit_Call(self, node):
        # Recurse first so inner chain links are already rewritten.
        child = self.visit(node.func)
        if isinstance(child.value, ast.Call):
            # Fold this call's name/args into a keyword on the inner call,
            # merging with an existing keyword of the same name when present.
            keyword = nameAndArgsToKeyword(node.func.attr, node.args+node.keywords)
            isThere = appendKeyword(child.value.keywords, keyword)
            if not isThere:
                child.value.keywords.append(keyword)
            return child.value
        else:
            return node
def appendKeyword(keywords, keyword):
    """Merge *keyword* into the entry of *keywords* with the same arg.

    Returns True when a matching entry was found (merged in place via
    combine); False when the caller should append *keyword* itself.
    """
    for existing in keywords:
        if existing.arg != keyword.arg:
            continue
        combine(existing, keyword)
        return True
    return False
def combine(keyword, newKeyword):
    """Fold newKeyword's payload into keyword in place (List elts or Dict pairs)."""
    target = keyword.value
    if isinstance(target, ast.List):
        target.elts.extend(newKeyword.value.elts)
    elif isinstance(target, ast.Dict):
        target.keys.extend(newKeyword.value.keys)
        target.values.extend(newKeyword.value.values)
# Method names understood by the chained-call collapser.
groupbySet = {'groupby'}
aggregationSet = {'last', 'mean'}


def nameAndArgsToKeyword(name, args):
    """Build the accumulator keyword for a chained-call method *name*.

    groupby-style names become a ``groupby`` Dict keyword; aggregation names
    become an ``aggregate`` Dict mapping the function name to its args.
    Raises NotImplementedError for anything else.
    """
    if name in groupbySet:
        return ast.keyword(arg='groupby', value=ast.Dict(keys=args, values=args))
    if name in aggregationSet:
        return ast.keyword(
            arg='aggregate',
            value=ast.Dict(keys=[ast.Str(name)], values=[ast.List(elts=args)]))
    raise NotImplementedError
def transformHelper(command):
    """Debug helper: transform *command* and dump/print the resulting AST.

    NOTE(review): Python 2 print statements; the bare ``except`` silently
    hides any astunparse failure -- consider narrowing it.
    """
    node = preprocess(command)
    print ast.dump(node)
    try:
        print astunparse.unparse(node)
    except:
        pass
    return node
def preprocess(command):
    """Parse *command* and run the chained-call collapsing Transformer over it."""
    return Transformer().visit(ast.parse(command))
def translate(command):
    """Compile a Python-style query string into its q-language text."""
    emitter = Parser()
    emitter.visit(preprocess(command))
    return emitter.writer.getvalue()
class Parser(ast.NodeVisitor):
    """Walks a (pre-collapsed) AST and emits q-language text into self.writer.

    NOTE(review): Python 2 module (bare ``print node`` statements).
    """
    def __init__(self):
        super(Parser, self).__init__()
        # Accumulates the emitted q text; read back via .getvalue().
        self.writer = StringIO()
    def visit(self, node):
        # Debug trace of every node visited.
        print node
        return super(Parser, self).visit(node)
    def visit_Mult(self, node):
        return '*'
    def visit_BinOp(self, node):
        # q uses prefix application: op[left;right].
        left = self.visit(node.left)
        right = self.visit(node.right)
        op = self.visit(node.op)
        return '{0}[{1};{2}]'.format(op, left, right)
    def visit_Num(self, node):
        return node.n
    def visit_Name(self, node):
        # Bare names become q symbols.
        return '`' + node.id
    def visit_Add(self, node):
        return '+'
    def visit_Expr(self, node):
        # Top-level expressions are written out, one per line.
        self.writer.write(self.visit(node.value) + '\n')
    def visit_Module(self, node):
        list(map(self.visit, node.body))
    def visit_Call(self, node):
        # Only `.select(...)` is supported; emits a q functional select:
        # ?[table;(constraints);groupby;aggregation].
        attr = node.func.attr
        statement = ''
        if (attr == 'select'):
            table = self.visit(node.func.value)
            constraints = ';'.join(map(self.visit, node.args))
            groupby = ';'.join(map(self.visit, [i for i in node.keywords if i.arg == 'groupby']))
            aggregation = ';'.join(map(self.visit, [i for i in node.keywords if i.arg == 'aggregate']))
            # NOTE(review): `table` and `constraints` are recomputed inline
            # below, visiting (and debug-printing) the same subtrees twice.
            statement += '?[' + self.visit(node.func.value) + ';(' + ';'.join(map(self.visit, node.args)) + ');'+groupby+';'+aggregation+']'
        else:
            raise NotImplementedError
        return statement
    def visit_Eq(self, node):
        return '='
    def visit_In(self, node):
        return 'in'
    def visit_Str(self, node):
        return '`'+node.s
    def visit_keyword(self, node):
        return self.visit(node.value)
    def visit_Dict(self, node):
        # q dictionary literal: (keys)!(values).
        return '(' + ' '.join(map(self.visit, node.keys)) + ')!(' + ' '.join(map(self.visit, node.values)) + ')'
    def visit_Compare(self, node):
        # Each comparison becomes a q parse-tree triple (op;left;comparator).
        left = self.visit(node.left)
        ops = list(map(self.visit, node.ops))
        comparators = list(map(self.visit, node.comparators))
        return ';'.join(('({0};{1};{2})'.format(op, left, comparator) for op, comparator in zip(ops, comparators)))
    def visit_List(self, node):
        return 'enlist ' + ' '.join(list(map(self.visit, node.elts)))
|
rymurr/q | kernel.py | <reponame>rymurr/q<gh_stars>0
from IPython.kernel.zmq.kernelbase import Kernel
import json
import pandas
import q
class KdbKernel(Kernel):
    """IPython/Jupyter kernel that forwards cell text to a kdb+/q process.

    Lines starting with '.py' are handled locally (connection management);
    everything else is sent to q. NOTE(review): Python 2 module
    (``print e`` statements).
    """
    implementation = 'Kdb'
    implementation_version = '0.1'
    language = 'kdb-q'
    language_version = '3.1'
    language_info = {'mimetype': 'text/plain'}
    banner = "Kdb+ kernel"
    def do_execute(self, code, silent, store_history=True, user_expressions=None,
                   allow_stdin=False):
        # Split the cell into local '.py' directives and q statements.
        codeList = code.split('\n')
        pyCommands = [i for i in codeList if '.py' == i[:3]]
        qCommands = '\n'.join([i for i in codeList if not '.py' == i[:3]])
        returnCode = self.getPyResults(pyCommands) + self.getQResults(qCommands)
        if not silent:
            stream_content = {'name': 'stdout', 'text': returnCode}
            self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'ok',
                # The base class increments the execution count
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {},
               }
    def getPyResults(self, pyCommands):
        """Handle local directives; currently only 'connection_details'."""
        returnString = ''
        for command in pyCommands:
            if 'connection_details' in command:
                self.connection_details = getDetails(command)
                self.connection = connect(self.connection_details)
                connectStr = 'Connection to q at {0} made successfully\n' if self.connection else 'Connection to q at {0} failed\n'
                returnString += connectStr.format(self.connection_details)
        return returnString
    def getQResults(self, qCommands):
        """Execute q code over the stored connection and format the reply.

        NOTE(review): self.connection is only assigned in getPyResults, so
        running q code before a connection directive raises AttributeError
        rather than returning the friendly message below -- confirm.
        """
        if len(qCommands) == 0:
            return ''
        if not self.connection:
            return 'No connection or connection details'
        try:
            result = self.connection.execute(qCommands.encode('ascii'))
        except Exception as e:
            print e
            print qCommands.encode('ascii')
            result = ''
        return formatQ(result)
def formatQ(result):
    """Render a q result: use its to_html() when available, else str()."""
    formatter = getattr(result, 'to_html', None)
    return formatter() if formatter else str(result)
def connect(details):
    """Open a q connection from a details dict; return None on any failure.

    NOTE(review): Python 2 (``print e``); the broad except deliberately turns
    connection errors into a None return for the kernel's friendly message.
    """
    try:
        return q.connect(details['hostname'].encode('ascii'), details['port'], details['username'].encode('ascii'), details['password'].encode('ascii'))
    except Exception as e:
        print e
        return None
def getDetails(connectionStr):
    """Parse 'connection_details=<json>' and validate the required keys.

    Returns the decoded dict, or None if any of hostname/port/username/
    password is missing. NOTE(review): Python 2 print statement; splitting
    on '=' and taking the last piece breaks if the JSON itself contains '='.
    """
    connectionDetail = connectionStr.split('=')[-1]
    print connectionDetail
    connectionObj = json.loads(connectionDetail)
    for detail in ('hostname', 'port', 'username', 'password'):
        if detail not in connectionObj:
            return None
    return connectionObj
if __name__ == '__main__':
    # Launch this kernel under IPython's ZMQ kernel app when run as a script.
    from IPython.kernel.zmq.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=KdbKernel)
|
rymurr/q | q/unparser.py | from bitstring import pack, ConstBitStream
import pandas
import numpy as np
import datetime
from protocol import types, inv_types, header_format, MILLIS, Y2KDAYS, NULL, BYTE, INT
from utils import str_convert, format, format_list, get_header, get_date_from_q, get_hour, format_raw_list
from collections import OrderedDict
def format_bits(data, endianness = 'le', with_index=False, sort_on=None, async=False, symbol = True, function = False, short = False):
    """Serialize *data* into a complete kdb+ IPC message (header + payload).

    NOTE(review): Python 2 only -- ``async`` is a reserved word from Python
    3.7, and ``len(data)/8 + 8`` relies on Python 2 integer division
    (payload bit-length to byte-length, plus the 8-byte header).
    """
    endian = 1 if endianness == 'le' else 0
    data_format = header_format.format(endianness)
    data = parse_on_the_wire(data, endianness, with_index=with_index, sort_on=sort_on, symbol=symbol, function = function, short=short)
    length = len(data)/8 + 8
    objects = {'endian':endian, 'async':1 if async else 0, 'length': length, 'data':data}
    bstream = pack(data_format, **objects)
    return bstream
#This is looking like it needs a refactor!
def parse_on_the_wire(data, endianness, attributes = 0, with_index=False, sort_on = None, symbol = True, function = False, short = False):
    """Recursively serialize a Python object into kdb+ IPC payload bits.

    Dispatches on the Python type: keyed DataFrames, numpy arrays,
    homogeneous/mixed lists, dicts, OrderedDicts, strings (symbol, char
    vector, or lambda), DataFrames, and scalar atoms. ``short`` narrows
    int/float to 16/32-bit q types. NOTE(review): Python 2 module; the
    author's own comment says this needs a refactor.
    """
    if with_index and type(data) == pandas.DataFrame:
        # Keyed table: serialize the index as a one-column table of keys,
        # then the body; type 127 marks a sorted/keyed dict, 99 a plain one.
        keys = parse_on_the_wire(pandas.DataFrame(data.index), endianness, attributes, False, True if sort_on else None, short=short)
        vals = parse_on_the_wire(data, endianness, attributes, False, short=short)
        data_format = 'int{0}:8=type, bits'.format(endianness)
        bstream = pack(data_format, (keys+vals), type='127' if sort_on else '99')
    elif isinstance(data,np.ndarray):
        dtype = inv_types[data.dtype.type]
        if data.dtype.type == np.object_:
            # Object arrays (symbols): serialize each element individually.
            data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, bits'.format(endianness)
            bstream = pack(data_format, sum([parse_on_the_wire(i, endianness, short=short) for i in data]), type = dtype[0], attributes=attributes, length=len(data))
        else:
            # Numeric arrays pack in one shot as length-many fixed-width values.
            data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, {3}*{1}{0}:{2}'.format(endianness, dtype[1], dtype[2], len(data))
            bstream = pack(data_format, *data, type=dtype[0], attributes=attributes, length=len(data))
    elif isinstance(data, list):
        type_set = set([type(i) for i in data])
        if len(type_set) == 1 and not list(type_set)[0] == np.ndarray:
            # Homogeneous list -> typed q vector.
            if short:
                temp = list(type_set)[0]
                if temp == float:
                    dtype = inv_types['float']
                if temp == int:
                    dtype = inv_types['int']
            else:
                dtype = inv_types[list(type_set)[0]]
            if list(type_set)[0] == str or list(type_set)[0] == list:
                # Variable-width elements are serialized one by one.
                data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, bits'.format(endianness)
                bstream = pack(data_format, sum([parse_on_the_wire(i, endianness, short=short) for i in data]), type = -dtype[0], attributes=attributes, length=len(data))
            else:
                data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, {3}*{1}{0}:{2}'.format(endianness, dtype[1], dtype[2], len(data))
                bstream = pack(data_format, *data, type=-dtype[0], attributes=attributes, length=len(data))
        else:
            # Mixed list -> q general list of individually-typed items.
            dtype = inv_types[type(data)]
            data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, bits=data'.format(endianness)
            bits = sum([parse_on_the_wire(i, endianness, short=short) for i in data])
            bstream = pack(data_format, data=bits, type=-dtype[0], attributes=attributes, length=len(data))
    elif type(data) == dict:
        # Plain dict (q type 99): keys list then values list.
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, bits=data'.format(endianness)
        keys = parse_on_the_wire(data.keys(), endianness, short=short)
        vals = parse_on_the_wire(data.values(), endianness, short=short)
        bits = keys + vals
        bstream = pack(data_format, data=bits, type=dtype[0], attributes=attributes, length=len(data))
    elif type(data) == OrderedDict:
        # Ordered dict (q type 127): keys get the sorted attribute (1).
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, bits=data'.format(endianness)
        keys = parse_on_the_wire(data.keys(), endianness, 1, short=short)
        vals = parse_on_the_wire(data.values(), endianness, short=short)
        bits = keys + vals
        bstream = pack(data_format, data=bits, type=dtype[0], attributes=attributes, length=len(data))
    elif isinstance(data, str) and function:
        # q lambda (type 100): context namespace symbol then the function body.
        context, function = data.split('{')
        context = context.replace('.','')
        function = '{' + function
        data_format = 'int{0}:8=lambdatype, bits=context, bits=function'
        bstream = pack(data_format.format(endianness), lambdatype=100, context = parse_on_the_wire(context, endianness), function = parse_on_the_wire(function, endianness, symbol=False) )
    elif isinstance(data, str) and symbol:
        # Symbol: raw bytes followed by a NUL terminator (no type header here;
        # the caller's vector/atom header supplies the type).
        bstream = pack('{0}*hex:8'.format(len(data)),*[hex(ord(i)) for i in data]) + ConstBitStream(b'0x00')
    elif isinstance(data, str):
        # Char vector (q type 10).
        dtype = inv_types['str']
        data_format = 'int{1}:8=type, int{1}:8=attributes, int{1}:32=length, {2}*hex:8'
        bstream = pack(data_format.format('', endianness, len(data)),type=-dtype[0], attributes=attributes, length=len(data), *[hex(ord(i)) for i in data])
    elif type(data) == pandas.DataFrame:
        # Table (q type 98): column-name symbol vector then column values;
        # sort_on marks one column with the sorted attribute (3).
        is_sorted = 1 if sort_on else 0
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, int{0}:8=tabattrib, int{0}:8=dicttype, bits=cols,int{0}:8=typearray, int{0}:8=attributes, int{0}:32=length, bits=vals'.format(endianness)
        cols = parse_on_the_wire(data.columns.values, endianness, short=short)
        vals = sum(parse_on_the_wire(col.values, endianness, 3 if i==sort_on else 0, short=short) for i,col in data.iterkv())
        # indexes = parse_on_the_wire(data.index.values, endianness, 3)
        bstream = pack(data_format, cols=cols, type=dtype[0], typearray=0, attributes=0, length=len(data.columns), vals=vals, tabattrib=is_sorted, dicttype=99)
    elif type(data) == float and short:
        # Narrow float atom (q real, 32-bit).
        dtype = inv_types['float']
        data_format = 'int{0}:8=type, {1}{0}:{2}'.format(endianness, dtype[1], dtype[2])
        bstream = pack(data_format, data, type=dtype[0])
    elif type(data) == int and short:
        # Narrow int atom (q short, 16-bit).
        dtype = inv_types['int']
        data_format = 'int{0}:8=type, {1}{0}:{2}'.format(endianness, dtype[1], dtype[2])
        bstream = pack(data_format, data, type=dtype[0])
    else:
        # Any remaining scalar atom via the inv_types lookup.
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, {1}{0}:{2}'.format(endianness, dtype[1], dtype[2])
        bstream = pack(data_format, data, type=dtype[0])
    return bstream
|
rymurr/q | q/parser.py | import pandas
import numpy as np
import datetime
from bitstring import ConstBitStream
from protocol import types, inv_types, header_format, MILLIS, Y2KDAYS, NULL, BYTE, INT, Y2KMILLIS
from utils import str_convert, format, format_list, get_header, get_date_from_q, get_hour, format_raw_list
from collections import OrderedDict
def get_symbol(bstream, endianness, val_type):
    # Symbol atom: read a NUL-terminated string from the stream.
    return str_convert(bstream, endianness)
def get_bool(bstream, endianness, val_type):
    # Bool atom: read nibble pairs and keep the odd-indexed one as the value.
    # NOTE(review): the while loop runs at most once -- `data` becomes a bool
    # on the first pass and a bool never equals -1.
    data = -1
    while data == -1:
        data = [bool(x) for i, x in enumerate(
            bstream.readlist(format_list(val_type, '', 2))) if i%2 == 1][0]
    return data
def get_month(bstream, endianness, val_type):
    # Month atom: months since the q epoch, converted via get_date_from_q.
    return get_date_from_q(bstream.read(format(val_type, endianness)))
def get_date(bstream, endianness, val_type):
    # Date atom: days since 2000-01-01 (Y2KDAYS offsets to the proleptic ordinal).
    return datetime.datetime.fromordinal(bstream.read(format(val_type, endianness))+Y2KDAYS)
def get_datetime(bstream, endianness, val_type):
    # Datetime atom: float days since 2000-01-01; the fractional day carries
    # the time of day (MILLIS = milliseconds per day).
    dt = bstream.read(format(val_type, endianness))
    return datetime.datetime.fromordinal(int(dt)+Y2KDAYS) + datetime.timedelta(milliseconds = dt%1*MILLIS)
def get_nanodatetime(bstream, endianness, val_type):
    # Nano-datetime atom: nanoseconds since the q epoch, scaled to seconds.
    # NOTE(review): Y2KMILLIS comes from mktime on a UTC tuple, so the host's
    # UTC offset is baked into this conversion -- confirm intended.
    dt = bstream.read(format(val_type, endianness))/1E9
    return datetime.datetime.utcfromtimestamp(dt+Y2KMILLIS)
def get_bool_list(bstream, endianness, val_type):
    # Bool vector: attribute byte, 32-bit length, then nibble pairs per item
    # (the odd-indexed nibble of each pair is the value).
    attributes = bstream.read(8).int
    length = bstream.read(format(INT, endianness))
    data = [bool(x) for i,x in enumerate(bstream.readlist(format_list(val_type, '', 2*length))) if i%2 == 1]
    return data
def get_symbol_list(bstream, endianness, val_type):
    """Symbol vector: attribute byte, 32-bit count, then that many
    NUL-terminated symbols."""
    attributes = bstream.read(8).int
    count = bstream.read(format(INT, endianness))
    return [str_convert(bstream, endianness) for _ in range(count)]
def get_error(bstream, endianness, val_type):
    # q error response (type -128): read the message and raise it locally.
    data = get_char(bstream, endianness, val_type)
    raise Exception(data)
def get_char(bstream, endianness, val_type):
    """Char atom / error text: delegate to str_convert."""
    return str_convert(bstream, endianness)
def get_char_list(bstream, endianness, val_type):
    # Char vector: attribute byte, 32-bit length, then the raw bytes.
    attributes = bstream.read(8).int
    length = bstream.read(format(INT, endianness))
    nptype, bstype = format_raw_list(val_type, length)
    data = bstream.read(bstype).bytes
    return data
def get_month_list(bstream, endianness, val_type):
    # Month vector: attribute byte, 32-bit length, then per-item conversion.
    attributes = bstream.read(8).int
    length = bstream.read(format(INT, endianness))
    data = [get_date_from_q(x) for x in bstream.readlist(format_list(val_type, endianness, length))]
    return data
def get_date_list(bstream, endianness, val_type):
    # Date vector: days since 2000-01-01 per element.
    attributes = bstream.read(8).int
    length = bstream.read(format(INT, endianness))
    data = [datetime.datetime.fromordinal(x+Y2KDAYS) for x in bstream.readlist(format_list(val_type, endianness, length))]
    return data
def get_datetime_list(bstream, endianness, val_type):
    # Datetime vector: float days since 2000-01-01; fractional day -> millis.
    attributes = bstream.read(8).int
    length = bstream.read(format(INT, endianness))
    dt = bstream.readlist(format_list(val_type, endianness, length))
    data = [datetime.datetime.fromordinal(int(x)+Y2KDAYS)+datetime.timedelta(milliseconds=x%1*MILLIS) for x in dt]
    return data
def get_nanodatetime_list(bstream, endianness, val_type):
    # Nano-datetime vector: nanoseconds since the q epoch per element.
    # NOTE(review): same local-offset concern as get_nanodatetime (Y2KMILLIS
    # is built with mktime) -- confirm.
    attributes = bstream.read(8).int
    length = bstream.read(format(INT, endianness))
    dt = bstream.readlist(format_list(val_type, endianness, length))
    data = [datetime.datetime.utcfromtimestamp(x/1E9+Y2KMILLIS) for x in dt]
    return data
def get_table(bstream, endianness, val_type):
    # Table (type 98): attribute byte, then the column dict becomes a DataFrame.
    attributes = bstream.read(8).int
    data = pandas.DataFrame(get_data(bstream, endianness))
    return data
def get_dict(bstream, endianness, val_type):
    """Dict (type 99): keys then values; a DataFrame key part means a keyed
    table, which is rebuilt as a DataFrame indexed by the key columns."""
    keys = get_data(bstream, endianness)
    vals = get_data(bstream, endianness)
    if not isinstance(keys, pandas.DataFrame):
        return dict(zip(keys, vals))
    keyed = pandas.concat([keys, vals], axis=1)
    return keyed.set_index(list(keys.columns))
def get_lambda_func(bstream, endianness, val_type):
    # Lambda (type 100): namespace symbol then the function body text; a
    # non-empty namespace is rendered with a leading dot.
    context = str_convert(bstream, endianness)
    data = ('' if context == '' else '.') + context + get_data(bstream, endianness)
    return data
def get_ordered_dict(bstream, endianness, val_type):
    # Sorted/keyed dict (type 127): same layout as get_dict, but plain keys
    # are preserved in arrival order via OrderedDict.
    keys = get_data(bstream, endianness)
    vals = get_data(bstream, endianness)
    if isinstance(keys, pandas.DataFrame):
        data = pandas.concat([keys, vals], axis = 1)
        data = data.set_index(list(keys.columns))
    else:
        data = OrderedDict(zip(keys, vals))
    return data
def get_data(bstream, endianness):
    """Read one q value: a signed type byte, then dispatch on its range.

    Negative types are atoms, positive are vectors; int_types overrides the
    range-based fallbacks for special encodings.
    """
    val_type = bstream.read(8).int
    if val_type in int_types:
        # Special-cased types (symbols, dates, tables, dicts, errors, ...).
        data = int_types[val_type](bstream, endianness, val_type)
    elif -20 < val_type < -10:
        # Time-like atoms (minute/second/time) -> datetime.time via get_hour.
        data = get_hour(bstream.read(format(val_type, endianness)))
    elif val_type < 0:
        # Any other numeric atom: single fixed-width read.
        data = bstream.read(format(val_type, endianness))
    elif 20 > val_type > 10:
        # Time-like vectors.
        attributes = bstream.read(8).int
        length = bstream.read(format(INT, endianness))
        data = [get_hour(x) for x in bstream.readlist(format_list(val_type, endianness, length))]
    elif 10 > val_type > 0:
        # Numeric vectors: bulk-read the raw bytes into a numpy array.
        attributes = bstream.read(8).int
        length = bstream.read(format(INT, endianness))
        nptype, bstype = format_raw_list(val_type, length)
        data = np.fromstring(bstream.read(bstype).bytes, dtype=nptype)
        #data = bstream.readlist(format_list(val_type, endianness, length))
    elif val_type > 90:
        # NOTE(review): unhandled high types yield an empty list -- confirm.
        data = []
    else:
        # General list (type 0): recursively read each element.
        attributes = bstream.read(8).int
        length = bstream.read(format(INT, endianness))
        data = [get_data(bstream, endianness) for _ in range(length)]
    return data
def parse(bits):
    """Decode a complete IPC message (header + one value) into Python data.

    NOTE(review): the while loop returns unconditionally on its first pass,
    so it behaves as an `if`; a multi-value stream would only yield the
    first value, and an empty payload raises NameError on `data`.
    """
    bstream = ConstBitStream(bits)
    endianness, size = get_header(bstream)
    while (bstream.pos < 8*size):
        data = get_data(bstream, endianness)
        return data
# Dispatch table for get_data: q type code -> reader function.
# Negative codes are atoms, positive are vectors.
# NOTE(review): -20 and 20 map to an empty *list*, which is not callable --
# hitting those types would raise TypeError in get_data; confirm intent.
# Types 12/-12 (timespan) reuse the nanodatetime readers.
int_types = {-11:get_symbol,
    -1:get_bool,
    -12:get_nanodatetime,
    -13:get_month,
    -14:get_date,
    -15:get_datetime,
    -16:get_nanodatetime,
    -20:[],
    -128:get_error,
    1:get_bool_list,
    10:get_char_list,
    11:get_symbol_list,
    12:get_nanodatetime_list,
    13:get_month_list,
    14:get_date_list,
    15:get_datetime_list,
    16:get_nanodatetime_list,
    20:[],
    98:get_table,
    99:get_dict,
    100:get_lambda_func,
    127:get_ordered_dict
    }
|
rymurr/q | q/protocol.py | <reponame>rymurr/q<gh_stars>0
'''
Primary source of kdb ipc protocol definitions
here we define all the q data types and their on the wire form
A parser is used to convert between the python format and kdb/q format
types are found here:
http://www.kx.com/q/d/q1.htm
Note on dates and times
dates are number of days since Jan 1 2000
times are number of hours/minutes/seconds/millis
datetimes are float64 days since Jan 1 (fractional day is converted to millis and parsed)
TODO:
need some docstrings
add in async/concurrency stuff for speed
profile!
integrate back into connection class and do full tests
clarify handling of OrderedDict
add in pd.Series
clarify handling of sorted and keyed tables
add indices (associated with keys)
'''
import pandas
import datetime
import numpy as np
from bitstring import ConstBitStream
from collections import OrderedDict
from time import mktime
#types: -ve is atomic +ve is vector
# Wire decode table: q type code -> (bitstring read kind, bit width[, numpy
# dtype for bulk vector reads]). Negative codes are atoms, positive vectors.
types = {
    -1: ('int', '4'), #bool
    1: ('int', '4', np.bool), #bool vector
    -4: ('int','8'), #byte
    4: ('int','8', np.int8), #byte vector
    -5: ('int', '16'), #short
    5: ('int', '16', np.int16), #short vector
    -6: ('int','32'), #int
    6: ('int','32', np.int32), #int vector
    -7: ('int','64'), #long
    7: ('int','64', np.int64), #long vector
    -8: ('float','32'), #real
    8: ('float','32', np.float32), #real vector
    -9: ('float','64'), #float
    9: ('float','64', np.float64), #float vector
    -10:('int', '8'), #char
    10:('int', '8', np.char), #char vector
    -11:('symbol',''), #symbol
    11:('symbol',''), #symbol vector
    -12:('int', '64'), #nanos
    12: ('int', '64'), #nanos vector
    -13:('int', '32'), #month
    13:('int', '32'), #month vector
    -14:('int', '32'), #date
    14:('int', '32'), #date vector
    -15:('float', '64'), #datetime
    15:('float', '64'), #datetime vector
    -16:('int', '64'), #nano datetime
    16:('int', '64'), #nano datetime vector
    -17:('int', '32'), #hour
    17:('int', '32'), #hour vector
    -18:('int', '32'), #second
    18:('int', '32'), #second vector
    -19:('int', '32'), #time
    19:('int', '32'), #time vector
    0:('list','0'), #list
    }
# Encode table: Python type -> (q type code, bitstring format kind, width).
# String keys ('int', 'float', 'str') are the narrow variants used when
# serializing with short=True.
# BUG FIX: the original literal repeated np.int64 and np.int8 as keys; in a
# dict literal the *last* occurrence wins, so the shadowed earlier entries
# were dead code and have been removed (effective values are unchanged).
# NOTE(review): np.int64 -> (6, 'int', '32') truncates 64-bit values to a q
# int; q long is type 7/64-bit -- kept because it was the effective mapping,
# but confirm it is intended.
inv_types = {
    bool: (1, 'int', '8'),
    np.bool: (-1, 'int', '8'),
    'int': (-5, 'int', '16'),
    np.int16: (5, 'int', '16'),
    long: (-7, 'int', '64'),
    float: (-9, 'float', '64'),
    'float': (-8, 'float', '32'),
    np.float32: (8, 'float', '32'),
    np.float64: (8, 'float', '64'),
    int: (-6, 'int', '32'),
    list: (0, '', ''),
    dict: (99, '', ''),
    str: (-11, 'hex', '8'),
    'str': (-10, 'hex', '8'),
    OrderedDict: (127, '', ''),
    np.int64: (6, 'int', '32'),
    np.int8: (4, 'int', '8'),
    np.object_: (11, 'hex', 8,),
    pandas.DataFrame: (98, '', ''),
    }
INT = -6   # type code used when reading 32-bit vector lengths
BYTE = -4
NULL = ConstBitStream('0x00')  # single NUL byte (symbol terminator)
Y2KDAYS = datetime.datetime(2000,1,1).toordinal()  # q day-count epoch as an ordinal
# NOTE(review): mktime interprets its tuple in *local* time even though the
# tuple came from utctimetuple(), so Y2KMILLIS carries the host's UTC offset
# -- confirm intended.
Y2KMILLIS = mktime(datetime.datetime(2000,1,1).utctimetuple())
MILLIS = 8.64E7  # milliseconds per day
#header format: endian flag, async flag, padding, total byte length, payload
header_format = 'int{0}:8=endian, int{0}:8=async, pad:16, int{0}:32=length, bits=data'
|
CodySchluenz/CTFd | serve.py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--port", help="Port for debug server to listen on", default=4000)
parser.add_argument(
"--profile", help="Enable flask_profiler profiling", action="store_true"
)
# parser.add_argument(
# "--disable-gevent",
# help="Disable importing gevent and monkey patching",
# action="store_false",
# )
args = parser.parse_args()
# if args.disable_gevent:
# print(" * Importing gevent and monkey patching. Use --disable-gevent to disable.")
# from gevent import monkey
# monkey.patch_all()
# Import not at top of file to allow gevent to monkey patch uninterrupted
from CTFd import create_app
app = create_app()
if args.profile:
from flask_debugtoolbar import DebugToolbarExtension
import flask_profiler
app.config["flask_profiler"] = {
"enabled": app.config["DEBUG"],
"storage": {"engine": "sqlite"},
"basicAuth": {"enabled": False},
"ignore": ["^/themes/.*", "^/events"],
}
flask_profiler.init_app(app)
app.config["DEBUG_TB_PROFILER_ENABLED"] = True
app.config["DEBUG_TB_INTERCEPT_REDIRECTS"] = False
toolbar = DebugToolbarExtension()
toolbar.init_app(app)
print(" * Flask profiling running at http://127.0.0.1:4000/flask-profiler/")
app.run(debug=True, threaded=True, host="127.0.0.1", port=args.port)
|
CosmicElysium/GmailClientExtractor | clientextractor.py | <filename>clientextractor.py
'''
Note: This module uses source code provided by Google Inc.
The original oauth2.py script can be found at:
https://github.com/google/gmail-oauth2-tools/blob/master/python/oauth2.py
'''
import imaplib
import email
import lxml
import sys
import urllib
import json
import re
import datetime
import xlsxwriter
from os import listdir
import calendar
from pandas import read_html
from optparse import OptionParser
# Gmail credentials file path
CREDENTIALS_PATH = "./creds_filled.data"
# The URL root for accessing Google Accounts.
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'
# Hardcoded dummy redirect URI for non-web apps.
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
#SCOPE= 'https://www.googleapis.com/auth/gmail.readonly'
SCOPE= 'https://mail.google.com/'
# Marker text ending the email's header section.
ENDOFHEADER= "Number\r\n"
# Month-name lookup tables built from calendar, e.g. "Jan." -> 1,
# "January" -> 1, plus the inverse number -> name directions.
MONTH_ABBR_NUMBERS = {v + ".": k for k,v in enumerate(calendar.month_abbr)}
MONTH_NUMBERS = {v: k for k,v in enumerate(calendar.month_name)}
MONTH_NUMBERS_INVERSE = {k: v for k,v in enumerate(calendar.month_name)}
MONTH_ABBR_NUMBERS_INVERSE = {k: v for k,v in enumerate(calendar.month_abbr)}
# NOTE(review): hardcoded year -- bookings in other years will be mis-dated.
CURRENTYEAR = 2017
class Client:
    """A single airport-pickup booking parsed from the booking email."""

    def __init__(self, ref_number, update_datetime, created_datetime, firstName,
                 lastName, email, airlines, flight_number, origin,
                 arrival_datetime, arrival_weekday):
        self.ref_number = ref_number
        self.update_datetime = update_datetime
        self.created_datetime = created_datetime
        self.firstName = firstName
        self.lastName = lastName
        self.email = email
        self.airlines = airlines
        self.flight_number = flight_number
        self.origin = origin
        self.arrival_datetime = arrival_datetime  # datetime.datetime of pickup
        self.arrival_weekday = arrival_weekday

    def GetDataSetAsList(self):
        """Return the spreadsheet row for this client.

        BUG FIX: the method was missing ``self`` (TypeError when called on an
        instance) and passed the bound method getArrivalTimeAsString instead
        of calling it.
        """
        return [self.firstName, self.lastName, self.flight_number,
                self.arrival_weekday, self.getArrivalDateAsString(),
                self.getArrivalTimeAsString(), "TODO", "TODO", "TODO", "TODO"]

    # NOTE(review): the setters below write dateTimeUpdated/dateTimeCreated/
    # referenceNumber while __init__ stores update_datetime/created_datetime/
    # ref_number -- two parallel attribute families; confirm intent.
    def setDateTimeLastUpdated(self, year, month, day, hour, minute):
        self.dateTimeUpdated = datetime.datetime(year, month, day, hour, minute)

    def setDateTimeCreated(self, year, month, day, hour, minute):
        self.dateTimeCreated = datetime.datetime(year, month, day, hour, minute)

    def setReferenceNumber(self, refNumber):
        self.referenceNumber = refNumber

    def setFirstName(self, firstName):
        self.firstName = firstName

    def setLastName(self, lastName):
        self.lastName = lastName

    def getArrivalDateAsString(self):
        """Return 'D Mon YYYY' for the arrival date.

        BUG FIX: previously concatenated ints to str (TypeError) and indexed
        the month-abbreviation table with the *day* number.
        """
        dt = self.arrival_datetime
        return "%d %s %d" % (dt.day, MONTH_ABBR_NUMBERS_INVERSE[dt.month], dt.year)

    def getArrivalTimeAsString(self):
        """Return 'H:MM' for the arrival time.

        BUG FIX: previously concatenated ints to str (TypeError); minutes are
        now zero-padded.
        """
        return "%d:%02d" % (self.arrival_datetime.hour, self.arrival_datetime.minute)
class ClientExtractor:
def __init__(self):
self.auth_string= ""
self.clientSet = []
def ExecuteSequence(self):
self.InitializeCredentials()
self.GetRawClientList()
self.ConvertListToClients()
self.WriteSpreadsheet()
def InitializeCredentials(self):
credentials = MiscTools.GetGmailCreds(CREDENTIALS_PATH)
self.username = credentials['USERNAME']
self.client_id = credentials['CLIENTID']
self.client_token = credentials['CLIENTTOKEN']
def GetRawClientList(self):
if self.auth_string == "":
print 'To authorize token, visit this url and follow the directions:'
print ' %s' % OAuth2Tools.GeneratePermissionUrl(self.client_id, SCOPE)
authorization_code = raw_input('Enter verification code: ')
auth_tokens= OAuth2Tools.AuthorizeTokens(self.client_id, self.client_token, authorization_code)
self.auth_string= OAuth2Tools.GenerateOAuth2String(self.username, auth_tokens['access_token'], base64_encode=False)
latestRawEmail= EmailTools.GetLatestEmail(self.username, self.auth_string)
latestEmail= EmailTools.ConvertRawToEmailMessage(latestRawEmail)
emailData= EmailTools.ConvertEmailMessageToData(latestEmail, 0)
self.dataList = DataTools.BreakDataStringToDataList(emailData)
def GetFakeRawClientList(self):
with open("./data/EmailData2.txt",'r') as emailData:
self.dataList = emailData.read()
def ConvertListToClients(self):
month = int(raw_input('Enter Month #: '))
day = int(raw_input('Enter Day #: '))
dateToGet = datetime.date(CURRENTYEAR, month, day)
self.dateFound = dateToGet
self.clientSet = DataTools.HtmlStringToClientList(self.dataList, dateToGet)
def WriteSpreadsheet(self):
updateNumber = 1
filePrefix = 'students_' + self.dateFound.day + MONTH_NUMBERS_INVERSE[self.dateFound.month] + '_update'
directoryFileList = listdir("./spreadsheets")
for eachFile in directoryFileList:
if filePrefix in eachFile:
updateNumber++
workbook = xlsxwriter.Workbook( filePrefix + updateNumber + '.xlsx')
worksheet = workbook.add_worksheet()
headers = ["First name", "Family name", "Airline Flight No.", "Arrival day Arrival Date",
"Arrival time (est.)", "Extra passengers", "Drop-off (University Residence)",
"Drop-off Address (other)", "Suburb"]
numberClients = len(clientSet)
for col,eachHeader in enumerate(headers):
worksheet.write(0, col, eachHeader)
for row, eachClient in enumerate(self.clientSet):
for col, eachData in enumerate(eachClient.GetDataSetAsList())
worksheet.write(row + 1, col, eachData)
workbook.close()
#check if flightnumbers match flights
#highlight updates
class DataTools:
    """Static helpers that turn the booking email's HTML payload into Clients."""

    @staticmethod
    def HtmlStringToClientList(html_string, date):
        """Parse the booking table HTML and return Clients picked up on *date*."""
        clientList = []
        # Split on row tags; the slice skips header fragments and keeps every
        # other piece (the row bodies).
        htmlParsed = re.split("<tr>|</tr>", html_string)
        betterParsed = htmlParsed[18:-3:2]
        for client in betterParsed:
            currentClient = read_html("<table>" + "<tr>" + client + "</tr>" + "</table>")[0].values.tolist()[0]
            pickUpDate = currentClient[16]
            # u'\xc2' marks an empty/placeholder cell -- skip the row.
            if pickUpDate == u'\xc2':
                continue
            pickUpDateParsed = pickUpDate.split()
            monthName = pickUpDateParsed[1]
            # Month may be abbreviated with a trailing dot ("Jan.") or full.
            if "." in monthName:
                pickUpDateObject = datetime.date(int(pickUpDateParsed[2]), MONTH_ABBR_NUMBERS[monthName], int(pickUpDateParsed[0]))
            else:
                pickUpDateObject = datetime.date(int(pickUpDateParsed[2]), MONTH_NUMBERS[monthName], int(pickUpDateParsed[0]))
            if pickUpDateObject == date:
                pickUpTime = MiscTools.TimeStringToTimeObject(currentClient[17])
                pickUpDateTime = datetime.datetime(pickUpDateObject.year, pickUpDateObject.month, pickUpDateObject.day, pickUpTime.hour, pickUpTime.minute)
                newClient = Client(currentClient[1], MiscTools.DateTimeStringToDateTimeObjects(currentClient[2]),
                                   MiscTools.DateTimeStringToDateTimeObjects(currentClient[3]), currentClient[4], currentClient[5],
                                   currentClient[6], currentClient[12], currentClient[13], currentClient[14], pickUpDateTime, currentClient[15])
                clientList.append(newClient)
        return clientList

    @staticmethod
    def BreakDataStringToDataList(dataString):
        """Split the payload on 'dddddd-dddddd' booking-reference separators."""
        dataList = re.split(r'[0-9][0-9][0-9][0-9][0-9][0-9]\-[0-9][0-9][0-9][0-9][0-9][0-9]', dataString)
        return dataList

    @staticmethod
    def SplitFirstWordOffString(string_to_split):
        """Return (first_word, remainder) of *string_to_split*.

        BUG FIX: the original used str.replace, which removed *every*
        occurrence of the first word (e.g. 'the theater' -> ('the', 'ater'));
        now only the leading occurrence is stripped.
        """
        string_to_split = string_to_split.lstrip()
        first_word = string_to_split.split(" ")[0]
        new_string = string_to_split[len(first_word):].lstrip()
        return first_word, new_string
class MiscTools:
    """Assorted parsing and configuration helpers."""

    @staticmethod
    def DateTimeStringToDateTimeObjects(dateTimeString):
        """Parse 'dd/mm/yyyy hh.mm AM|PM' into a datetime.

        BUG FIX: the original split the whole string on ' ' into exactly two
        pieces, so the three-piece 'date time AM' format always raised
        ValueError; it also mapped 12 PM to hour 0. Standard 12-hour
        conversion is now applied (12 PM -> 12, 12 AM -> 0).
        """
        date, time = dateTimeString.split(' ', 1)
        day, month, year = date.split('/')
        timeNumber, amPm = time.split(' ')
        hourStr, minute = timeNumber.split('.')
        hour = int(hourStr)
        if amPm == u"PM" and hour != 12:
            hour += 12
        if amPm == u"AM" and hour == 12:
            hour = 0
        return datetime.datetime(int(year), int(month), int(day), hour, int(minute))

    @staticmethod
    def TimeStringToTimeObject(timeString):
        """Parse 'HH:MM AM|PM'-shaped strings into a datetime.time.

        BUG FIX: '12:xx PM' previously became hour 24 and raised ValueError
        from datetime.time; noon/midnight are now handled.
        """
        hour = int(timeString[0:2])
        if timeString[3:5] == '':
            minute = 0
        else:
            minute = int(timeString[3:5])
        if 'PM' in timeString and hour != 12:
            hour = hour + 12
        if 'AM' in timeString and hour == 12:
            hour = 0
        return datetime.time(hour, minute)

    @staticmethod
    def DatesAreCloseEnough(date1, date2, distanceInDays):
        # Unimplemented placeholder.
        pass

    @staticmethod
    def GetGmailCreds(path_to_data_file):
        """Read 'KEY = VALUE' lines into a dict (whitespace stripped).

        BUG FIX: split on the first '=' only, so values containing '='
        (e.g. base64 secrets) survive intact.
        """
        credentials = {}
        with open(path_to_data_file, 'r') as credsFile:
            for line in credsFile:
                (key, val) = line.split('=', 1)
                key = key.replace(" ", "")
                val = val.replace("\n", "")
                val = val.replace(" ", "")
                credentials[key] = val
        return credentials
class EmailTools:
    """IMAP helpers for fetching and decoding the booking email."""
    @staticmethod
    def GetLatestEmail(EMAILUSER, auth_string):
        # Connect to Gmail over IMAP with XOAUTH2 and fetch the newest
        # message in INBOX as raw RFC822 bytes.
        imap_conn = imaplib.IMAP4_SSL('imap.gmail.com')
        imap_conn.debug = 4
        imap_conn.authenticate('XOAUTH2', lambda x: auth_string)
        imap_conn.select('INBOX')
        result, data = imap_conn.uid('search', None, "ALL") # search and return uids instead
        latest_email_uid = data[0].split()[-1]
        result, data = imap_conn.uid('fetch', latest_email_uid, '(RFC822)')
        raw_email = data[0][1]
        return raw_email
    @staticmethod
    def ConvertRawToEmailMessage(raw_email):
        # Parse the raw bytes into an email.message.Message.
        return email.message_from_string(raw_email)
    #TODO:save text files of both raw emails to avoid data cap
    @staticmethod
    def ConvertEmailMessageToData(email_message, payload_index):
        # Decode the chosen MIME payload part to a UTF-8 string.
        emailPayload= email_message.get_payload(payload_index)
        dataDecodeable= emailPayload.get_payload(decode= True)
        dataDecoded= dataDecodeable.decode('utf-8')
        return dataDecoded
        #startDataIndex= dataDecoded.find(ENDOFHEADER)
        #return dataDecoded[(startDataIndex + len(ENDOFHEADER)):]
class OAuth2Tools:
    @staticmethod
    def AccountsUrl(command):
        """Generates the Google Accounts URL.
        Args:
            command: The command to execute.
        Returns:
            A URL for the given command.
        """
        # Joins the accounts base URL with the command path.
        return '%s/%s' % (GOOGLE_ACCOUNTS_BASE_URL, command)
    @staticmethod
    def UrlEscape(text):
        # See OAUTH 5.1 for a definition of which characters need to be escaped.
        # NOTE(review): urllib.quote is Python 2; Python 3 uses urllib.parse.quote.
        return urllib.quote(text, safe='~-._')
    @staticmethod
    def UrlUnescape(text):
        # See OAUTH 5.1 for a definition of which characters need to be escaped.
        # NOTE(review): urllib.unquote is Python 2; Python 3 uses urllib.parse.unquote.
        return urllib.unquote(text)
    @staticmethod
    def FormatUrlParams(params):
        """Formats parameters into a URL query string.
        Args:
            params: A key-value map.
        Returns:
            A URL query string version of the given parameters.
        """
        # NOTE(review): dict.iteritems is Python 2 only.
        param_fragments = []
        for param in sorted(params.iteritems(), key=lambda x: x[0]):
            param_fragments.append('%s=%s' % (param[0], OAuth2Tools.UrlEscape(param[1])))
        return '&'.join(param_fragments)
    @staticmethod
    def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
        """Generates the URL for authorizing access.
        This uses the "OAuth2 for Installed Applications" flow described at
        https://developers.google.com/accounts/docs/OAuth2InstalledApp
        Args:
            client_id: Client ID obtained by registering your app.
            scope: scope for access token, e.g. 'https://mail.google.com'
        Returns:
            A URL that the user should visit in their browser.
        """
        # Builds the installed-app consent URL with the out-of-band redirect.
        params = {}
        params['client_id'] = client_id
        params['redirect_uri'] = REDIRECT_URI
        params['scope'] = scope
        params['response_type'] = 'code'
        return '%s?%s' % (OAuth2Tools.AccountsUrl('o/oauth2/auth'), OAuth2Tools.FormatUrlParams(params))
    @staticmethod
    def AuthorizeTokens(client_id, client_secret, authorization_code):
        """Obtains OAuth access token and refresh token.
        This uses the application portion of the "OAuth2 for Installed Applications"
        flow at https://developers.google.com/accounts/docs/OAuth2InstalledApp#handlingtheresponse
        Args:
            client_id: Client ID obtained by registering your app.
            client_secret: Client secret obtained by registering your app.
            authorization_code: code generated by Google Accounts after user grants
                permission.
        Returns:
            The decoded response from the Google Accounts server, as a dict. Expected
            fields include 'access_token', 'expires_in', and 'refresh_token'.
        """
        # NOTE(review): urllib.urlopen/urlencode are Python 2; Python 3 moved
        # these to urllib.request / urllib.parse.
        params = {}
        params['client_id'] = client_id
        params['client_secret'] = client_secret
        params['code'] = authorization_code
        params['redirect_uri'] = REDIRECT_URI
        params['grant_type'] = 'authorization_code'
        request_url = OAuth2Tools.AccountsUrl('o/oauth2/token')
        response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
        return json.loads(response)
@staticmethod
def RefreshToken(client_id, client_secret, refresh_token):
    """Obtains a new access token given a refresh token.

    See https://developers.google.com/accounts/docs/OAuth2InstalledApp#refresh

    Args:
        client_id: Client ID obtained by registering your app.
        client_secret: Client secret obtained by registering your app.
        refresh_token: A previously-obtained refresh token.

    Returns:
        The decoded response from the Google Accounts server, as a dict.
        Expected fields include 'access_token', 'expires_in', and
        'refresh_token'.
    """
    # NOTE(review): the original source is truncated here -- RefreshToken's
    # body stopped after assigning 'client_id' and ran directly into
    # GenerateOAuth2String's docstring. Both methods are restored below
    # following Google's reference oauth2.py sample; confirm against the
    # original file.
    params = {}
    params['client_id'] = client_id
    params['client_secret'] = client_secret
    params['refresh_token'] = refresh_token
    params['grant_type'] = 'refresh_token'
    request_url = OAuth2Tools.AccountsUrl('o/oauth2/token')
    response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
    return json.loads(response)

@staticmethod
def GenerateOAuth2String(username, access_token, base64_encode=True):
    """Generates an IMAP OAuth2 authentication string.

    See https://developers.google.com/google-apps/gmail/oauth2_overview

    Args:
        username: the username (email address) of the account to authenticate
        access_token: An OAuth2 access token.
        base64_encode: Whether to base64-encode the output.

    Returns:
        The SASL argument for the OAuth2 mechanism.
    """
    auth_string = 'user=%s\1auth=Bearer %s\1\1' % (username, access_token)
    if base64_encode:
        auth_string = base64.b64encode(auth_string)
    return auth_string
@staticmethod
def TestImapAuthentication(user, auth_string):
    """Authenticates to IMAP with the given auth_string.

    Prints a debug trace of the attempted IMAP connection.

    Args:
        user: The Gmail username (full email address)
        auth_string: A valid OAuth2 string, as returned by
            GenerateOAuth2String. Must not be base64-encoded, since imaplib
            does its own base64-encoding.
    """
    # Python 2 bare `print` emits a blank line to separate the debug trace.
    print
    imap_conn = imaplib.IMAP4_SSL('imap.gmail.com')
    # Debug level 4 logs the full IMAP protocol exchange.
    imap_conn.debug = 4
    # XOAUTH2 takes the raw SASL string; imaplib base64-encodes it itself.
    imap_conn.authenticate('XOAUTH2', lambda x: auth_string)
    imap_conn.select('INBOX')
@staticmethod
def TestSmtpAuthentication(user, auth_string):
    """Authenticates to SMTP with the given auth_string.

    Args:
        user: The Gmail username (full email address)
        auth_string: A valid OAuth2 string, not base64-encoded, as returned
            by GenerateOAuth2String.
    """
    # Python 2 bare `print` emits a blank line before the SMTP debug trace.
    print
    smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
    smtp_conn.set_debuglevel(True)
    # NOTE(review): this method appears truncated in this copy -- the
    # reference sample continues with smtp_conn.ehlo(...), starttls() and a
    # docmd('AUTH', 'XOAUTH2 ' + auth_string) call. Confirm against the
    # original source.
|
masantiago/shaka-packager | packager/app/test/packager_test.py | <filename>packager/app/test/packager_test.py
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Tests utilizing the sample packager binary."""
import filecmp
import glob
import logging
import os
import re
import shutil
import subprocess
import tempfile
import unittest
import packager_app
import test_env
# Banner prepended to test-failure output; tells the developer how to re-run
# the failing packager invocation outside the test harness.
_TEST_FAILURE_COMMAND_LINE_MESSAGE = """
!!! To reproduce the failure, change the output files to an !!!
!!! existing directory, e.g. output artifacts to current !!!
!!! directory by removing /tmp/something/ in the following !!!
!!! command line. !!!
The test executed the following command line:
""".strip()
class StreamDescriptor(object):
  """Builds a packager stream descriptor string, key=value pair by pair."""

  def __init__(self, input_file):
    # The descriptor always starts with the input file.
    self._buffer = 'input=%s' % input_file
    self._output_file_name_base = os.path.splitext(
        os.path.basename(input_file))[0]

  def Append(self, key, value):
    """Adds ',key=value' to the descriptor; returns self for chaining."""
    self._buffer = ','.join([self._buffer, '%s=%s' % (key, value)])
    # Derive a unique |_output_file_name_base| from a few significant keys;
    # the other keys are not needed for uniqueness.
    if key == 'stream':
      suffix = '-%s' % value
    elif key == 'trick_play_factor':
      suffix = '-trick_play_factor_%d' % value
    elif key == 'skip_encryption':
      suffix = '-skip_encryption'
    else:
      suffix = ''
    self._output_file_name_base += suffix
    return self

  def GetOutputFileNameBase(self, output_file_prefix):
    """Returns the derived base name, optionally prefixed."""
    return ('%s-%s' % (output_file_prefix, self._output_file_name_base)
            if output_file_prefix else self._output_file_name_base)

  def __str__(self):
    return self._buffer
class DiffFilesPolicy(object):
  """Class for handling files comparison.

  Attributes:
    _allowed_diff_files: The list of files allowed to be different.
    _exact: The actual list of diff_files must match the above list exactly,
        i.e. all the files in the above list must be different.
    _allow_updating_golden_files: When set to false, golden files will not be
        updated for this test even if updating_golden_files is requested. This
        is useful for tests generating different outputs in each run, which is
        often used together when _allowed_diff_files is not empty.
  """

  def __init__(self,
               allowed_diff_files=None,
               exact=True,
               allow_updating_golden_files=True):
    if allowed_diff_files:
      self._allowed_diff_files = allowed_diff_files
    else:
      self._allowed_diff_files = []
    self._exact = exact
    self._allow_updating_golden_files = allow_updating_golden_files

  def ProcessDiff(self, out_dir, gold_dir):
    """Compare test outputs with golden files.

    Args:
      out_dir: The test output directory.
      gold_dir: The golden directory to be compared with.

    Returns:
      A list of diff messages when the files do not match; An empty list
      otherwise or in update mode.
    """
    # In update mode no diffing is done at all -- even when this policy
    # suppresses the actual golden-file update, [] is returned.
    if test_env.options.test_update_golden_files:
      if self._allow_updating_golden_files:
        self._UpdateGold(out_dir, gold_dir)
      return []
    else:
      return self._DiffDir(out_dir, gold_dir)

  def _DiffDir(self, out_dir, gold_dir):
    # Get a list of the files and dirs that are different between the two top
    # level directories.
    diff = filecmp.dircmp(out_dir, gold_dir)
    # Create a list of all the details about the failure. The list will be
    # joined together when sent out.
    failure_messages = []
    # Files present only in out_dir are "missing" from gold;
    # files present only in gold_dir are "unexpected" extras.
    missing = diff.left_only
    if missing:
      failure_messages += [
          'Missing %d files: %s' % (len(missing), str(missing))
      ]
    extra = diff.right_only
    if extra:
      failure_messages += [
          'Found %d unexpected files: %s' % (len(extra), str(extra))
      ]
    # Produce nice diffs for each file that differs.
    for diff_file in diff.diff_files:
      if diff_file in self._allowed_diff_files:
        continue
      actual_file = os.path.join(out_dir, diff_file)
      expected_file = os.path.join(gold_dir, diff_file)
      output, error = self._GitDiff(expected_file, actual_file)
      # If this is an MP4 file, get a better looking diff.
      if ((output or error) and
          os.path.splitext(actual_file)[1] in {'.mp4', '.m4s'}):
        new_output, new_error = self._Mp4Diff(
            out_dir, expected_file, actual_file)
        # Prefer the mp4dump-based diff when it produced anything; fall back
        # to the raw git diff otherwise.
        output = new_output or output
        error = new_error or error
      if output:
        failure_messages += [output.decode('utf8')]
      if error:
        failure_messages += [error.decode('utf8')]
    if self._exact:
      # In exact mode, every allowed-diff file must actually differ.
      for diff_file in self._allowed_diff_files:
        if diff_file not in diff.diff_files:
          failure_messages += ['Expecting "%s" to be different' % diff_file]
    return failure_messages

  def _GitDiff(self, file_a, file_b):
    # --no-index lets git diff arbitrary files outside a repository.
    cmd = [
        'git',
        '--no-pager',
        'diff',
        '--color=auto',
        '--no-ext-diff',
        '--no-index',
        file_a,
        file_b
    ]
    p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    # Returns (stdout, stderr) as bytes.
    return p.communicate()

  def _Mp4Diff(self, out_dir, file_a, file_b):
    # Dump both MP4 files with mp4dump, then git-diff the text dumps.
    dump_a = os.path.join(out_dir, os.path.basename(file_a) + '.dump.expected')
    dump_b = os.path.join(out_dir, os.path.basename(file_b) + '.dump.actual')
    try:
      cmd = ['mp4dump', '--verbosity', '2', file_a]
      with open(dump_a, 'w') as f:
        subprocess.check_call(cmd, stdout=f)
      cmd = ['mp4dump', '--verbosity', '2', file_b]
      with open(dump_b, 'w') as f:
        subprocess.check_call(cmd, stdout=f)
    except (OSError, subprocess.CalledProcessError):
      # If the program isn't available or returns an error, just ignore it and
      # use the normal diff.
      return None, None
    return self._GitDiff(dump_a, dump_b)

  def _UpdateGold(self, out_dir, gold_dir):
    # Replace the golden directory wholesale with the test output.
    if os.path.exists(gold_dir):
      shutil.rmtree(gold_dir)
    shutil.copytree(out_dir, gold_dir)
def _UpdateMediaInfoPaths(media_info_filepath):
# Example:
# before: media_file_name: "/tmp/tmpD1h5UC/bear-640x360-audio.mp4"
# after: media_file_name: "bear-640x360-audio.mp4"
with open(media_info_filepath, 'rb') as f:
content = f.read().decode()
regex = 'media_file_name: "(.*)"'
for path in re.findall(regex, content):
short_path = os.path.basename(path)
content = content.replace(path, short_path)
with open(media_info_filepath, 'wb') as f:
f.write(content.encode())
def _UpdateMpdTimes(mpd_filepath):
# Take a single pattern, and replace the first match with the
# given new string.
def _Replace(str_in, pattern, new):
m = re.search(pattern, str_in)
if m:
old = m.group(0)
out = str_in.replace(old, new)
logging.info('Replacing "%s" with "%s"', old, new)
else:
out = str_in
return out
with open(mpd_filepath, 'rb') as f:
content = f.read().decode()
content = _Replace(
content,
'availabilityStartTime="[^"]+"',
'availabilityStartTime="some_time"')
content = _Replace(
content,
'publishTime="[^"]+"',
'publishTime="some_time"')
with open(mpd_filepath, 'wb') as f:
f.write(content.encode())
def GetExtension(input_file_path, output_format):
  """Returns output_format when set; otherwise the input's extension (no dot)."""
  if output_format:
    return output_format
  # Fall back to the input file's own extension, minus the leading '.'.
  _, dot_ext = os.path.splitext(input_file_path)
  return dot_ext[1:]
def GetSegmentedExtension(base_extension):
  """Maps 'mp4' to the segment extension 'm4s'; others pass through."""
  return 'm4s' if base_extension == 'mp4' else base_extension
class PackagerAppTest(unittest.TestCase):
  """Base fixture for packager tests.

  Builds stream-descriptor strings and command-line flags for the packager
  binary, runs it into a per-test temp directory, and compares the outputs
  with golden files.
  """

  def setUp(self):
    super(PackagerAppTest, self).setUp()
    self.packager = packager_app.PackagerApp()
    # Per-test scratch directory; removed in tearDown (configurable).
    self.tmp_dir = tempfile.mkdtemp()
    self.test_data_dir = os.path.join(test_env.SRC_DIR, 'packager', 'media',
                                      'test', 'data')
    self.golden_file_dir = os.path.join(test_env.SRC_DIR, 'packager', 'app',
                                        'test', 'testdata')
    self.mpd_output = os.path.join(self.tmp_dir, 'output.mpd')
    self.hls_master_playlist_output = os.path.join(self.tmp_dir, 'output.m3u8')
    # Collects every output path generated through _GetStream.
    self.output = []

    # Test variables.
    self.encryption_key_id = '31323334353637383930313233343536'
    # Key and IV may be overridden from the command line (test_env.options).
    if test_env.options.encryption_key:
      self.encryption_key = test_env.options.encryption_key
    else:
      self.encryption_key = '32333435363738393021323334353637'
    if test_env.options.encryption_iv:
      self.encryption_iv = test_env.options.encryption_iv
    else:
      self.encryption_iv = '3334353637383930'
    self.widevine_content_id = '3031323334353637'
    self.pssh = ('0000002070737368000000001077efecc0b24d02ace33c1e52e2fb4b000'
                 '00000')
    # TS files may have a non-zero start, which could result in the first
    # segment to be less than 1 second. Set clear_lead to be less than 1
    # so only the first segment is left in clear.
    self.clear_lead = 0.8

  def tearDown(self):
    # Keep the temp dir around for debugging when requested via options.
    if test_env.options.remove_temp_files_after_test:
      shutil.rmtree(self.tmp_dir)
    super(PackagerAppTest, self).tearDown()

  def _GetStream(self,
                 descriptor,
                 language=None,
                 output_file_prefix=None,
                 output_format=None,
                 segmented=False,
                 using_time_specifier=False,
                 hls=False,
                 hls_characteristics=None,
                 hls_only=None,
                 dash_accessibilities=None,
                 dash_roles=None,
                 dash_only=None,
                 trick_play_factor=None,
                 drm_label=None,
                 skip_encryption=None,
                 bandwidth=None,
                 split_content_on_ad_cues=False,
                 test_file=None):
    """Get a stream descriptor as a string.

    Create the stream descriptor as a string for the given parameters so that
    it can be passed as an input parameter to the packager.

    Args:
      descriptor: The name of the stream in the container that should be used
          as input for the output.
      language: The language override for the input stream.
      output_file_prefix: The output file prefix. Default to empty if not
          specified.
      output_format: The format for the output.
      segmented: Should the output use a segmented formatted. This will affect
          the output extensions and manifests.
      using_time_specifier: Use $Time$ in segment name instead of using
          $Number$. This flag is only relevant if segmented is True.
      hls: Should the output be for an HLS manifest.
      hls_characteristics: CHARACTERISTICS attribute for the HLS stream.
      hls_only: If set to true, will indicate that the stream is for HLS only.
      dash_accessibilities: Accessibility element for the DASH stream.
      dash_roles: Role element for the DASH stream.
      dash_only: If set to true, will indicate that the stream is for DASH
          only.
      trick_play_factor: Signals the stream is to be used for a trick play
          stream and which key frames to use. A trick play factor of 0 is the
          same as not specifying a trick play factor.
      drm_label: The drm label for the stream.
      skip_encryption: If set to true, the stream will not be encrypted.
      bandwidth: The expected bandwidth value that should be listed in the
          manifest.
      split_content_on_ad_cues: If set to true, the output file will be split
          into multiple files, with a total of NumAdCues + 1 files.
      test_file: The input file to use. If the input file is not specified, a
          default file will be used.

    Returns:
      A string that makes up a single stream descriptor for input to the
      packager.
    """
    input_file_name = test_file or 'bear-640x360.mp4'
    input_file_path = os.path.join(self.test_data_dir, input_file_name)

    stream = StreamDescriptor(input_file_path)
    stream.Append('stream', descriptor)
    if output_format:
      stream.Append('format', output_format)
    if language:
      stream.Append('lang', language)
    if trick_play_factor:
      stream.Append('trick_play_factor', trick_play_factor)
    if drm_label:
      stream.Append('drm_label', drm_label)
    if skip_encryption:
      stream.Append('skip_encryption', 1)

    base_ext = GetExtension(input_file_path, output_format)
    output_file_name_base = stream.GetOutputFileNameBase(output_file_prefix)

    if hls:
      stream.Append('playlist_name', output_file_name_base + '.m3u8')

      # By default, add a iframe playlist for all HLS playlists (assuming that
      # the source input is supported). iframe playlists should only be for
      # videos. This check will fail for numeric descriptors, but that is an
      # acceptable limitation (b/73960731).
      if base_ext in ['ts', 'mp4'] and descriptor == 'video':
        stream.Append('iframe_playlist_name',
                      output_file_name_base + '-iframe.m3u8')

    if hls_characteristics:
      stream.Append('hls_characteristics', hls_characteristics)
    if hls_only:
      stream.Append('hls_only', 1)

    if dash_accessibilities:
      stream.Append('dash_accessibilities', dash_accessibilities)
    if dash_roles:
      stream.Append('dash_roles', dash_roles)
    if dash_only:
      stream.Append('dash_only', 1)

    # These raw-audio/ts/vtt formats carry no separate init segment.
    requires_init_segment = segmented and base_ext not in [
        'aac', 'ac3', 'ec3', 'ts', 'vtt'
    ]

    output_file_path = os.path.join(self.tmp_dir, output_file_name_base)

    if requires_init_segment:
      init_seg = '%s-init.%s' % (output_file_path, base_ext)
      stream.Append('init_segment', init_seg)

    if segmented:
      segment_specifier = '$Time$' if using_time_specifier else '$Number$'
      segment_ext = GetSegmentedExtension(base_ext)
      seg_template = '%s-%s.%s' % (output_file_path, segment_specifier,
                                   segment_ext)
      stream.Append('segment_template', seg_template)
    else:
      if split_content_on_ad_cues:
        # $Number$ expands to one file per ad-cue split.
        output_file_path += '$Number$.' + base_ext
      else:
        output_file_path += '.' + base_ext
      stream.Append('output', output_file_path)

    if bandwidth:
      stream.Append('bandwidth', bandwidth)

    self.output.append(output_file_path)

    return str(stream)

  def _GetStreams(self, streams, test_files=None, **kwargs):
    # Builds one descriptor per (test_file, stream) combination; with no
    # test_files, each stream uses the default input file.
    # Make sure there is a valid list that we can get the length from.
    test_files = test_files or []
    test_files_count = len(test_files)

    out = []

    if test_files_count == 0:
      for stream in streams:
        out.append(self._GetStream(stream, **kwargs))
    else:
      for file_name in test_files:
        for stream in streams:
          out.append(self._GetStream(stream, test_file=file_name, **kwargs))

    return out

  def _GetFlags(self,
                strip_parameter_set_nalus=True,
                encryption=False,
                protection_systems=None,
                protection_scheme=None,
                crypt_byte_block=None,
                skip_byte_block=None,
                vp9_subsample_encryption=True,
                decryption=False,
                random_iv=False,
                widevine_encryption=False,
                key_rotation=False,
                include_pssh_in_stream=True,
                dash_if_iop=True,
                output_media_info=False,
                output_dash=False,
                output_hls=False,
                hls_playlist_type=None,
                time_shift_buffer_depth=0.0,
                preserved_segments_outside_live_window=0,
                utc_timings=None,
                generate_static_live_mpd=False,
                ad_cues=None,
                default_language=None,
                segment_duration=1.0,
                use_fake_clock=True,
                allow_codec_switching=False):
    # Translates keyword options into the packager's command-line flags.
    flags = ['--single_threaded']

    if not strip_parameter_set_nalus:
      flags += ['--strip_parameter_set_nalus=false']

    if widevine_encryption:
      widevine_server_url = ('https://license.uat.widevine.com/cenc'
                             '/getcontentkey/widevine_test')
      flags += [
          '--enable_widevine_encryption',
          '--key_server_url=' + widevine_server_url,
          '--content_id=' + self.widevine_content_id,
      ]
    elif encryption:
      flags += [
          '--enable_raw_key_encryption',
          '--keys=label=:key_id={0}:key={1}'.format(self.encryption_key_id,
                                                    self.encryption_key),
          '--clear_lead={0}'.format(self.clear_lead)
      ]

      if not random_iv:
        flags.append('--iv=' + self.encryption_iv)

      if protection_systems:
        flags += ['--protection_systems=' + protection_systems]
        if 'FairPlay' in protection_systems:
          fairplay_key_uri = ('skd://www.license.com/getkey?'
                              'KeyId=31323334-3536-3738-3930-313233343536')
          flags += ['--hls_key_uri=' + fairplay_key_uri]

    if protection_scheme:
      flags += ['--protection_scheme', protection_scheme]
      if crypt_byte_block is not None and skip_byte_block is not None:
        flags += [
            '--crypt_byte_block={0}'.format(crypt_byte_block),
            '--skip_byte_block={0}'.format(skip_byte_block)
        ]

    if not vp9_subsample_encryption:
      flags += ['--vp9_subsample_encryption=false']

    if decryption:
      flags += [
          '--enable_raw_key_decryption',
          '--keys=label=:key_id={0}:key={1}'.format(self.encryption_key_id,
                                                    self.encryption_key)
      ]

    if key_rotation:
      flags.append('--crypto_period_duration=1')

    if not include_pssh_in_stream:
      flags.append('--mp4_include_pssh_in_stream=false')

    if not dash_if_iop:
      flags.append('--generate_dash_if_iop_compliant_mpd=false')
    if output_media_info:
      flags.append('--output_media_info')
    if output_dash:
      flags += ['--mpd_output', self.mpd_output]
    if output_hls:
      flags += ['--hls_master_playlist_output', self.hls_master_playlist_output]
      if hls_playlist_type:
        flags += ['--hls_playlist_type', hls_playlist_type]

    if time_shift_buffer_depth != 0.0:
      flags += ['--time_shift_buffer_depth={0}'.format(time_shift_buffer_depth)]
    if preserved_segments_outside_live_window != 0:
      flags += [
          '--preserved_segments_outside_live_window={0}'.format(
              preserved_segments_outside_live_window)
      ]

    if utc_timings:
      flags += ['--utc_timings', utc_timings]

    if generate_static_live_mpd:
      flags += ['--generate_static_live_mpd']

    if allow_codec_switching:
      flags += ['--allow_codec_switching']

    if ad_cues:
      flags += ['--ad_cues', ad_cues]
    if default_language:
      flags += ['--default_language', default_language]

    flags.append('--segment_duration={0}'.format(segment_duration))

    # Use fake clock, so output can be compared.
    if use_fake_clock:
      flags.append('--use_fake_clock_for_muxer')

    # Override packager version string for testing.
    flags += ['--test_packager_version', '<tag>-<hash>-<test>']
    return flags

  def _AssertStreamInfo(self, stream, info):
    # Asserts that `stream` contains exactly one stream matching `info`.
    stream_info = self.packager.DumpStreamInfo(stream)
    self.assertIn('Found 1 stream(s).', stream_info)
    self.assertIn(info, stream_info)

  def _Decrypt(self, file_path):
    # Re-runs the packager in decryption mode over a single encrypted output.
    streams = [
        self._GetStream(
            '0', output_file_prefix='decrypted', test_file=file_path)
    ]
    self.assertPackageSuccess(streams, self._GetFlags(decryption=True))

  # NOTE(review): the default DiffFilesPolicy() instance is created once at
  # definition time and shared by every call; harmless as long as the policy
  # stays stateless -- confirm before adding mutable state to it.
  def _CheckTestResults(self,
                        test_dir,
                        verify_decryption=False,
                        diff_files_policy=DiffFilesPolicy()):
    """Check test results. Updates golden files in update mode.

    Args:
      test_dir: The golden directory to be compared with. It is expected to be
          relative to |self.golden_file_dir|.
      verify_decryption: If set to true, assumes the media files without
          'skip-encryption' in name to be encrypted and tries to decrypt and
          then compare these files.
      diff_files_policy: Specifies DiffFiles policy and handles files
          comparison.
    """
    # Live mpd contains current availabilityStartTime and publishTime, which
    # needs to be replaced before comparison. If this is not a live test, then
    # this will be a no-op.
    mpds = glob.glob(os.path.join(self.tmp_dir, '*.mpd'))
    for manifest in mpds:
      _UpdateMpdTimes(manifest)

    # '*.media_info' outputs contain media file names, which is changing for
    # every test run. These needs to be replaced for comparison.
    media_infos = glob.glob(os.path.join(self.tmp_dir, '*.media_info'))
    for media_info in media_infos:
      _UpdateMediaInfoPaths(media_info)

    if verify_decryption:
      for file_name in os.listdir(self.tmp_dir):
        if 'skip_encryption' in file_name:
          continue
        extension = os.path.splitext(file_name)[1][1:]
        # Only media files are decrypted; manifests are compared as-is.
        if extension not in ['mpd', 'm3u8', 'media_info']:
          self._Decrypt(os.path.join(self.tmp_dir, file_name))

    out_dir = self.tmp_dir
    gold_dir = os.path.join(self.golden_file_dir, test_dir)
    failure_messages = diff_files_policy.ProcessDiff(out_dir, gold_dir)
    if failure_messages:
      # Prepend the failure messages with the header.
      failure_messages = [
          _TEST_FAILURE_COMMAND_LINE_MESSAGE,
          self.packager.GetCommandLine()
      ] + failure_messages

      self.fail('\n'.join(failure_messages))
class PackagerFunctionalTest(PackagerAppTest):
def assertPackageSuccess(self, streams, flags=None):
  # The packager binary returns 0 on success.
  self.assertEqual(self.packager.Package(streams, flags), 0)

def assertMpdGeneratorSuccess(self):
  # Feeds every generated *.media_info file to the standalone MPD generator.
  media_infos = glob.glob(os.path.join(self.tmp_dir, '*.media_info'))
  self.assertTrue(media_infos)
  flags = ['--input', ','.join(media_infos), '--output', self.mpd_output]
  # Pin the version string so golden-file comparison is stable.
  flags += ['--test_packager_version', '<tag>-<hash>-<test>']
  self.assertEqual(self.packager.MpdGenerator(flags), 0)
def testVersion(self):
  # To support python version 2, which does not have assertRegex.
  if 'assertRegex' not in dir(self):
    assert_regex = self.assertRegexpMatches
  else:
    assert_regex = self.assertRegex
  # Accepts both tagged and untagged builds, debug or release.
  assert_regex(
      self.packager.Version(), '^packager(.exe)? version '
      r'((?P<tag>[\w\.]+)-)?(?P<hash>[a-f\d]+)-(debug|release)[\r\n]+.*$')

def testDumpStreamInfo(self):
  # Verifies the stream-info dump for the default test asset verbatim.
  test_file = os.path.join(self.test_data_dir, 'bear-640x360.mp4')
  stream_info = self.packager.DumpStreamInfo(test_file)
  expected_stream_info = ('Found 2 stream(s).\n'
                          'Stream [0] type: Video\n'
                          ' codec_string: avc1.64001e\n'
                          ' time_scale: 30000\n'
                          ' duration: 82082 (2.7 seconds)\n'
                          ' is_encrypted: false\n'
                          ' codec: H264\n'
                          ' width: 640\n'
                          ' height: 360\n'
                          ' pixel_aspect_ratio: 1:1\n'
                          ' trick_play_factor: 0\n'
                          ' nalu_length_size: 4\n\n'
                          'Stream [1] type: Audio\n'
                          ' codec_string: mp4a.40.2\n'
                          ' time_scale: 44100\n'
                          ' duration: 121856 (2.8 seconds)\n'
                          ' is_encrypted: false\n'
                          ' codec: AAC\n'
                          ' sample_bits: 16\n'
                          ' num_channels: 2\n'
                          ' sampling_frequency: 44100\n'
                          ' language: und\n')
  # Normalize Windows line endings before comparing.
  stream_info = stream_info.replace('\r\n', '\n')
  self.assertIn(expected_stream_info, stream_info,
                '\nExpecting: \n %s\n\nBut seeing: \n%s' %
                (expected_stream_info, stream_info))
# Basic DASH packaging: single stream, audio+video, accessibility/role
# attributes, and trick-play variants.

def testFirstStream(self):
  self.assertPackageSuccess(
      self._GetStreams(['0']), self._GetFlags(output_dash=True))
  self._CheckTestResults('first-stream')

# Probably one of the most common scenarios is to package audio and video.
def testAudioVideo(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video']), self._GetFlags(output_dash=True))
  self._CheckTestResults('audio-video')

def testAudioVideoWithAccessibilitiesAndRoles(self):
  streams = [
      self._GetStream(
          'audio',
          dash_accessibilities='urn:tva:metadata:cs:AudioPurposeCS:2007=1',
          dash_roles='alternate'),
      self._GetStream('video'),
  ]

  streams += self._GetStreams(
      ['text'],
      test_files=['bear-english.vtt'],
      dash_roles='caption')

  self.assertPackageSuccess(streams, self._GetFlags(output_dash=True))
  self._CheckTestResults('audio-video-with-accessibilities-and-roles')

def testAudioVideoWithTrickPlay(self):
  streams = [
      self._GetStream('audio'),
      self._GetStream('video'),
      self._GetStream('video', trick_play_factor=1),
  ]

  self.assertPackageSuccess(streams, self._GetFlags(output_dash=True))
  self._CheckTestResults('audio-video-with-trick-play')

def testAudioVideoWithTwoTrickPlay(self):
  streams = [
      self._GetStream('audio'),
      self._GetStream('video'),
      self._GetStream('video', trick_play_factor=1),
      self._GetStream('video', trick_play_factor=2),
  ]

  self.assertPackageSuccess(streams, self._GetFlags(output_dash=True))
  self._CheckTestResults('audio-video-with-two-trick-play')

def testAudioVideoWithTwoTrickPlayDecreasingRate(self):
  streams = [
      self._GetStream('audio'),
      self._GetStream('video'),
      self._GetStream('video', trick_play_factor=2),
      self._GetStream('video', trick_play_factor=1),
  ]

  self.assertPackageSuccess(streams, self._GetFlags(output_dash=True))
  # Since the stream descriptors are sorted in packager app, a different
  # order of trick play factors gets the same mpd.
  self._CheckTestResults('audio-video-with-two-trick-play')
# DASH-only / HLS-only stream filtering and language-override handling
# (including 2-letter vs 3-letter ISO code mixes and subtags).

def testDashOnlyAndHlsOnlyWithCaptions(self):
  audio_video_streams = self._GetStreams(['audio', 'video'], segmented=True)
  dash_text_stream = self._GetStreams(['text'],
                                      test_files=['bear-english.vtt'],
                                      output_format='mp4',
                                      segmented=True,
                                      dash_only=True)
  hls_text_stream = self._GetStreams(['text'],
                                     test_files=['bear-english.vtt'],
                                     segmented=True,
                                     hls_only=True)
  streams = audio_video_streams + dash_text_stream + hls_text_stream
  self.assertPackageSuccess(streams, self._GetFlags(output_dash=True,
                                                    output_hls=True))
  self._CheckTestResults('hls-only-dash-only-captions')

def testDashOnlyAndHlsOnly(self):
  streams = [
      self._GetStream('video', hls_only=True),
      self._GetStream('audio', dash_only=True),
  ]
  self.assertPackageSuccess(
      streams,
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('hls-only-dash-only')

def testAudioVideoWithLanguageOverride(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'], language='por', hls=True),
      self._GetFlags(default_language='por', output_dash=True,
                     output_hls=True))
  self._CheckTestResults('audio-video-with-language-override')

def testAudioVideoWithLanguageOverrideUsingMixingCode(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'], language='por', hls=True),
      self._GetFlags(default_language='pt', output_dash=True,
                     output_hls=True))
  self._CheckTestResults('audio-video-with-language-override')

def testAudioVideoWithLanguageOverrideUsingMixingCode2(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'], language='pt', hls=True),
      self._GetFlags(default_language='por', output_dash=True,
                     output_hls=True))
  self._CheckTestResults('audio-video-with-language-override')

def testAudioVideoWithLanguageOverrideUsingTwoCharacterCode(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'], language='pt', hls=True),
      self._GetFlags(default_language='pt', output_dash=True,
                     output_hls=True))
  self._CheckTestResults('audio-video-with-language-override')

def testAudioVideoWithLanguageOverrideWithSubtag(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'], language='por-BR', hls=True),
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('audio-video-with-language-override-with-subtag')
# WebVTT text output variants (segmented text, single file, mp4-boxed),
# plus a few container edge cases (trailing moov, non-square pixels, HE-AAC).

def testSegmentedWebVttWithLanguageOverride(self):
  streams = self._GetStreams(
      ['text'], language='por', dash_only=True, output_format='mp4',
      test_files=['bear-english.vtt'], segmented=True)
  streams += self._GetStreams(
      ['text'], language='por', hls_only=True,
      test_files=['bear-english.vtt'], segmented=True)

  flags = self._GetFlags(output_hls=True, output_dash=True)

  self.assertPackageSuccess(streams, flags)
  self._CheckTestResults('segmented-webvtt-with-language-override')

def testSegmentedWebVttText(self):
  streams = self._GetStreams(
      ['text'], test_files=['bear-english.vtt'], segmented=True)
  flags = self._GetFlags(output_hls=True, output_dash=True)

  self.assertPackageSuccess(streams, flags)
  self._CheckTestResults('segmented-webvtt-text')

def testSingleFileWebVttText(self):
  streams = self._GetStreams(['text'], test_files=['bear-english.vtt'])
  flags = self._GetFlags(output_hls=True, output_dash=True)

  self.assertPackageSuccess(streams, flags)
  self._CheckTestResults('single-file-webvtt-text')

def testSegmentedWebVttMp4(self):
  streams = self._GetStreams(['text'], test_files=['bear-english.vtt'],
                             output_format='mp4', segmented=True)
  flags = self._GetFlags(output_hls=True, output_dash=True)

  self.assertPackageSuccess(streams, flags)
  self._CheckTestResults('segmented-webvtt-mp4')

def testMp4TrailingMoov(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'],
                       test_files=['bear-640x360-trailing-moov.mp4']),
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('mp4-trailing-moov')

def testVideoNonSquarePixel(self):
  self.assertPackageSuccess(
      self._GetStreams(
          ['video'],
          test_files=['bear-640x360-non_square_pixel-with_pasp.mp4']),
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('video-non-square-pixel')

def testAacHe(self):
  self.assertPackageSuccess(
      self._GetStreams(
          ['audio'], test_files=['bear-640x360-aac_he-silent_right.mp4']),
      self._GetFlags(output_dash=True))
  # NOTE(review): golden dir is named 'acc-he' (not 'aac-he') -- presumably
  # a historical typo mirrored in the testdata tree; confirm before renaming.
  self._CheckTestResults('acc-he')
# Subtitle muxing (WebVTT/TTML), edit-list handling, and MPEG-2 TS inputs.

def testVideoAudioWebVTT(self):
  audio_video_streams = self._GetStreams(['audio', 'video'])
  text_stream = self._GetStreams(['text'], test_files=['bear-english.vtt'])
  self.assertPackageSuccess(audio_video_streams + text_stream,
                            self._GetFlags(output_dash=True))
  self._CheckTestResults('video-audio-webvtt')

def testVideoAudioTTML(self):
  audio_video_streams = self._GetStreams(['audio', 'video'])
  text_stream = self._GetStreams(['text'], test_files=['bear-english.ttml'])
  self.assertPackageSuccess(audio_video_streams + text_stream,
                            self._GetFlags(output_dash=True))
  self._CheckTestResults('video-audio-ttml')

def testVideoNoEditList(self):
  stream = self._GetStream('video', test_file='bear-640x360-no_edit_list.mp4')
  self.assertPackageSuccess([stream], self._GetFlags(output_dash=True))
  self._CheckTestResults('video-no-edit-list')

def testAvcAacTs(self):
  # Currently we only support live packaging for ts.
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'],
                       segmented=True,
                       hls=True,
                       test_files=['bear-640x360.ts']),
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('avc-aac-ts')

def testAvcAc3Ts(self):
  # Currently we only support live packaging for ts.
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'],
                       segmented=True,
                       hls=True,
                       test_files=['bear-640x360-ac3.ts']),
      self._GetFlags(output_hls=True))
  self._CheckTestResults('avc-ac3-ts')

def testAvcAc3TsToMp4(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'],
                       output_format='mp4',
                       hls=True,
                       test_files=['bear-640x360-ac3.ts']),
      self._GetFlags(output_hls=True))
  self._CheckTestResults('avc-ac3-ts-to-mp4')

def testAvcTsLivePlaylist(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'],
                       segmented=True,
                       hls=True,
                       test_files=['bear-640x360.ts']),
      self._GetFlags(
          output_hls=True,
          hls_playlist_type='LIVE',
          time_shift_buffer_depth=0.5))
  self._CheckTestResults('avc-ts-live-playlist')
# Live/event playlists with key rotation and segment deletion, plus the
# WebM (VP8/VP9/Vorbis) and AV1 codec paths.

def testAvcTsLivePlaylistWithKeyRotation(self):
  # NOTE(review): unlike the sibling tests this calls self.packager.Package
  # directly without asserting its return code -- confirm whether that is
  # intentional or should be assertPackageSuccess.
  self.packager.Package(
      self._GetStreams(['audio', 'video'],
                       segmented=True,
                       hls=True,
                       test_files=['bear-640x360.ts']),
      self._GetFlags(
          encryption=True,
          key_rotation=True,
          output_hls=True,
          hls_playlist_type='LIVE',
          time_shift_buffer_depth=0.5))
  self._CheckTestResults('avc-ts-live-playlist-with-key-rotation')

def testAvcTsEventPlaylist(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'],
                       segmented=True,
                       hls=True,
                       test_files=['bear-640x360.ts']),
      self._GetFlags(
          output_hls=True,
          hls_playlist_type='EVENT',
          time_shift_buffer_depth=0.5))
  self._CheckTestResults('avc-ts-event-playlist')

def testAvcTsLivePlaylistAndDashDynamicWithSegmentDeletion(self):
  self.assertPackageSuccess(
      self._GetStreams(
          ['audio'],
          output_format='mp4',
          segmented=True,
          hls=True,
          test_files=['bear-640x360.ts']),
      self._GetFlags(
          output_hls=True,
          hls_playlist_type='LIVE',
          output_dash=True,
          segment_duration=0.5,
          time_shift_buffer_depth=0.5,
          preserved_segments_outside_live_window=1))
  self._CheckTestResults(
      'avc-ts-live-playlist-dash-dynamic-with-segment-deletion')

def testVp8Webm(self):
  self.assertPackageSuccess(
      self._GetStreams(['video'], test_files=['bear-640x360.webm']),
      self._GetFlags(output_dash=True))
  self._CheckTestResults('vp8-webm')

def testVp9Webm(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio', 'video'],
                       test_files=['bear-320x240-vp9-opus.webm']),
      self._GetFlags(output_dash=True))
  self._CheckTestResults('vp9-webm')

def testVp9WebmWithBlockgroup(self):
  self.assertPackageSuccess(
      self._GetStreams(['video'], test_files=['bear-vp9-blockgroup.webm']),
      self._GetFlags(output_dash=True))
  self._CheckTestResults('vp9-webm-with-blockgroup')

def testVorbisWebm(self):
  self.assertPackageSuccess(
      self._GetStreams(['audio'],
                       test_files=['bear-320x240-audio-only.webm']),
      self._GetFlags(output_dash=True))
  self._CheckTestResults('vorbis-webm')

def testAv1Mp4(self):
  self.assertPackageSuccess(
      self._GetStreams(['video'], test_files=['bear-av1.mp4']),
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('av1-mp4')

def testAv1Mp4ToWebM(self):
  self.assertPackageSuccess(
      self._GetStreams(['video'],
                       output_format='webm',
                       test_files=['bear-av1.mp4']),
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('av1-mp4-to-webm')

def testAv1WebM(self):
  self.assertPackageSuccess(
      self._GetStreams(['video'],
                       output_format='mp4',
                       test_files=['bear-av1.webm']),
      self._GetFlags(output_dash=True, output_hls=True))
  self._CheckTestResults('av1-webm')
def testEncryption(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(encryption=True, output_dash=True))
self._CheckTestResults('encryption', verify_decryption=True)
def testEncryptionWithMultiDrms(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(
encryption=True,
protection_systems='Widevine,PlayReady,FairPlay,Marlin',
output_dash=True,
output_hls=True))
self._CheckTestResults('encryption-with-multi-drms')
# Test deprecated flag --enable_fixed_key_encryption, which is still
# supported currently.
def testEncryptionUsingFixedKey(self):
flags = self._GetFlags(output_dash=True) + [
'--enable_fixed_key_encryption', '--key_id={0}'.format(
self.encryption_key_id), '--key={0}'.format(self.encryption_key),
'--clear_lead={0}'.format(self.clear_lead), '--iv={0}'.format(
self.encryption_iv)
]
self.assertPackageSuccess(self._GetStreams(['audio', 'video']), flags)
self._CheckTestResults('encryption-using-fixed-key', verify_decryption=True)
def testEncryptionMultiKeys(self):
audio_key_id = '10111213141516171819202122232425'
audio_key = '11121314151617181920212223242526'
video_key_id = '20212223242526272829303132333435'
video_key = '21222324252627282930313233343536'
flags = self._GetFlags(output_dash=True) + [
'--enable_raw_key_encryption',
'--keys=label=AUDIO:key_id={0}:key={1},label=SD:key_id={2}:key={3}'.
format(audio_key_id, audio_key,
video_key_id, video_key), '--clear_lead={0}'.format(
self.clear_lead), '--iv={0}'.format(self.encryption_iv)
]
self.assertPackageSuccess(self._GetStreams(['audio', 'video']), flags)
self._CheckTestResults('encryption-multi-keys')
def testEncryptionMultiKeysWithStreamLabel(self):
audio_key_id = '20212223242526272829303132333435'
audio_key = '21222324252627282930313233343536'
video_key_id = '10111213141516171819202122232425'
video_key = '11121314151617181920212223242526'
flags = self._GetFlags(output_dash=True) + [
'--enable_raw_key_encryption',
'--keys=label=MyAudio:key_id={0}:key={1},label=:key_id={2}:key={3}'.
format(audio_key_id, audio_key,
video_key_id, video_key), '--clear_lead={0}'.format(
self.clear_lead), '--iv={0}'.format(self.encryption_iv)
]
# DRM label 'MyVideo' is not defined, will fall back to the key for the
# empty default label.
streams = [
self._GetStream('audio', drm_label='MyAudio'),
self._GetStream('video', drm_label='MyVideo')
]
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('encryption-multi-keys-with-stream-label')
def testExplicitPssh(self):
flags = self._GetFlags(encryption=True, output_dash=True) + [
'--pssh={0}'.format(self.pssh),
]
self.assertPackageSuccess(self._GetStreams(['audio', 'video']), flags)
self._CheckTestResults('encryption-using-explicit-pssh')
def testEncryptionOfOnlyVideoStream(self):
streams = [
self._GetStream('audio', skip_encryption=True),
self._GetStream('video')
]
flags = self._GetFlags(encryption=True, output_dash=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults(
'encryption-of-only-video-stream', verify_decryption=True)
def testEncryptionAndTrickPlay(self):
streams = [
self._GetStream('audio'),
self._GetStream('video'),
self._GetStream('video', trick_play_factor=1),
]
self.assertPackageSuccess(streams,
self._GetFlags(encryption=True, output_dash=True))
self._CheckTestResults('encryption-and-trick-play', verify_decryption=True)
# TODO(hmchen): Add a test case that SD and HD AdapatationSet share one trick
# play stream.
def testEncryptionAndTwoTrickPlays(self):
streams = [
self._GetStream('audio'),
self._GetStream('video'),
self._GetStream('video', trick_play_factor=1),
self._GetStream('video', trick_play_factor=2),
]
self.assertPackageSuccess(streams,
self._GetFlags(encryption=True, output_dash=True))
self._CheckTestResults('encryption-and-two-trick-plays')
def testEncryptionAndNoClearLead(self):
streams = [
self._GetStream('audio'),
self._GetStream('video')
]
self.clear_lead = 0
self.assertPackageSuccess(streams,
self._GetFlags(encryption=True, output_dash=True))
self._CheckTestResults(
'encryption-and-no-clear-lead', verify_decryption=True)
def testEncryptionAndNoPsshInStream(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(
encryption=True, include_pssh_in_stream=False, output_dash=True))
self._CheckTestResults('encryption-and-no-pssh-in-stream')
def testEncryptionCbc1(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(
encryption=True, protection_scheme='cbc1', output_dash=True))
self._CheckTestResults('encryption-cbc-1', verify_decryption=True)
def testEncryptionCens(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(
encryption=True, protection_scheme='cens', output_dash=True))
self._CheckTestResults('encryption-cens', verify_decryption=True)
def testEncryptionCbcs(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(
encryption=True, protection_scheme='cbcs', output_dash=True))
self._CheckTestResults('encryption-cbcs', verify_decryption=True)
def testEncryptionCbcsWithFullProtection(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(
encryption=True,
protection_scheme='cbcs',
crypt_byte_block=10,
skip_byte_block=0,
output_dash=True))
self._CheckTestResults(
'encryption-cbcs-with-full-protection', verify_decryption=True)
def testEncryptionAndAdCues(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], hls=True),
self._GetFlags(encryption=True, output_dash=True, output_hls=True,
ad_cues='1.5'))
self._CheckTestResults('encryption-and-ad-cues')
def testEncryptionAndAdCuesAndDashTrickPlay(self):
streams = [
self._GetStream('audio'),
self._GetStream('video'),
self._GetStream('video', trick_play_factor=1),
]
self.assertPackageSuccess(
streams, self._GetFlags(
encryption=True, output_dash=True, ad_cues='1.5'))
self._CheckTestResults('encryption-and-ad-cues-and-dash-trick-play')
def testEncryptionAndAdCuesSplitContent(self):
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'], hls=True, split_content_on_ad_cues=True),
self._GetFlags(
encryption=True, output_dash=True, output_hls=True, ad_cues='1.5'))
self._CheckTestResults('encryption-and-ad-cues-split-content')
def testHlsAudioVideoTextWithAdCues(self):
streams = [
self._GetStream('audio', hls=True, segmented=True),
self._GetStream('video', hls=True, segmented=True),
self._GetStream(
'text', hls=True, segmented=True, test_file='bear-english.vtt')
]
flags = self._GetFlags(output_hls=True, ad_cues='1.5')
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('hls-audio-video-text-with-ad-cues')
def testVttTextToMp4WithAdCues(self):
streams = [
self._GetStream('audio', hls=True, segmented=True),
self._GetStream('video', hls=True, segmented=True),
self._GetStream(
'text',
hls=True,
segmented=True,
test_file='bear-english.vtt',
output_format='mp4')
]
flags = self._GetFlags(output_dash=True, output_hls=True,
generate_static_live_mpd=True, ad_cues='1.5')
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('vtt-text-to-mp4-with-ad-cues')
def testWebmSubsampleEncryption(self):
streams = [
self._GetStream('video', test_file='bear-320x180-vp9-altref.webm')
]
self.assertPackageSuccess(streams,
self._GetFlags(encryption=True, output_dash=True))
self._CheckTestResults('webm-subsample-encryption', verify_decryption=True)
def testWebmVp9FullSampleEncryption(self):
streams = [
self._GetStream('video', test_file='bear-320x180-vp9-altref.webm')
]
flags = self._GetFlags(
encryption=True, vp9_subsample_encryption=False, output_dash=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults(
'webm-vp9-full-sample-encryption', verify_decryption=True)
def testAvcTsWithEncryption(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(encryption=True, output_hls=True))
self._CheckTestResults('avc-ts-with-encryption')
def testAvcTsAacPackedAudioWithEncryption(self):
# Currently we only support live packaging for ts.
streams = [
self._GetStream(
'audio',
output_format='aac',
segmented=True,
hls=True,
test_file='bear-640x360.ts'),
self._GetStream(
'video', segmented=True, hls=True, test_file='bear-640x360.ts')
]
flags = self._GetFlags(encryption=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('avc-ts-aac-packed-audio-with-encryption')
def testAvcTsWithEncryptionAndFairPlay(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(
encryption=True, protection_systems='FairPlay', output_hls=True))
self._CheckTestResults('avc-ts-with-encryption-and-fairplay')
def testAvcAc3TsWithEncryption(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360-ac3.ts']),
self._GetFlags(encryption=True, output_hls=True))
self._CheckTestResults('avc-ac3-ts-with-encryption')
def testAvcTsAc3PackedAudioWithEncryption(self):
# Currently we only support live packaging for ts.
streams = [
self._GetStream(
'audio',
output_format='ac3',
segmented=True,
hls=True,
test_file='bear-640x360-ac3.ts'),
self._GetStream(
'video', segmented=True, hls=True, test_file='bear-640x360-ac3.ts')
]
flags = self._GetFlags(encryption=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('avc-ts-ac3-packed-audio-with-encryption')
def testAvcTsWithEncryptionExerciseEmulationPrevention(self):
self.encryption_key = 'ad7e9786def9159db6724be06dfcde7a'
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(
['video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['sintel-1024x436.mp4']),
self._GetFlags(
encryption=True,
output_hls=True))
self._CheckTestResults(
'avc-ts-with-encryption-exercise-emulation-prevention')
def testWebmWithEncryption(self):
streams = [self._GetStream('video', test_file='bear-640x360.webm')]
flags = self._GetFlags(encryption=True, output_dash=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('webm-with-encryption', verify_decryption=True)
def testHevcWithEncryption(self):
streams = [
self._GetStream('video', test_file='bear-640x360-hevc.mp4')
]
flags = self._GetFlags(encryption=True, output_dash=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('hevc-with-encryption', verify_decryption=True)
def testHdr10WithEncryption(self):
streams = [
self._GetStream('video', test_file='bear-640x360-hevc-hdr10.mp4')
]
flags = self._GetFlags(encryption=True, output_dash=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('hdr10-with-encryption')
def testDolbyVisionProfile5WithEncryption(self):
streams = [
self._GetStream('video', test_file='sparks_dovi_5.mp4')
]
flags = self._GetFlags(encryption=True, output_dash=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('dolby-vision-profile-5-with-encryption')
def testDolbyVisionProfile8WithEncryption(self):
streams = [
self._GetStream('video', test_file='sparks_dovi_8.mp4')
]
flags = self._GetFlags(encryption=True, output_dash=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('dolby-vision-profile-8-with-encryption')
def testVp8Mp4WithEncryption(self):
streams = [
self._GetStream('video',
output_format='mp4',
test_file='bear-640x360.webm')
]
flags = self._GetFlags(encryption=True, output_dash=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('vp8-mp4-with-encryption', verify_decryption=True)
def testOpusVp9Mp4WithEncryption(self):
streams = [
self._GetStream('audio',
output_format='mp4',
test_file='bear-320x240-vp9-opus.webm'),
self._GetStream('video',
output_format='mp4',
test_file='bear-320x240-vp9-opus.webm'),
]
flags = self._GetFlags(encryption=True, output_dash=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults(
'opus-vp9-mp4-with-encryption', verify_decryption=True)
def testFlacWithEncryption(self):
streams = [
self._GetStream('audio', test_file='bear-flac.mp4'),
]
flags = self._GetFlags(encryption=True, output_dash=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('flac-with-encryption', verify_decryption=True)
def testAv1Mp4WithEncryption(self):
self.assertPackageSuccess(
self._GetStreams(['video'], test_files=['bear-av1.mp4']),
self._GetFlags(encryption=True, output_dash=True, output_hls=True))
self._CheckTestResults('av1-mp4-with-encryption', verify_decryption=True)
def testAv1WebMWithEncryption(self):
self.assertPackageSuccess(
self._GetStreams(['video'], test_files=['bear-av1.webm']),
self._GetFlags(encryption=True, output_dash=True, output_hls=True))
self._CheckTestResults('av1-webm-with-encryption', verify_decryption=True)
def testWvmInput(self):
self.encryption_key = '9248d245390e0a49d483ba9b43fc69c3'
self.assertPackageSuccess(
self._GetStreams(['0', '1', '2', '3'],
output_format='mp4',
test_files=['bear-multi-configs.wvm']),
self._GetFlags(decryption=True, output_dash=True))
# Output timescale is 90000.
self._CheckTestResults('wvm-input')
# TODO(kqyang): Fix shared_library not supporting strip_parameter_set_nalus
# problem.
@unittest.skipUnless(
test_env.options.libpackager_type == 'static_library',
'libpackager shared_library does not support '
'--strip_parameter_set_nalus flag.'
)
def testWvmInputWithoutStrippingParameterSetNalus(self):
self.encryption_key = '9248d245390e0a49d483ba9b43fc69c3'
self.assertPackageSuccess(
self._GetStreams(['0', '1', '2', '3'],
output_format='mp4',
test_files=['bear-multi-configs.wvm']),
self._GetFlags(
strip_parameter_set_nalus=False, decryption=True, output_dash=True))
# Output timescale is 90000.
self._CheckTestResults('wvm-input-without-stripping-parameters-set-nalus')
def testEncryptionAndRandomIv(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(encryption=True, random_iv=True, output_dash=True))
# The outputs are encrypted with random iv, so they are not the same as
# golden files.
self._CheckTestResults(
'encryption',
verify_decryption=True,
diff_files_policy=DiffFilesPolicy(
allowed_diff_files=[
'bear-640x360-audio.mp4', 'bear-640x360-video.mp4'
],
exact=True,
allow_updating_golden_files=False))
def testEncryptionAndRealClock(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(encryption=True, output_dash=True, use_fake_clock=False))
# The outputs are generated with real clock, so they are not the same as
# golden files.
self._CheckTestResults(
'encryption',
verify_decryption=True,
diff_files_policy=DiffFilesPolicy(
allowed_diff_files=[
'bear-640x360-audio.mp4', 'bear-640x360-video.mp4'
],
exact=True,
allow_updating_golden_files=False))
def testEncryptionAndNonDashIfIop(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(encryption=True, dash_if_iop=False, output_dash=True))
self._CheckTestResults('encryption-and-non-dash-if-iop')
def testEncryptionAndOutputMediaInfo(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(encryption=True, output_media_info=True))
self._CheckTestResults('encryption-and-output-media-info')
def testEncryptionAndOutputMediaInfoAndMpdFromMediaInfo(self):
self.assertPackageSuccess(
# The order is not determinstic if there are more than one
# AdaptationSets, so only one is included here.
self._GetStreams(['video']),
self._GetFlags(encryption=True, output_media_info=True))
self.assertMpdGeneratorSuccess()
self._CheckTestResults(
'encryption-and-output-media-info-and-mpd-from-media-info')
def testHlsSingleSegmentMp4Encrypted(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], hls=True),
self._GetFlags(encryption=True, output_hls=True))
self._CheckTestResults('hls-single-segment-mp4-encrypted')
def testEc3AndHlsSingleSegmentMp4Encrypted(self):
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'], hls=True, test_files=['bear-640x360-ec3.mp4']),
self._GetFlags(encryption=True, output_hls=True))
self._CheckTestResults('ec3-and-hls-single-segment-mp4-encrypted')
def testEc3PackedAudioEncrypted(self):
streams = [
self._GetStream(
'audio',
output_format='ec3',
segmented=True,
hls=True,
test_file='bear-640x360-ec3.mp4'),
self._GetStream(
'video',
output_format='ts',
segmented=True,
hls=True,
test_file='bear-640x360-ec3.mp4')
]
flags = self._GetFlags(encryption=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('ec3-packed-audio-encrypted')
# Test HLS with multi-segment mp4 and content in subdirectories.
def testHlsMultiSegmentMp4WithCustomPath(self):
test_file = os.path.join(self.test_data_dir, 'bear-640x360.mp4')
# {tmp}/audio/audio-init.mp4, {tmp}/audio/audio-1.m4s etc.
audio_output_prefix = os.path.join(self.tmp_dir, 'audio', 'audio')
# {tmp}/video/video-init.mp4, {tmp}/video/video-1.m4s etc.
video_output_prefix = os.path.join(self.tmp_dir, 'video', 'video')
self.assertPackageSuccess(
[
'input=%s,stream=audio,init_segment=%s-init.mp4,'
'segment_template=%s-$Number$.m4s,playlist_name=audio/audio.m3u8' %
(test_file, audio_output_prefix, audio_output_prefix),
'input=%s,stream=video,init_segment=%s-init.mp4,'
'segment_template=%s-$Number$.m4s,playlist_name=video/video.m3u8' %
(test_file, video_output_prefix, video_output_prefix),
],
self._GetFlags(output_hls=True))
self._CheckTestResults('hls-multi-segment-mp4-with-custom-path')
def testLiveProfile(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(
output_dash=True,
utc_timings='urn:mpeg:dash:utc:http-xsdate:2014='
'http://foo.bar/my_body_is_the_current_date_and_time,'
'urn:mpeg:dash:utc:http-head:2014='
'http://foo.bar/check_me_for_the_date_header'))
self._CheckTestResults('live-profile')
def testLiveProfileWithWebM(self):
streams = self._GetStreams(['audio', 'video'],
segmented=True,
test_file='bear-640x360.webm')
flags = self._GetFlags(output_dash=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('live-profile-with-webm')
def testLiveStaticProfile(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(output_dash=True, generate_static_live_mpd=True))
self._CheckTestResults('live-static-profile')
def testLiveStaticProfileWithTimeInSegmentName(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'],
segmented=True,
using_time_specifier=True),
self._GetFlags(output_dash=True, generate_static_live_mpd=True))
self._CheckTestResults('live-static-profile-with-time-in-segment-name')
def testAllowCodecSwitching(self):
streams = [
self._GetStream('video', test_file='bear-640x360-hevc.mp4'),
self._GetStream('video', test_file='bear-640x360.mp4'),
self._GetStream('video', test_file='bear-1280x720.mp4'),
self._GetStream('audio', test_file='bear-640x360.mp4'),
]
self.assertPackageSuccess(streams,
self._GetFlags(output_dash=True,
allow_codec_switching=True))
self._CheckTestResults('audio-video-with-codec-switching')
def testAllowCodecSwitchingWithEncryptionAndTrickplay(self):
streams = [
self._GetStream('video', test_file='bear-640x360-hevc.mp4'),
self._GetStream('video', test_file='bear-640x360.mp4'),
self._GetStream('video', test_file='bear-1280x720.mp4'),
self._GetStream('video', test_file='bear-1280x720.mp4',
trick_play_factor=1),
self._GetStream('audio', test_file='bear-640x360.mp4'),
]
self.assertPackageSuccess(streams,
self._GetFlags(output_dash=True,
allow_codec_switching=True,
encryption=True))
self._CheckTestResults(
'audio-video-with-codec-switching-encryption-trick-play')
def testLiveProfileAndEncryption(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(encryption=True, output_dash=True))
self._CheckTestResults('live-profile-and-encryption')
def testLiveProfileAndEncryptionAndNonDashIfIop(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(encryption=True, dash_if_iop=False, output_dash=True))
self._CheckTestResults(
'live-profile-and-encryption-and-non-dash-if-iop')
def testLiveProfileAndEncryptionAndMultFiles(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'],
segmented=True,
test_files=['bear-1280x720.mp4', 'bear-640x360.mp4',
'bear-320x180.mp4']),
self._GetFlags(encryption=True, output_dash=True))
self._CheckTestResults(
'live-profile-and-encryption-and-mult-files')
def testLiveProfileAndKeyRotation(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(encryption=True, key_rotation=True, output_dash=True))
self._CheckTestResults('live-profile-and-key-rotation')
def testLiveProfileAndKeyRotationCbcs(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(
encryption=True,
protection_scheme='cbcs',
key_rotation=True,
output_dash=True))
self._CheckTestResults('live-profile-and-key-rotation-cbcs')
def testLiveProfileAndKeyRotationAndNoPsshInStream(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(
encryption=True,
key_rotation=True,
include_pssh_in_stream=False,
output_dash=True))
self._CheckTestResults(
'live-profile-and-key-rotation-and-no-pssh-in-stream')
def testLiveProfileAndKeyRotationAndNonDashIfIop(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], segmented=True),
self._GetFlags(
encryption=True,
key_rotation=True,
dash_if_iop=False,
output_dash=True))
self._CheckTestResults(
'live-profile-and-key-rotation-and-non-dash-if-iop')
@unittest.skipUnless(test_env.has_aes_flags, 'Requires AES credentials.')
def testWidevineEncryptionWithAes(self):
flags = self._GetFlags(widevine_encryption=True, output_dash=True)
flags += [
'--signer=widevine_test',
'--aes_signing_key=' + test_env.options.aes_signing_key,
'--aes_signing_iv=' + test_env.options.aes_signing_iv
]
self.assertPackageSuccess(self._GetStreams(['audio', 'video']), flags)
self._AssertStreamInfo(self.output[0], 'is_encrypted: true')
self._AssertStreamInfo(self.output[1], 'is_encrypted: true')
@unittest.skipUnless(test_env.has_aes_flags, 'Requires AES credentials.')
def testWidevineEncryptionWithAesAndMultFiles(self):
flags = self._GetFlags(widevine_encryption=True, output_dash=True)
flags += [
'--signer=widevine_test',
'--aes_signing_key=' + test_env.options.aes_signing_key,
'--aes_signing_iv=' + test_env.options.aes_signing_iv
]
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'],
test_files=['bear-1280x720.mp4', 'bear-640x360.mp4',
'bear-320x180.mp4']), flags)
with open(self.mpd_output, 'rb') as f:
logging.info(f.read())
# TODO(kqyang): Add some validations.
@unittest.skipUnless(test_env.has_aes_flags, 'Requires AES credentials.')
def testKeyRotationWithAes(self):
flags = self._GetFlags(
widevine_encryption=True, key_rotation=True, output_dash=True)
flags += [
'--signer=widevine_test',
'--aes_signing_key=' + test_env.options.aes_signing_key,
'--aes_signing_iv=' + test_env.options.aes_signing_iv
]
self.assertPackageSuccess(self._GetStreams(['audio', 'video']), flags)
self._AssertStreamInfo(self.output[0], 'is_encrypted: true')
self._AssertStreamInfo(self.output[1], 'is_encrypted: true')
@unittest.skipUnless(test_env.has_rsa_flags, 'Requires RSA credentials.')
def testWidevineEncryptionWithRsa(self):
flags = self._GetFlags(widevine_encryption=True, output_dash=True)
flags += [
'--signer=widevine_test',
'--rsa_signing_key_path=' + test_env.options.rsa_signing_key_path
]
self.assertPackageSuccess(self._GetStreams(['audio', 'video']), flags)
self._AssertStreamInfo(self.output[0], 'is_encrypted: true')
self._AssertStreamInfo(self.output[1], 'is_encrypted: true')
def testHlsSegmentedWebVtt(self):
streams = self._GetStreams(
['audio', 'video'], output_format='ts', segmented=True)
streams += self._GetStreams(
['text'],
test_files=['bear-english.vtt'],
segmented=True,
hls_characteristics='public.accessibility.transcribes-spoken-dialog;'
'private.accessibility.widevine-special')
flags = self._GetFlags(output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('hls-segmented-webvtt')
def testBandwidthOverride(self):
streams = [
self._GetStream('audio', hls=True, bandwidth=11111),
self._GetStream('video', hls=True, bandwidth=44444)
]
flags = self._GetFlags(output_dash=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
self._CheckTestResults('bandwidth-override')
class PackagerCommandParsingTest(PackagerAppTest):
def testEncryptionWithIncorrectKeyIdLength1(self):
self.encryption_key_id = self.encryption_key_id[0:-2]
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithIncorrectKeyIdLength2(self):
self.encryption_key_id += '12'
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithInvalidKeyIdValue(self):
self.encryption_key_id = self.encryption_key_id[0:-1] + 'g'
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithIncorrectKeyLength1(self):
self.encryption_key = self.encryption_key[0:-2]
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithIncorrectKeyLength2(self):
self.encryption_key += '12'
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithInvalidKeyValue(self):
self.encryption_key = self.encryption_key[0:-1] + 'g'
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithIncorrectIvLength1(self):
self.encryption_iv = self.encryption_iv[0:-2]
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithIncorrectIvLength2(self):
self.encryption_iv += '12'
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithInvalidIvValue(self):
self.encryption_iv = self.encryption_iv[0:-1] + 'g'
packaging_result = self.packager.Package(
self._GetStreams(['video']), self._GetFlags(encryption=True))
self.assertEqual(packaging_result, 1)
def testEncryptionWithInvalidPsshValue1(self):
packaging_result = self.packager.Package(
self._GetStreams(['video']),
self._GetFlags(encryption=True) + ['--pssh=ag'])
self.assertEqual(packaging_result, 1)
def testEncryptionWithInvalidPsshValue2(self):
packaging_result = self.packager.Package(
self._GetStreams(['video']),
self._GetFlags(encryption=True) + ['--pssh=1122'])
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionInvalidContentId(self):
self.widevine_content_id += 'ag'
flags = self._GetFlags(widevine_encryption=True)
flags += [
'--signer=widevine_test', '--aes_signing_key=1122',
'--aes_signing_iv=3344'
]
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionInvalidAesSigningKey(self):
flags = self._GetFlags(widevine_encryption=True)
flags += [
'--signer=widevine_test', '--aes_signing_key=11ag',
'--aes_signing_iv=3344'
]
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionInvalidAesSigningIv(self):
flags = self._GetFlags(widevine_encryption=True)
flags += [
'--signer=widevine_test', '--aes_signing_key=1122',
'--aes_signing_iv=33ag'
]
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionMissingAesSigningKey(self):
flags = self._GetFlags(widevine_encryption=True)
flags += ['--signer=widevine_test', '--aes_signing_iv=3344']
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionMissingAesSigningIv(self):
flags = self._GetFlags(widevine_encryption=True)
flags += ['--signer=widevine_test', '--aes_signing_key=1122']
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionMissingSigner1(self):
flags = self._GetFlags(widevine_encryption=True)
flags += ['--aes_signing_key=1122', '--aes_signing_iv=3344']
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionMissingSigner2(self):
flags = self._GetFlags(widevine_encryption=True)
flags += ['--rsa_signing_key_path=/tmp/test']
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionSignerOnly(self):
flags = self._GetFlags(widevine_encryption=True)
flags += ['--signer=widevine_test']
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testWidevineEncryptionAesSigningAndRsaSigning(self):
flags = self._GetFlags(widevine_encryption=True)
flags += [
'--signer=widevine_test',
'--aes_signing_key=1122',
'--aes_signing_iv=3344',
'--rsa_signing_key_path=/tmp/test',
]
packaging_result = self.packager.Package(
self._GetStreams(['audio', 'video']), flags)
self.assertEqual(packaging_result, 1)
def testAudioVideoWithNotExistText(self):
audio_video_stream = self._GetStreams(['audio', 'video'])
text_stream = self._GetStreams(['text'], test_files=['not-exist.vtt'])
packaging_result = self.packager.Package(audio_video_stream + text_stream,
self._GetFlags())
# Expect the test to fail but we do not expect a crash.
self.assertNotEqual(packaging_result, 0)
self.assertLess(packaging_result, 10)
def testInconsistentOutputAndSegmentTemplateFormat(self):
    """An mp4 init segment combined with a webm segment template must be rejected."""
    test_file = os.path.join(self.test_data_dir, 'bear-640x360.mp4')
    prefix = os.path.join(self.tmp_dir, 'video')
    stream_descriptor = (
        'input=%s,stream=video,init_segment=%s-init.mp4,'
        'segment_template=%s-$Number$.webm' % (test_file, prefix, prefix))
    status = self.packager.Package([stream_descriptor], self._GetFlags())
    # Expect the test to fail but we do not expect a crash.
    self.assertEqual(status, 1)
def testIncorrectEncryptionPattern(self):
    """An invalid cbcs crypt/skip byte-block pattern must be rejected."""
    bad_pattern_flags = self._GetFlags(
        encryption=True,
        protection_scheme='cbcs',
        crypt_byte_block=12,
        skip_byte_block=13,
        output_dash=True)
    status = self.packager.Package(
        self._GetStreams(['audio', 'video']), bad_pattern_flags)
    self.assertEqual(status, 1)
# Standard entry point: run this test module's cases when executed directly.
if __name__ == '__main__':
    unittest.main()
|
gregzajac/MyRent | MyRent/migrations/0002_auto_20200301_1531.py | <reponame>gregzajac/MyRent
# Generated by Django 3.0.3 on 2020-03-01 15:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (0002): adds Tenant and Agreement models
    and renames Flat's description field. Operation order is significant —
    do not edit by hand."""

    dependencies = [
        ('MyRent', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tenant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=64, verbose_name='Imię')),
                ('last_name', models.CharField(max_length=64, verbose_name='Nazwisko')),
                ('phone', models.CharField(max_length=16, null=True, verbose_name='Telefon')),
                ('email', models.CharField(max_length=64, null=True, verbose_name='E-mail')),
                ('tenant_description', models.TextField(null=True, verbose_name='Dodatkowe info')),
            ],
        ),
        # 'description' is replaced by 'flat_description' (remove, then add).
        migrations.RemoveField(
            model_name='flat',
            name='description',
        ),
        migrations.AddField(
            model_name='flat',
            name='flat_description',
            field=models.TextField(null=True, verbose_name='Dodatkowe info'),
        ),
        migrations.CreateModel(
            name='Agreement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=32, unique=True, verbose_name='Identyfikator umowy')),
                ('agreement_date', models.DateField(verbose_name='Data podpisania umowy')),
                ('date_from', models.DateField(verbose_name='Data początku najmu')),
                ('date_to', models.DateField(verbose_name='Data końca najmu')),
                ('mth_payment_value', models.FloatField(verbose_name='Miesięczny koszt wynajmu')),
                ('mth_payment_deadline', models.SmallIntegerField(verbose_name='Termin miesięcznej opłaty')),
                ('agreement_description', models.TextField(null=True, verbose_name='Dodatkowe info')),
                ('flat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MyRent.Flat', verbose_name='Wynajmowane mieszkanie')),
                ('tenant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MyRent.Tenant', verbose_name='Najemca')),
            ],
        ),
    ]
|
gregzajac/MyRent | MyRent/urls.py | from django.urls import path
from MyRent import views
# URL routes for the MyRent app. Route names are part of the app's contract:
# they are referenced by reverse_lazy() in MyRent.views and by templates.
urlpatterns = [
    # Flats
    path('', views.FlatListView.as_view(), name="flat-list"),
    path('add_flat/', views.CreateFlatView.as_view(), name="add-flat"),
    # Agreements
    path('agreement/', views.AgreementListView.as_view(), name="agreement-list"),
    path('add_agreement/', views.AgreementCreateView.as_view(), name="add-agreement"),
    path('agreement/delete/<int:pk>/', views.AgreementDeleteView.as_view(), name="delete-agreement"),
    path('agreement/modify/<int:pk>/', views.AgreementUpdateView.as_view(), name="modify-agreement"),
    path('flat/<int:pk>/', views.FlatDetailView.as_view(), name="flat-detail"),
    # Flat images
    path('add_image/<int:flat_id>/', views.ImageCreateView.as_view(), name="add-image"),
    path('delete_image/<int:pk>/', views.ImageDeleteView.as_view(), name="delete-image"),
    path('flat/delete/<int:pk>/', views.FlatDeleteView.as_view(), name="delete-flat"),
    path('flat/modify/<int:pk>/', views.FlatUpdateView.as_view(), name="modify-flat"),
    path('agreement/<int:pk>/', views.AgreementDetailView.as_view(), name="agreement-detail"),
    # Financial operations on an agreement
    path('add_operation/<int:agreement_id>/', views.OperationCreateView.as_view(), name="add-operation"),
    path('operation/delete/<int:pk>/', views.OperationDeleteView.as_view(), name="delete-operation"),
    path('operation/modify/<int:pk>/', views.OperationUpdateView.as_view(), name="modify-operation"),
    path('add_obligations/<int:id_agreement>/', views.AddObligationsView.as_view(), name="add-obligations"),
    # Tenants
    path('tenant/', views.TenantListView.as_view(), name="tenant-list"),
    path('add_tenant/', views.TenantCreateView.as_view(), name="add-tenant"),
    path('tenant/delete/<int:pk>/', views.TenantDeleteView.as_view(), name="delete-tenant"),
    path('tenant/modify/<int:pk>/', views.TenantUpdateView.as_view(), name="modify-tenant"),
]
|
gregzajac/MyRent | MyRent/migrations/0003_operation_operationdict.py | # Generated by Django 3.0.3 on 2020-03-01 20:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (0003): adds the Operation model and its
    OperationDict type dictionary. Do not edit by hand."""

    dependencies = [
        ('MyRent', '0002_auto_20200301_1531'),
    ]

    operations = [
        migrations.CreateModel(
            name='OperationDict',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='Operacja finansowa')),
            ],
        ),
        migrations.CreateModel(
            name='Operation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(verbose_name='Data operacji')),
                ('value', models.FloatField(verbose_name='Kwota operacji')),
                ('agreement', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MyRent.Agreement', verbose_name='Umowa najmu')),
                ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MyRent.OperationDict', verbose_name='Typ operacji finansowej')),
            ],
        ),
    ]
|
gregzajac/MyRent | MyRent/admin.py | from django.contrib import admin
from django.utils.safestring import mark_safe
from MyRent.models import Flat, Tenant, Agreement, Operation, OperationDict, Landlord, Image
class FlatAdmin(admin.ModelAdmin):
    """Admin change list for flats, showing availability and extra info."""
    model = Flat
    list_display = ['__str__', 'is_for_rent', 'info']
class TenantAdmin(admin.ModelAdmin):
    """Admin change list for tenants with their contact details."""
    model = Tenant
    list_display = ['__str__', 'phone', 'email', 'info']
class AgreementAdmin(admin.ModelAdmin):
    """Admin change list for rental agreements with key dates and payment terms."""
    model = Agreement
    list_display = [
        'code', 'agreement_date', 'date_from', 'date_to',
        'mth_payment_value', 'mth_payment_deadline', 'info',
    ]
class OperationAdmin(admin.ModelAdmin):
    """Admin change list for financial operations."""
    model = Operation
    list_display = ['__str__', 'info']
class ImageAdmin(admin.ModelAdmin):
    """Admin for flat images with a read-only inline preview of the picture."""
    model = Image
    readonly_fields = ["picture_image"]

    def picture_image(self, obj):
        """Render the stored picture as an <img> tag for the admin form."""
        # Fix: the height attribute was emitted unquoted (height=123),
        # which is invalid HTML; quote it like src and width.
        return mark_safe('<img src="{url}" width="{width}" height="{height}" />'.format(
            url=obj.picture.url,
            width=obj.picture.width,
            height=obj.picture.height,
        ))
# Register models with the admin site; models without a custom ModelAdmin
# (OperationDict, Landlord) get the default admin.
admin.site.register(Flat, FlatAdmin)
admin.site.register(Tenant, TenantAdmin)
admin.site.register(Agreement, AgreementAdmin)
admin.site.register(Operation, OperationAdmin)
admin.site.register(OperationDict)
admin.site.register(Landlord)
admin.site.register(Image, ImageAdmin)
|
gregzajac/MyRent | MyRent/apps.py | <reponame>gregzajac/MyRent
from django.apps import AppConfig
class MyrentConfig(AppConfig):
    """App configuration for the MyRent Django application."""
    name = 'MyRent'
|
gregzajac/MyRent | MyRent/migrations/0005_auto_20200302_1135.py | # Generated by Django 3.0.3 on 2020-03-02 10:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (0005): unifies description fields under
    'info', adds the Landlord model, and links flats to landlords. Operation
    order is significant — do not edit by hand."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('MyRent', '0004_tenant_user'),
    ]

    operations = [
        migrations.RenameField(
            model_name='agreement',
            old_name='agreement_description',
            new_name='info',
        ),
        migrations.RenameField(
            model_name='flat',
            old_name='flat_description',
            new_name='info',
        ),
        migrations.RenameField(
            model_name='tenant',
            old_name='tenant_description',
            new_name='info',
        ),
        migrations.AddField(
            model_name='operation',
            name='info',
            field=models.TextField(null=True, verbose_name='Dodatkowe info'),
        ),
        migrations.AlterField(
            model_name='tenant',
            name='user',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Najemca'),
        ),
        migrations.CreateModel(
            name='Landlord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=64, verbose_name='Imię')),
                ('last_name', models.CharField(max_length=64, verbose_name='Nazwisko')),
                ('phone', models.CharField(max_length=16, null=True, verbose_name='Telefon')),
                ('email', models.CharField(max_length=64, null=True, verbose_name='E-mail')),
                ('info', models.TextField(null=True, verbose_name='Dodatkowe info')),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Właściciel')),
            ],
        ),
        migrations.AddField(
            model_name='flat',
            name='landlord',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='MyRent.Landlord', verbose_name='Właściciel'),
        ),
    ]
|
gregzajac/MyRent | MyRent/views.py | <gh_stars>0
from datetime import datetime, timedelta
from django.contrib.auth.mixins import UserPassesTestMixin
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from MyRent.forms import OperationAgreementForm, ImageFlatForm
from MyRent.models import Flat, Agreement, Operation, Tenant, OperationDict, Image
class FlatListView(ListView):
    """List flats: staff see everything (available first); others see only
    flats currently available for rent."""
    model = Flat

    def get_queryset(self):
        if self.request.user.is_superuser:
            return Flat.objects.all().order_by("-is_for_rent")
        return Flat.objects.filter(is_for_rent=True)
class FlatDetailView(DetailView):
    """Flat detail page; exposes the flat's images to the template as 'images'."""
    model = Flat

    def get_context_data(self, *, object_list=None, **kwargs):
        ctx = super().get_context_data(object_list=None, **kwargs)
        ctx["images"] = self.object.image_set.all()
        return ctx
class AgreementListView(ListView):
    """List agreements, newest first: all of them for superusers, only the
    current user's own for tenants.

    Fix: the original did Tenant.objects.get(user=user), which raised
    Tenant.DoesNotExist (a 500 error) for anonymous users and for users
    without a Tenant record; filter through the relation instead.
    """
    model = Agreement

    def get_queryset(self):
        user = self.request.user
        if user.is_superuser:
            return Agreement.objects.all().order_by("-agreement_date")
        if not user.is_authenticated:
            # Anonymous visitors have no agreements.
            return Agreement.objects.none()
        return Agreement.objects.filter(tenant__user=user).order_by("-agreement_date")
class AgreementDetailView(DetailView):
    """Agreement detail page with its operations (ordered by date) and the
    running balance (sum of operation values) in the context."""
    model = Agreement

    def get_context_data(self, *, object_list=None, **kwargs):
        ctx = super().get_context_data(object_list=None, **kwargs)
        operations = Operation.objects.filter(agreement=self.object).order_by("date")
        ctx["operations"] = operations
        ctx["balance"] = sum(op.value for op in operations)
        return ctx
class AddObligationsView(View):
    """Create the missing monthly rent obligations for an agreement.

    Walks month-by-month from the agreement's start to min(today, date_to)
    and inserts a negative Operation for every month that does not already
    have one. Only superusers may trigger it.

    Fix: the original returned None (a 500 error) for non-superusers; the
    redirect is now returned unconditionally.
    """

    @staticmethod
    def _first_of_next_month(first_of_month):
        # Adding 35 days to the 1st always lands in the following month;
        # snap back to day 1 (same trick as the original inline code).
        shifted = first_of_month + timedelta(days=35)
        return datetime(shifted.year, shifted.month, 1).date()

    def get(self, request, id_agreement):
        if request.user.is_superuser:
            obligation_type = OperationDict.objects.get(pk=1)  # the 'charge' operation type
            agreement = Agreement.objects.get(pk=id_agreement)
            existing = agreement.operation_set.filter(type=obligation_type)
            # Months (normalized to the 1st) that already carry an obligation.
            charged_months = {
                datetime(op.date.year, op.date.month, 1).date() for op in existing
            }
            start_date = datetime(agreement.date_from.year, agreement.date_from.month, 1).date()
            if agreement.date_from.day > 1:
                # A mid-month start is first charged in the following month.
                start_date = self._first_of_next_month(start_date)
            end_date = min(datetime.now().date(), agreement.date_to)
            while start_date <= end_date:
                if start_date not in charged_months:  # add the missing charge
                    Operation.objects.create(agreement=agreement,
                                             type=obligation_type,
                                             date=start_date,
                                             value=-agreement.mth_payment_value,
                                             info=f"Naliczenie za okres {start_date.year}/{start_date.month}")
                start_date = self._first_of_next_month(start_date)
        return redirect(f"/myrent/agreement/{id_agreement}")
class CreateFlatView(UserPassesTestMixin, CreateView):
    """Flat creation form; access is restricted via UserPassesTestMixin."""
    model = Flat
    fields = "__all__"
    success_url = reverse_lazy('flat-list')

    def test_func(self):
        # Only superusers may add flats.
        return self.request.user.is_superuser
class FlatDeleteView(DeleteView):
    """Confirm-and-delete view for a flat; returns to the flat list."""
    model = Flat
    success_url = reverse_lazy('flat-list')
class FlatUpdateView(UpdateView):
    """Edit form for a flat; uses the *_update_form.html template."""
    model = Flat
    fields = "__all__"
    template_name_suffix = '_update_form'
    success_url = reverse_lazy('flat-list')
class AgreementCreateView(CreateView):
    """Create an agreement; only flats still available for rent can be chosen."""
    model = Agreement
    fields = "__all__"
    success_url = reverse_lazy('agreement-list')

    def get_form(self, form_class=None):
        form = super().get_form(form_class)
        available_flats = Flat.objects.filter(is_for_rent=True)
        form.fields['flat'].queryset = available_flats
        return form
class AgreementDeleteView(DeleteView):
    """Confirm-and-delete view for an agreement; returns to the agreement list."""
    model = Agreement
    success_url = reverse_lazy('agreement-list')
class AgreementUpdateView(UpdateView):
    """Edit form for an agreement; uses the *_update_form.html template."""
    model = Agreement
    fields = "__all__"
    template_name_suffix = '_update_form'
    success_url = reverse_lazy('agreement-list')
class OperationCreateView(View):
    """Manually add a financial Operation to a given agreement.

    Fix: on an invalid form the original redirected to the agreement page
    without saving and without showing errors; the bound form is now
    re-rendered so validation errors are visible.
    """

    def get(self, request, agreement_id):
        agreement = Agreement.objects.get(pk=agreement_id)
        ctx = {
            "form": OperationAgreementForm(),
            "agreement": agreement
        }
        return render(request, "MyRent/operation_form.html", ctx)

    def post(self, request, agreement_id):
        agreement = Agreement.objects.get(pk=agreement_id)
        form = OperationAgreementForm(request.POST)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.agreement = agreement  # the FK is excluded from the form
            obj.save()
            return redirect(reverse_lazy('agreement-detail', kwargs={"pk": agreement_id}))
        # Invalid input: show the form again with its error messages.
        ctx = {
            "form": form,
            "agreement": agreement
        }
        return render(request, "MyRent/operation_form.html", ctx)
class OperationDeleteView(DeleteView):
    """Delete an operation, then return to its agreement's detail page."""
    model = Operation

    def get_success_url(self):
        return reverse_lazy('agreement-detail',
                            kwargs={"pk": self.object.agreement.id})
class OperationUpdateView(UpdateView):
    """Edit an operation (type/date/value/info), then return to its agreement."""
    model = Operation
    fields = ["type", "date", "value", "info"]
    template_name_suffix = '_update_form'

    def get_success_url(self):
        return reverse_lazy('agreement-detail',
                            kwargs={"pk": self.object.agreement.id})
class ImageCreateView(View):
    """Upload an image for a given flat.

    Fix: on an invalid form the original redirected to the flat page without
    saving and without showing errors; the bound form is now re-rendered so
    validation errors are visible.
    """

    def get(self, request, flat_id):
        flat = Flat.objects.get(pk=flat_id)
        ctx = {
            "form": ImageFlatForm(),
            "flat": flat
        }
        return render(request, "MyRent/image_form.html", ctx)

    def post(self, request, flat_id):
        flat = Flat.objects.get(pk=flat_id)
        # request.FILES carries the uploaded picture.
        form = ImageFlatForm(request.POST, request.FILES)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.flat = flat  # the FK is excluded from the form
            obj.save()
            return redirect(reverse_lazy('flat-detail', kwargs={"pk": flat_id}))
        # Invalid input: show the form again with its error messages.
        ctx = {
            "form": form,
            "flat": flat
        }
        return render(request, "MyRent/image_form.html", ctx)
class ImageDeleteView(DeleteView):
    """Delete a flat image, then return to the flat's detail page."""
    model = Image

    def get_success_url(self):
        return reverse_lazy('flat-detail', kwargs={"pk": self.object.flat.id})
class TenantListView(ListView):
    """Plain list of all tenants."""
    model = Tenant
class TenantCreateView(CreateView):
    """Tenant creation form; returns to the tenant list on success."""
    model = Tenant
    fields = "__all__"
    success_url = reverse_lazy('tenant-list')
class TenantUpdateView(UpdateView):
    """Edit form for a tenant; uses the *_update_form.html template."""
    model = Tenant
    fields = "__all__"
    template_name_suffix = '_update_form'
    success_url = reverse_lazy('tenant-list')
class TenantDeleteView(DeleteView):
    """Confirm-and-delete view for a tenant; returns to the tenant list."""
    model = Tenant
    success_url = reverse_lazy('tenant-list')
|
gregzajac/MyRent | MyRent/migrations/0008_auto_20200304_1150.py | # Generated by Django 3.0.3 on 2020-03-04 10:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (0008): Polish verbose names for the
    admin and the new Image model. Do not edit by hand."""

    dependencies = [
        ('MyRent', '0007_auto_20200303_1439'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='agreement',
            options={'verbose_name': 'Umowa', 'verbose_name_plural': 'Umowy'},
        ),
        migrations.AlterModelOptions(
            name='flat',
            options={'verbose_name': 'Mieszkanie', 'verbose_name_plural': 'Mieszkania'},
        ),
        migrations.AlterModelOptions(
            name='landlord',
            options={'verbose_name': 'Właściciel', 'verbose_name_plural': 'Właściciele'},
        ),
        migrations.AlterModelOptions(
            name='operation',
            options={'verbose_name': 'Operacja finansowa', 'verbose_name_plural': 'Operacje finansowe'},
        ),
        migrations.AlterModelOptions(
            name='operationdict',
            options={'verbose_name': 'Typ operacji finansowej', 'verbose_name_plural': 'Typy operacji finansowych'},
        ),
        migrations.AlterModelOptions(
            name='tenant',
            options={'verbose_name': 'Najemca', 'verbose_name_plural': 'Najemcy'},
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(default='pictures/None/no-img.png', upload_to='pictures/')),
                ('name', models.CharField(blank=True, max_length=64, null=True)),
                ('flat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MyRent.Flat')),
            ],
            options={
                'verbose_name': 'Zdjęcie',
                'verbose_name_plural': 'Zdjęcia',
            },
        ),
    ]
|
gregzajac/MyRent | FinalProject/urls.py | """FinalProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth.views import PasswordChangeView, PasswordChangeDoneView
from django.urls import path, include
from MyRent import views
from django.conf import settings
from django.conf.urls.static import static
# Project-level URL configuration: admin, the MyRent app, auth views, and a
# customized password-change flow; media files are served via static() in
# development.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('myrent/', include('MyRent.urls')),
    path('accounts/', include('django.contrib.auth.urls')),
    # Custom templates for password change override the defaults from
    # django.contrib.auth.urls.
    path('password_change/',
         PasswordChangeView.as_view(template_name="registration/pwd_change.html",
                                    success_url="/password_change/done/"),
         name='user-pwd-change'),
    path('password_change/done/',
         PasswordChangeDoneView.as_view(template_name="registration/pwd_change_done.html"),
         name='user-pwd-change-done'),
    path('', views.FlatListView.as_view(), name='home')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
gregzajac/MyRent | MyRent/migrations/0010_auto_20200304_1229.py | <reponame>gregzajac/MyRent
# Generated by Django 3.0.3 on 2020-03-04 11:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (0010): renames Image.name to info,
    makes the 'info' fields blankable, and adds Polish verbose names to
    Image fields. Do not edit by hand."""

    dependencies = [
        ('MyRent', '0009_auto_20200304_1225'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='image',
            name='name',
        ),
        migrations.AddField(
            model_name='image',
            name='info',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Opis zdjęcia'),
        ),
        migrations.AlterField(
            model_name='agreement',
            name='info',
            field=models.TextField(blank=True, null=True, verbose_name='Dodatkowe info'),
        ),
        migrations.AlterField(
            model_name='flat',
            name='info',
            field=models.TextField(blank=True, null=True, verbose_name='Dodatkowe info'),
        ),
        migrations.AlterField(
            model_name='image',
            name='flat',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MyRent.Flat', verbose_name='Mieszkanie dot. zdjęcia'),
        ),
        migrations.AlterField(
            model_name='image',
            name='picture',
            field=models.ImageField(default='no-img.png', upload_to='', verbose_name='Zdjęcie'),
        ),
        migrations.AlterField(
            model_name='landlord',
            name='info',
            field=models.TextField(blank=True, null=True, verbose_name='Dodatkowe info'),
        ),
        migrations.AlterField(
            model_name='operation',
            name='info',
            field=models.TextField(blank=True, null=True, verbose_name='Dodatkowe info'),
        ),
    ]
|
gregzajac/MyRent | MyRent/migrations/0009_auto_20200304_1225.py | <filename>MyRent/migrations/0009_auto_20200304_1225.py
# Generated by Django 3.0.3 on 2020-03-04 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (0009): simplifies Image.picture's
    default and upload location. Do not edit by hand."""

    dependencies = [
        ('MyRent', '0008_auto_20200304_1150'),
    ]

    operations = [
        migrations.AlterField(
            model_name='image',
            name='picture',
            field=models.ImageField(default='no-img.png', upload_to=''),
        ),
    ]
|
gregzajac/MyRent | MyRent/migrations/0007_auto_20200303_1439.py | # Generated by Django 3.0.3 on 2020-03-03 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (0007): adds Flat.is_for_rent and gives
    OperationDict.plus_minus explicit PLUS/MINUS choices. Do not edit by hand."""

    dependencies = [
        ('MyRent', '0006_auto_20200303_0943'),
    ]

    operations = [
        migrations.AddField(
            model_name='flat',
            name='is_for_rent',
            field=models.BooleanField(default=True, verbose_name='Czy jest do wynajęcia'),
        ),
        migrations.AlterField(
            model_name='operationdict',
            name='plus_minus',
            field=models.SmallIntegerField(choices=[(1, 'PLUS'), (2, 'MINUS')], verbose_name='Wpływ na saldo rozliczeń'),
        ),
    ]
|
gregzajac/MyRent | MyRent/forms.py | from django.forms import ModelForm
from MyRent.models import Operation, Image
class OperationAgreementForm(ModelForm):
    """Operation form; 'agreement' is excluded and assigned by the view."""
    class Meta:
        model = Operation
        exclude = ["agreement"]
class ImageFlatForm(ModelForm):
    """Image upload form; 'flat' is excluded and assigned by the view."""
    class Meta:
        model = Image
        exclude = ["flat"]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.