content stringlengths 5 1.05M |
|---|
"""
Calculate sentiment score, word count, gets parsed sentence
and words array from news
"""
from datetime import datetime
from misc.analysis import *
from models import Word
def news_with_one_company():
    """Score sentiment for news mapped to exactly one company and save a CSV.

    Reads the raw news CSV, applies sentence_info_pd row-wise with the
    positive/negative word lists from the DB, and writes the enriched file.
    """
    positive_words = list(Word.select().where(Word.is_positive == True))
    negative_words = list(Word.select().where(Word.is_positive == False))
    frame = pd.read_csv("./../../data/news/news_with_one_company.csv")
    frame[["sent_score", "word_count", "words", "parsed_sentence"]] = frame.apply(
        sentence_info_pd, axis=1, args=(positive_words, negative_words))
    frame.to_csv(
        "./../../data/news/news_with_one_company_and_sentiment_analysis.csv",
        index=False)
def news_with_one_or_more_company():
    """Score sentiment for news mapped to one or more companies and save a CSV."""
    positive_words = list(Word.select().where(Word.is_positive == True))
    negative_words = list(Word.select().where(Word.is_positive == False))
    frame = pd.read_csv("./../../data/news/news_with_one_or_more_company.csv")
    frame[["sent_score", "word_count", "words", "parsed_sentence"]] = frame.apply(
        sentence_info_pd, axis=1, args=(positive_words, negative_words))
    frame.to_csv(
        "./../../data/news/news_with_one_or_more_company_and_sentiment_analysis.csv",
        index=False)
# Entry point: only the single-company pipeline is currently enabled.
if __name__ == '__main__':
    news_with_one_company()
    # news_with_one_or_more_company()
|
"""Define class for Department argument."""
from args import _ArgType, _InputValue
from args.meta_arg import MetaArg
from args.arg import Arg
class Department(Arg, metaclass=MetaArg, argtype=_ArgType._DEPARTMENT):
    """Department argument."""
    # This is necessary since Department is a dependency for other
    # arguments (Subject, Course)
    @classmethod
    def _valid_input_value(cls, input_value: _InputValue) -> bool:
        # A valid department code is 2-8 upper-case letters.
        if not (2 <= len(input_value) <= 8):
            return False
        return input_value.isalpha() and input_value.isupper()
|
import sys
def IsSortedArray(values):
    """Print "true" if the sequence first (weakly) descends then strictly
    ascends, "false" otherwise.

    values[0] is the element count; the elements are values[1:count+1].
    Equal adjacent elements, or a descent after an ascent, print "false".
    """
    count = values[0]
    ascent_seen = False
    for i in range(1, count):
        if values[i] > values[i + 1] and not ascent_seen:
            # Still in the descending prefix.
            continue
        if values[i] < values[i + 1]:
            ascent_seen = True
            continue
        # Equal neighbours, or a descent after the ascent started.
        print("false")
        return
    print("true")
# Read one integer per line from stdin; the first is the element count,
# the remainder are the array elements.
raw_input_text = ''.join(sys.stdin.readlines())
lst = [int(line) for line in raw_input_text.splitlines()]
IsSortedArray(lst)
|
def f():
    """Print the character at index 1 of a sample string."""
    s: str
    s = "01234"
    # Bug fix: s[1.2] raised TypeError — string indices must be integers.
    print(s[1])
f()
|
"""
Script to scrape artists urls from all charts page on SoundCloud.
Cycle through all combinations of country and genre, for top50 and new.
The data pulled is going to be a catalogue of artists we are going to find
urls for and pull streaming data for.
This will include data for artist, track url and name. Keeping only distincts
"""
parameters = open("parameters.py", 'r').read()
exec(parameters)
log = open(r"./chart_scraper_run_log.log", 'a')
log.write('execution started at ' + str(datetime.datetime.now()))
log.write('\n')
log.close()
# =============================================================================
current_catalogue = Catalogue(pd.read_csv(catalogue_name, index_col = 0))
appended_data = Catalogue()
# Error handling: Attempt to run the scraper
try:
# Loop through all combinations and append the data
for chart_type, genre, country in api_genre_country_combos:
chart_url = Sc_scraper("charts", chart_type, genre, country)
data = Catalogue(Sc_scraper.chart_scraper(chart_url))
appended_data = Catalogue.union_catalogue(appended_data, data)
# Reset and rename index
appended_data = Catalogue.rename_index(appended_data, 'chart_num')
appended_data = Catalogue.re_index_catalogue(appended_data)
# ==========================================================================
# Union to catalogue, clean up, and save
new_data = Catalogue.union_catalogue(current_catalogue, appended_data)
new_data = Catalogue.re_index_catalogue(new_data, 'drop index')
Catalogue.save_data(new_data, catalogue_name)
clean_up_artists, clean_up_songs = Catalogue.clean_up_catalogue(
Catalogue(pd.read_csv(catalogue_name, index_col = 0))
)
Catalogue.save_data(clean_up_artists, artist_repository)
Catalogue.save_data(clean_up_songs, song_repository)
# ==========================================================================
# Log Run
log = open(r"./chart_scraper_run_log.log", 'a')
# Write date/time
log.write('execution successful at ' + str(datetime.datetime.now()))
log.write('\n')
log.close()
# If fails then log that the run failed
except:
# Log Run
log = open(r"./chart_scraper_run_log.log", 'a')
# Write date/time
log.write('execution failed at ' + str(datetime.datetime.now()))
log.write('\n')
log.close()
|
import pokepy
import random
search = pokepy.V2Client()
def get_pokemon_info(name):
    """Look up *name* on the PokeAPI and return a display-ready info dict.

    :param name: pokemon name or id accepted by the API
    :return: dict with name/id/color/height/weight/image/types/abilities/
        stats/lore, or None when the lookup fails (unknown name, API error).
    """
    try:
        pokemon = search.get_pokemon(name)
    except Exception:
        # Unknown pokemon or API failure: signal "not found" to the caller.
        return None
    species = search.get_pokemon_species(pokemon.id)
    # Base stats keyed by upper-cased stat name, plus a grand total.
    stats = {
        entry.stat.name.upper(): entry.base_stat for entry in pokemon.stats
    }
    stats["**TOTAL**"] = sum(stats.values())
    lore = [
        text.flavor_text
        for text in species.flavor_text_entries
        if text.language.name == "en"
    ]
    info = {
        "name": pokemon.name.capitalize(),
        "id": pokemon.id,
        "color": species.color.name,
        "height": f"{pokemon.height/10} m",   # API value appears to be decimetres
        "weight": f"{pokemon.weight/10} kg",  # API value appears to be hectograms
        "image": f"https://img.pokemondb.net/artwork/{pokemon.name}.jpg",
        "types": [typ.type.name for typ in pokemon.types],
        "abilities": [
            ability.ability.name.capitalize() for ability in pokemon.abilities
        ],
        # Bug fix: random.randint(0, -1) raised ValueError when there were no
        # English flavor texts; random.choice + empty guard instead.
        "lore": random.choice(lore).replace("\x0c", " ").replace("\n", " ")
        if lore else "",
    }
    return info
|
# Generated by Django 3.1.6 on 2021-03-01 15:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: orders News newest-first and creates the Post model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('news', '0002_news_text'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='news',
            options={'ordering': ['-created_date']},
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): upload_to='' stores images at MEDIA_ROOT top level.
                ('image', models.ImageField(upload_to='')),
                ('comments', models.TextField()),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.profile')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import tkinter as tk
from tkinter import ttk
from app import tkinter_logic
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from app import card_fetcher
from app import Graph_functions
# GLOBALS
LARGE_FONT = ("Verdana", 12)
def draw_canvas(self, function, object):
    """Embed a matplotlib figure into a tk widget and remember the canvas.

    :param self: the tk container widget to draw into
    :param function: the matplotlib Figure to render
    :param object: owner that keeps a reference to the live canvas so it can
        be destroyed later (NOTE(review): name shadows the `object` builtin)
    """
    canvas = FigureCanvasTkAgg(function, self)
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
    object.canvas = canvas
class The_app(tk.Tk):
    """Top-level Tk application window that hosts the page frames."""
    def __init__(self, *args, **kwargs):
        # Inherited class init
        tk.Tk.__init__(self, *args, **kwargs)
        tk.Tk.iconbitmap(self, default="fox_icon.ico")
        tk.Tk.wm_title(self, "MagicApp")
        # Single container; all pages are stacked on the same grid cell.
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        # Dictionary of different frames
        self.frames = {}
        for F in ([StartPage]):
            frame = F(container, self)
            self.frames[F] = frame
            # Place the frame object on the grid.
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(StartPage)
    def show_frame(self, cont):
        """Raise the page registered under class *cont* to the front."""
        frame = self.frames[cont]
        frame.tkraise()
class Graph_object:
    """Factory for the matplotlib figures shown on the start page; also
    holds a reference to the currently displayed canvas."""
    def __init__(self):
        pass
    def line(self):
        # NOTE(review): hard-coded card UUID — presumably a demo card; confirm.
        return Graph_functions.price_lineplot("5314bae2-4930-4f8a-8a52-853bc3feb88f",
                                              card_fetcher.get_all_standard())
    def pieplot(self):
        return Graph_functions.pie_graph(tkinter_logic.MANIPULABLE_DATAFRAME, "rarity")
    def swarmplot(self):
        return Graph_functions.swarmplot(tkinter_logic.MANIPULABLE_DATAFRAME, "rarity", "usd")
    def set_active_canvas(self, canvas):
        # Remember the live canvas so buttons can destroy it later.
        self.canvas = canvas
class StartPage(tk.Frame):
    """Main page: one button per graph, drawn into a shared canvas."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="Main page", font=LARGE_FONT)
        label.pack(pady=10, padx=10)
        graph_object = Graph_object()
        pie_plot_button = ttk.Button(self, text='Pie Plot',
                                     command=lambda: draw_canvas(self, graph_object.pieplot(), graph_object))
        pie_plot_button.pack()
        # Bug fix: this button draws the swarmplot but was labelled 'Boxplot'.
        boxplot_button = ttk.Button(self, text='Swarmplot',
                                    command=lambda: draw_canvas(self, graph_object.swarmplot(), graph_object))
        boxplot_button.pack()
        # Bug fix: copy-pasted 'Boxplot' label on the line-plot button.
        line_button = ttk.Button(self, text='Line Plot',
                                 command=lambda: draw_canvas(self, graph_object.line(), graph_object))
        line_button.pack()
        destroy2_button = ttk.Button(self, text="Destroy 2",
                                     command=lambda: graph_object.canvas.get_tk_widget().destroy())
        destroy2_button.pack()
# Run the GUI only when executed as a script, not on import.
if __name__ == "__main__":
    app = The_app()
    app.mainloop()
|
import re
from typing import Pattern, Callable
from atcodertools.codegen.code_generators import cpp, java, rust, python
from atcodertools.codegen.models.code_gen_args import CodeGenArgs
from atcodertools.tools.templates import get_default_template_path
class LanguageNotFoundError(Exception):
    """Raised when no registered Language matches a requested name."""
class Language:
    """Metadata bundle describing one supported submission language."""
    def __init__(self,
                 name: str,
                 display_name: str,
                 extension: str,
                 submission_lang_pattern: Pattern[str],
                 default_code_generator: Callable[[CodeGenArgs], str],
                 default_template_path: str,
                 ):
        self.name = name
        self.display_name = display_name
        self.extension = extension
        self.submission_lang_pattern = submission_lang_pattern
        self.default_code_generator = default_code_generator
        self.default_template_path = default_template_path

    def source_code_name(self, name_without_extension: str) -> str:
        """Return the file name with this language's extension appended."""
        return "{}.{}".format(name_without_extension, self.extension)

    @classmethod
    def from_name(cls, name: str):
        """Look up a registered language by its internal name.

        :raises LanguageNotFoundError: if no language matches *name*.
        """
        for lang in ALL_LANGUAGES:
            if lang.name == name:
                return lang
        # Bug fix: the message previously interpolated ALL_LANGUAGE_NAMES into
        # "No language support for '{}'" instead of the requested name.
        raise LanguageNotFoundError(
            "No language support for '{}' (supported: {})".format(
                name, ALL_LANGUAGE_NAMES))
# Registry of supported languages; patterns match AtCoder's submission
# language labels.
CPP = Language(
    name="cpp",
    display_name="C++",
    extension="cpp",
    submission_lang_pattern=re.compile(".*C\\+\\+14 \\(GCC.*"),
    default_code_generator=cpp.main,
    default_template_path=get_default_template_path('cpp'),
)
JAVA = Language(
    name="java",
    display_name="Java",
    extension="java",
    submission_lang_pattern=re.compile(".*Java8.*"),
    default_code_generator=java.main,
    default_template_path=get_default_template_path('java'),
)
RUST = Language(
    name="rust",
    display_name="Rust",
    extension="rs",
    submission_lang_pattern=re.compile(".*Rust \\(1.*"),
    default_code_generator=rust.main,
    default_template_path=get_default_template_path('rs'),
)
PYTHON = Language(
    name="python",
    display_name="Python3",
    extension="py",
    submission_lang_pattern=re.compile(".*Python3.*"),
    default_code_generator=python.main,
    default_template_path=get_default_template_path('py'),
)
# Lookup tables used by Language.from_name and error messages.
ALL_LANGUAGES = [CPP, JAVA, RUST, PYTHON]
ALL_LANGUAGE_NAMES = [lang.display_name for lang in ALL_LANGUAGES]
|
# coding=utf-8
from datetime import datetime
from lxml import etree
from app.utils.parse_url import parse_url
from app.database import MongodbClient
class CrawlXs147(object):
    """
    147xs crawler
    source: http://www.147xs.com/

    Scrapes novel listings, per-novel metadata, chapter menus and chapter
    bodies. XPaths are tightly coupled to the site's markup.
    """
    def __init__(self):
        self._base_url = 'http://www.147xs.com'
        self._xs147 = 'http://www.147xs.com/sort/{category}/'
        # Cached lxml tree of the most recently fetched novel page, reused
        # by get_menu() to avoid refetching.
        self._novel_html = None
        self.crawl_time = datetime.now()
        # Session cookie sent with every request.
        self._params = {'Cookie': 'bdshare_firstime=1534250751693; Hm_lvt_f9e74ced1e1a12f9e31d3af8376b6d63=1534250752,1534339609; Hm_lpvt_f9e74ced1e1a12f9e31d3af8376b6d63=1534340938'}
    def _novel_state(self, update_info):
        """get novels state, judge this novel is finished or unfinished

        :param update_info: 'YYYY-MM-DD' last-update date string
        :return: '更新中' (updating) if updated within 7 days, else '完结' (finished)
        """
        spt = list(map(int, update_info.split('-')))
        update_time = datetime(spt[0], spt[1], spt[2])
        interval_days = (self.crawl_time - update_time).days
        state = '更新中' if interval_days < 7 else '完结'
        return state
    def get_url(self, category):
        """crawler all novels url according to category

        Yields dicts with source_url/name/author for each listed novel.
        """
        url = self._xs147.format(category=category)
        res = parse_url(url, 'utf-8', params=self._params)
        html = etree.HTML(res)
        urls = html.xpath('//*[@id="main"]/div[@class="novelslist"]/div[1]/ul/li/a/@href')
        names = html.xpath('//*[@id="main"]/div[@class="novelslist"]/div[1]/ul/li/a/text()')
        authors = html.xpath('//*[@id="main"]/div[@class="novelslist"]/div[1]/ul/li/text()')
        for url, name, author in zip(urls, names, authors):
            novel = {
                'source_url': url,
                'name': name,
                # Author text is rendered as '/<author>'; strip the slash.
                'author': author.lstrip('/')
            }
            yield novel
    def get_info(self, novel_url):
        """crawler novel info

        Also caches the parsed page in self._novel_html for get_menu().
        """
        res = parse_url(novel_url, 'utf-8', params=self._params)
        html = etree.HTML(res)
        self._novel_html = html
        image = html.xpath('//*[@id="fmimg"]/a/img/@src')[0]
        intro = html.xpath('//*[@id="intro"]/text()')[2]
        update_info = html.xpath('//*[@id="info"]/p[3]/text()')[0]
        _datetime = update_info.split(':')[1].split(' ')[0]
        state = self._novel_state(_datetime)
        info = {
            'image': self._base_url + image,
            'intro': intro,
            'state': state
        }
        return info
    def get_menu(self, url=None):
        """crawling novel menu

        Yields chapter dicts. Returns False (generator return value) when no
        page is cached and no url is given. The [9:] slice skips the site's
        "latest chapters" block at the top of the list.
        """
        if self._novel_html is None:
            if url:
                res = parse_url(url, 'utf-8', params=self._params)
                self._novel_html = etree.HTML(res)
            else: return False
        chapter_url = self._novel_html.xpath('//*[@id="list"]/dd/a/@href')[9:]
        chapter_title = self._novel_html.xpath('//*[@id="list"]/dd/a/text()')[9:]
        for url, title in zip(chapter_url, chapter_title):
            chapter = {
                'source_url': self._base_url + url,
                'title': title
            }
            yield chapter
    def get_chapter(self, chapter_url):
        """crawler chapter content

        :return: chapter paragraphs joined with newlines
        """
        res = parse_url(chapter_url, 'utf-8', params=self._params)
        html = etree.HTML(res)
        content = html.xpath('//*[@id="content"]/p/text()')
        return '\n'.join(content)
if __name__ == '__main__':
    import time
    # Crawl every category, enrich each novel with its info and chapter list,
    # and upsert into the 'novel' Mongo collection.
    db = MongodbClient('novel', 'localhost', 27017)
    crawler = CrawlXs147()
    # Site category id -> slug used as the stored category value.
    cates = {
        '1': 'xuanhuan',
        '2': 'xiuzhen',
        '3': 'dushi',
        '4': 'lishi',
        '5': 'zhichang',
        '6': 'wangyou',
        '7': 'kehuan',
        '8': 'junshi',
        '9': 'lingyi',
        '10': 'qihuan',
        '11': 'wuxia',
        '12': 'jingji',
        '13': 'tongren',
        '14': 'yanqing'
    }
    for category, value in cates.items():
        for novel in crawler.get_url(category):
            db.change_table('novel')
            novel['category'] = value
            info = crawler.get_info(novel['source_url'])
            data = dict(novel, **info)
            print('success novel: ', data['source_url'])
            for c in crawler.get_menu(novel['source_url']):
                # Skip chapters already stored for this novel/author pair.
                exists_novel = db.exists({'name': data['name'], 'author': data['author']})
                if exists_novel and c in exists_novel.get('chapters', []):
                    print('exists')
                    continue
                novel.setdefault('chapters', []).append(c)
                print('success chapter: ', c['source_url'])
            db.put(novel)
            # Be polite to the site: throttle one request per second.
            time.sleep(1)
|
from pacman import exceptions
import numpy
class BaseKeyAndMask(object):
    """ A Key and Mask to be used for routing
    """
    def __init__(self, base_key, mask):
        """
        :param base_key: The routing key
        :type base_key: int
        :param mask: The routing mask
        :type mask: int
        :raise PacmanConfigurationException: If key & mask != key i.e. the key\
            is not valid for the given mask
        """
        self._base_key = base_key
        self._mask = mask
        if base_key & mask != base_key:
            # Bug fix: the original message was missing a space at a string
            # literal join and read "...please rectify andtry again".
            raise exceptions.PacmanConfigurationException(
                "This routing info is invalid as the mask and key together "
                "alters the key. This is deemed to be an error from "
                "spynnaker's point of view and therefore please rectify "
                "and try again")

    @property
    def key(self):
        """ The base key

        :return: The base key
        :rtype: int
        """
        return self._base_key

    @property
    def key_combo(self):
        """ The key combined (ANDed) with the mask

        :return: int
        """
        return self._base_key & self._mask

    @property
    def mask(self):
        """ The mask

        :return: The mask
        :rtype: int
        """
        return self._mask

    def __eq__(self, key_and_mask):
        # Structural equality on (key, mask); non-BaseKeyAndMask compares False.
        if not isinstance(key_and_mask, BaseKeyAndMask):
            return False
        return (self._base_key == key_and_mask.key and
                self._mask == key_and_mask.mask)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "KeyAndMask:{}:{}".format(hex(self._base_key), hex(self._mask))

    def __str__(self):
        return self.__repr__()

    def __hash__(self):
        # Hash on the repr so equal key/mask pairs hash equally.
        return self.__repr__().__hash__()

    @property
    def n_keys(self):
        """ The total number of keys that can be generated given the mask

        :return: The number of keys
        :rtype: int
        """
        # converts mask into array of bit representation (32-bit, big-endian)
        unwrapped_mask = numpy.unpackbits(
            numpy.asarray([self._mask], dtype=">u4").view(dtype="uint8"))
        # how many zeros are in the bit representation array
        zeros = numpy.where(unwrapped_mask == 0)[0]
        # number of keys available from this mask size
        return 2 ** len(zeros)

    def get_keys(self, key_array=None, offset=0, n_keys=None):
        """ Get the ordered list of keys that the combination allows

        :param key_array: Optional array into which the returned keys will be\
            placed
        :type key_array: array-like of int
        :param offset: Optional offset into the array at which to start\
            placing keys
        :type offset: int
        :param n_keys: Optional limit on the number of keys returned. If less\
            than this number of keys are available, only the keys available\
            will be added
        :type n_keys: int
        :return: A tuple of an array of keys and the number of keys added to\
            the array
        :rtype: (array-like of int, int)
        """
        # Get the position of the zeros in the mask - assume 32-bits
        unwrapped_mask = numpy.unpackbits(
            numpy.asarray([self._mask], dtype=">u4").view(dtype="uint8"))
        zeros = numpy.where(unwrapped_mask == 0)[0]

        # We now know how many values there are - 2^len(zeros)
        max_n_keys = 2 ** len(zeros)
        if key_array is not None and len(key_array) < max_n_keys:
            max_n_keys = len(key_array)
        if n_keys is None or n_keys > max_n_keys:
            n_keys = max_n_keys
        if key_array is None:
            key_array = numpy.zeros(n_keys, dtype=">u4")

        # Create a list of 2^len(zeros) keys
        unwrapped_key = numpy.unpackbits(
            numpy.asarray([self._base_key], dtype=">u4").view(dtype="uint8"))

        # for each key, create its key with the idea of a neuron id being
        # continuous and live at an offset position from the bottom of
        # the key
        for value in range(n_keys):
            key = numpy.copy(unwrapped_key)
            unwrapped_value = numpy.unpackbits(
                numpy.asarray([value], dtype=">u4")
                .view(dtype="uint8"))[-len(zeros):]
            key[zeros] = unwrapped_value
            key_array[value + offset] = \
                numpy.packbits(key).view(dtype=">u4")[0].item()
        return key_array, n_keys
|
# Time: O(n), search for s in 2*s
# Space: O(n)
class Solution:
    def repeatedSubstringPattern(self, s: str) -> bool:
        """Return True if s can be built by repeating a proper substring.

        A string is a repetition iff it occurs inside its doubling with the
        first and last characters removed.
        """
        doubled_interior = (s + s)[1:-1]
        return s in doubled_interior
|
from typing import Any, Dict
class AST:
    """Base class for shape syntax-tree nodes.

    Equality and hashing are structural: two nodes are equal when they share
    a type_name() and an equal state_dict().
    """
    def type_name(self) -> str:
        # Subclasses return their node-kind name, e.g. "Rectangle".
        raise NotImplementedError
    def __eq__(self, rhs: Any) -> bool:
        if isinstance(rhs, AST):
            return self.type_name() == rhs.type_name() and \
                self.state_dict() == rhs.state_dict()
        return False
    def __hash__(self) -> int:
        # NOTE(review): requires state_dict() values to be hashable.
        return hash(self.type_name()) ^ hash(tuple(self.state_dict().items()))
    def state_dict(self) -> Dict[str, Any]:
        # Subclasses return their constructor state as a dict.
        raise NotImplementedError
    def __str__(self) -> str:
        raise NotImplementedError
    def __repr__(self) -> str:
        return self.__str__()
class Rectangle(AST):
    """Rectangle primitive with width w and height h."""
    def __init__(self, w: int, h: int):
        self.w = w
        self.h = h
    def type_name(self) -> str:
        return "Rectangle"
    def state_dict(self) -> Dict[str, Any]:
        return dict(w=self.w, h=self.h)
    def __str__(self) -> str:
        return "Rectangle(w={},h={})".format(self.w, self.h)
class Circle(AST):
    """Circle primitive with radius r."""
    def __init__(self, r: int):
        self.r = r
    def type_name(self) -> str:
        return "Circle"
    def state_dict(self) -> Dict[str, Any]:
        return dict(r=self.r)
    def __str__(self) -> str:
        return "Circle(r={})".format(self.r)
class Translation(AST):
    """Translates its child node by (x, y)."""
    def __init__(self, x: int, y: int, child: AST):
        self.x = x
        self.y = y
        self.child = child
    def type_name(self) -> str:
        return "Translation"
    def state_dict(self) -> Dict[str, Any]:
        return dict(x=self.x, y=self.y, child=self.child)
    def __str__(self) -> str:
        return "Translation(x={},y={},child={})".format(self.x, self.y, self.child)
class Rotation(AST):
    """Rotates its child node by theta_degree degrees."""
    def __init__(self, theta_degree: int, child: AST):
        self.theta_degree = theta_degree
        self.child = child
    def type_name(self) -> str:
        return "Rotation"
    def state_dict(self) -> Dict[str, Any]:
        return dict(theta_degree=self.theta_degree, child=self.child)
    def __str__(self) -> str:
        return "Rotation(theta={},child={})".format(self.theta_degree, self.child)
class Union(AST):
    """Set union of two child nodes a and b."""
    def __init__(self, a: AST, b: AST):
        self.a = a
        self.b = b
    def type_name(self) -> str:
        return "Union"
    def state_dict(self) -> Dict[str, Any]:
        return dict(a=self.a, b=self.b)
    def __str__(self) -> str:
        return "Union(a={},b={})".format(self.a, self.b)
class Difference(AST):
    """Set difference of two child nodes (a minus b)."""
    def __init__(self, a: AST, b: AST):
        self.a = a
        self.b = b
    def type_name(self) -> str:
        return "Difference"
    def state_dict(self) -> Dict[str, Any]:
        return dict(a=self.a, b=self.b)
    def __str__(self) -> str:
        return "Difference(a={},b={})".format(self.a, self.b)
class Reference(AST):
    """Reference to another node by numeric id."""
    def __init__(self, id: int):
        self.id = id
    def type_name(self) -> str:
        return "Reference"
    def state_dict(self) -> Dict[str, Any]:
        return dict(id=self.id)
    def __str__(self) -> str:
        return "Reference(id={})".format(self.id)
|
from django.urls import path
from . import views
# Route table for the instances app.
# NOTE(review): '' and 'instances' both register name='instances'; reverse()
# resolves to the last one — confirm the duplicate name is intentional.
urlpatterns = [
    path('', views.FilteredInstanceTableView.as_view(), name='instances'),
    path('instances', views.FilteredInstanceTableView.as_view(), name='instances'),
    path('update', views.update, name='update'),
    path('update/status', views.update_status, name='update_status'),
    path('delete/<str:key_id>', views.delete, name='delete_instance'),
]
|
#!/usr/bin/env python3
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "sphinx.ext.napoleon",
    "sphinx_copybutton",
]
autoclass_content = "class"
# autodoc_typehints = "description"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "logpyle"
copyright = "2017, Andreas Kloeckner"
author = "Andreas Kloeckner"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
ver_dic = {}
# Bug fix: the version file was opened without ever being closed; use a
# context manager so the handle is released deterministically.
with open("../logpyle/version.py") as _version_file:
    exec(
        compile(_version_file.read(), "../logpyle/version.py", "exec"),
        ver_dic,
    )
version = ".".join(str(x) for x in ver_dic["VERSION"])
release = ver_dic["VERSION_TEXT"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None; consider "en".
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
nitpicky = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_theme_options = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
intersphinx_mapping = {
    "https://docs.python.org/3": None,
    "https://numpy.org/doc/stable": None,
    "https://documen.tician.de/pymbolic/": None,
    "https://documen.tician.de/pytools/": None,
}
|
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import json
import base64
from PIL import Image
import io
from model_handler import ModelHandler
def init_context(context):
    """Nuclio init hook: build the model once and cache it on the context."""
    context.logger.info("Init context... 0%")
    context.user_data.model = ModelHandler()
    context.logger.info("Init context...100%")
def handler(context, event):
    """Nuclio request handler: run the model on a base64-encoded image with
    positive/negative click points and return the resulting polygon as JSON."""
    context.logger.info("call handler")
    data = event.body
    pos_points = data["pos_points"]
    neg_points = data["neg_points"]
    # Optional confidence threshold; default 0.5.
    threshold = data.get("threshold", 0.5)
    image = Image.open(io.BytesIO(base64.b64decode(data["image"])))
    polygon = context.user_data.model.handle(
        image, pos_points, neg_points, threshold)
    return context.Response(
        body=json.dumps(polygon),
        headers={},
        content_type='application/json',
        status_code=200,
    )
|
from setuptools import setup
setup(
    name="sync_location",
    version="1.1.3",
    author="Jelmer van Arnhem",
    description="Read, parse and expose syncthing folder locations by name",
    license="MIT",
    py_modules=["sync_location"],
    include_package_data=True,
    # Bug fix: ">= 3.*" is not a valid PEP 440 specifier (wildcards are only
    # allowed with == / !=) and is rejected by modern pip/setuptools.
    python_requires=">=3",
    setup_requires=["setuptools"],
    entry_points={"console_scripts": ["sync_location=sync_location:main"]}
)
|
# Demonstrates Python scoping rules: global reads, `global`, closures and
# `nonlocal` (LEGB resolution). The structure is the point — left unchanged.
X = 11
def g1():
    # Reads the module-level X (global lookup at call time).
    print(X)
def g2():
    global X
    # Rebinds the module-level X.
    X = 22
def h1():
    X = 33
    def nested():
        # Reads h1's local X through the enclosing (closure) scope.
        print(X)
    nested()
def h2():
    X = 33
    def nested():
        nonlocal X
        # Rebinds h2's local X from the nested function.
        X = 44
    nested()
    print(X)
if __name__ == '__main__':
    g1()   # 11
    g2()
    g1()   # 22
    h1()   # 33
    h2()   # 44
|
import os
import pytest
from _pytest.fixtures import SubRequest
@pytest.fixture(scope='module', autouse=True)
def setup_dir(request: SubRequest) -> None:
    """Create 'test-dir' for this module and remove it after all tests.

    NOTE(review): the finalizer removes 'test-dir-1', so teardown only
    succeeds if test_rename_dir has renamed the directory first — the
    fixture is coupled to test execution order.
    """
    os.mkdir('test-dir', mode=0o777)
    def teardown_dir() -> None:
        # Removes the *renamed* directory produced by test_rename_dir.
        os.rmdir('test-dir-1')
    request.addfinalizer(teardown_dir)
@pytest.mark.dir
def test_rename_dir() -> None:
    """Rename the fixture directory and verify it exists under the new name."""
    os.rename('test-dir', 'test-dir-1')
    # Bug fix: `os.path.basename('test-dir-1') == 'test-dir-1'` was a pure
    # string tautology that passed regardless of the rename; assert on the
    # filesystem instead.
    assert os.path.isdir('test-dir-1')
@pytest.mark.dir
def test_list_dir() -> None:
    # The renamed directory should still be empty.
    assert os.listdir('test-dir-1') == []
#!/usr/bin/python
# Gimp-Python - allows the writing of Gimp plugins in Python.
# Copyright (C) 2006 Manish Singh <yosh@gimp.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gimpfu import *
import sys
def code_eval(code):
    # Execute *code* in the plug-in's global namespace (Python 2 syntax).
    # '-' means "read the program from stdin".
    # SECURITY: executes arbitrary code by design — this is GIMP's batch-mode
    # escape hatch; never expose it to untrusted input.
    if code == '-':
        code = sys.stdin
    exec code in globals()
register(
"python-fu-eval",
"Evaluate Python code",
"Evaluate python code under the python interpreter (primarily for batch mode)",
"Manish Singh",
"Manish Singh",
"2006",
None,
None,
[
(PF_STRING, "code", "The code to evaluate", "")
],
[],
code_eval)
main()
|
# Convert knowledge graph in NTRIPLES format to tsv file
import pandas as pd

# Whitespace-split the triples file into subject / predicate / object columns.
df = pd.read_csv('kgraph_geodata.nt',
                 sep=r'\s+',
                 header=None,
                 names=None,
                 dtype=str,
                 usecols=[0, 1, 2])
df = df.astype(str)
# Strip the IRI angle brackets in a single regex pass (was two chained
# .replace calls, each a full scan of the frame).
df[[0, 1, 2]] = df[[0, 1, 2]].replace({r'[<>]': ''}, regex=True)
print(df.shape)
df.to_csv('data/kg_triples.tsv', index=False, header=False, sep='\t')
|
import torch
import railrl.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.reset_free.pointmass.generate_state_based_vae_dataset import generate_vae_dataset
from multiworld.envs.pygame.point2d import Point2DWallEnv
from railrl.launchers.launcher_util import run_experiment
from railrl.misc.ml_util import PiecewiseLinearSchedule
from railrl.torch.vae.vae import VAE, VAETrainer, AutoEncoder
import numpy as np
from railrl.pythonplusplus import identity
def generate_dataset(N=1000, test_p=.9):
    """Build a 1-D sine-wave dataset split into train and test partitions.

    N points of sin(x) are sampled on [-5*pi, 5*pi]; the first int(N * test_p)
    targets form the training set, the remainder the test set.
    NOTE(review): despite its name, test_p is the *train* fraction.

    :return: (train_dataset, test_dataset, info_dict)
    """
    grid = np.linspace(-np.pi * 5, 5 * np.pi, N).reshape(-1, 1)
    targets = np.sin(grid).reshape(-1, 1)
    split = int(N * test_p)
    return targets[:split, :], targets[split:, :], dict()
def experiment(variant):
    """Train a VAE (or AutoEncoder) on the sine dataset per *variant* config."""
    from railrl.core import logger
    beta = variant["beta"]
    representation_size = variant["representation_size"]
    train_data, test_data, info = generate_dataset(
    )
    logger.save_extra_data(info)
    logger.get_snapshot_dir()
    if 'beta_schedule_kwargs' in variant:
        # Patch the schedule in place so swept beta/flat_x/ramp_x take effect.
        kwargs = variant['beta_schedule_kwargs']
        kwargs['y_values'][2] = variant['beta']
        kwargs['x_values'][1] = variant['flat_x']
        kwargs['x_values'][2] = variant['ramp_x'] + variant['flat_x']
        beta_schedule = PiecewiseLinearSchedule(**variant['beta_schedule_kwargs'])
    else:
        beta_schedule = None
    output_scale=1
    if variant['algo_kwargs']['is_auto_encoder']:
        m = AutoEncoder(representation_size,
                        train_data.shape[1],
                        output_scale=output_scale,
                        **variant['vae_kwargs']
                        )
    else:
        m = VAE(representation_size,
                train_data.shape[1],
                output_scale=output_scale,
                **variant['vae_kwargs']
                )
    t = VAETrainer(train_data, test_data, m, beta=beta,
                   beta_schedule=beta_schedule, **variant['algo_kwargs'])
    # Alternate one training and one evaluation pass per epoch.
    for epoch in range(variant['num_epochs']):
        t.train_epoch(epoch)
        t.test_epoch(epoch)
if __name__ == "__main__":
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'test'
n_seeds = 1
mode = 'ec2'
exp_prefix = 'sin_sweep_beta_schedule'
variant = dict(
beta=.1,
flat_x=50,
ramp_x=50,
num_epochs=200,
algo_kwargs=dict(
batch_size=64,
is_auto_encoder=False,
),
vae_kwargs=dict(
hidden_sizes=[100, 100],
output_activation=identity,
),
representation_size=16,
beta_schedule_kwargs = dict(
x_values=[0, 50, 100],
y_values=[0, 0, .1],
)
)
search_space = {
'representation_sizes':[4],
'algo_kwargs.lr': [1e-3],
'beta': [.01, .05, .1],
'vae_kwargs.output_activation':[torch.tanh, identity],
'flat_x':[25, 50, 75],
'ramp_x':[50, 100],
'vae_kwargs.hidden_sizes': [[32, 32], [10, 10], [100, 100]],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for _ in range(n_seeds):
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
run_experiment(
experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
)
|
# SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <support@opennetworking.org>
#
# SPDX-License-Identifier: BSD-3-Clause
import os
def is_dev_mode() -> bool:
    """Return True when the MAGMA_DEV_MODE environment variable equals '1'."""
    flag = os.environ.get('MAGMA_DEV_MODE')
    return flag == '1'
def is_docker_network_mode() -> bool:
    """
    Returns whether the environment is set for docker network mode

    (Doc fix: the docstring was copy-pasted from is_dev_mode and wrongly
    said "dev mode".)
    """
    return os.environ.get('DOCKER_NETWORK_MODE') == '1'
|
import json
import logging
import requests
import time
from piperci.gman.exceptions import TaskError
log = logging.getLogger(__name__)
def request_new_task_id(
    run_id=None, gman_url=None, project=None, caller=None, status=None, thread_id=None
):
    """
    Request a new TaskID from GMan, associated with a given RunID
    :param run_id: Unique identifier to correlate taskIDs as a string
    :param gman_url: GMan endpoint as a string
    :param project: Name of your project as a string
    :param caller: The invoker of the task. as a string
    :param status: The initial status of the task. This must either be "started"
        or "received". Unless the caller of this function is an executor this should
        always be "started"
    :param thread_id: The thread_id that this task should be associated with.
        This will mainly be used by executors who need to tie their task_id to the
        task_id of its parent.
    :return: JSON response from GMan
    """
    # Idiom: membership test replaces the chained != comparisons; also
    # rejects a missing/empty status.
    if status not in ("started", "received"):
        raise ValueError(f"Invalid status '{status}'. Must be 'received' or 'started'.")
    if status == "received" and not thread_id:
        # (was an f-string with no placeholders)
        raise ValueError("thread_id must be specified if status is received.")
    try:
        log.debug(f"Requesting new taskID from gman at {gman_url}")
        data = {
            "run_id": run_id,
            "caller": caller,
            "project": project,
            "message": "Requesting new taskID",
            "status": status,
        }
        if thread_id:
            data.update({"thread_id": thread_id})
        r = requests.post(f"{gman_url}/task", data=json.dumps(data))
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise requests.exceptions.HTTPError(
            f"GMan returned with a bad status code. \n\n{e}"
        )
    except requests.exceptions.RequestException as e:
        # NOTE(review): re-raised as HTTPError (not RequestException) — kept
        # as-is since callers may already catch HTTPError here.
        raise requests.exceptions.HTTPError(
            f"Failed to request new task id from gman. \n\n{e}"
        )
    return r.json()
def wait_for_task_status(task_id=None, status=None, gman_url=None, retry_max=10):
    """
    Returns true if given task_id has a status of the given status. If retry_max
    is reached without a matching status then a Timeout exception is raised. If
    the given task returns with a "failed" state then we raise a TaskError exception.

    :param task_id: TaskID to query for as a string
    :param status: Status to wait for as a string
    :param gman_url: GMan endpoint as a string
    :param retry_max: The number of times to retry the query as an integer
    :return: True or exception
    :raises TaskError: if the task reports any "failed" event
    :raises TimeoutError: if the status was not seen within retry_max polls
    """
    retries = 0
    while retries < retry_max:
        try:
            log.debug(f"Checking status of task {task_id}")
            r = requests.get(f"{gman_url}/task/{task_id}/events")
            r.raise_for_status()
            # Parse the response body once instead of once per filter.
            all_events = r.json()
            events = [event for event in all_events if event.get("status") == status]
            events_failed = [
                event for event in all_events if event.get("status") == "failed"
            ]
            if events_failed:
                raise TaskError(
                    f"Task {task_id} has failed task events. {events_failed}"
                )
            if events:
                return True
            # Desired status not seen yet: back off one second and poll again.
            retries += 1
            time.sleep(1)
        except requests.exceptions.HTTPError as e:
            raise requests.exceptions.HTTPError(
                f"GMan returned with a bad status code. \n\n{e}"
            ) from e
        except requests.exceptions.RequestException as e:
            raise requests.exceptions.RequestException(
                f"Failed to check status of task. \n\n{e}"
            ) from e
    raise TimeoutError(f"Checking task status timeout for task {task_id}")
def update_task_id(task_id=None, gman_url=None, status=None, message=None):
    """
    Updates a taskID status and/or message

    :param task_id: TaskID to update as a string
    :param gman_url: GMan endpoint as a string
    :param status: The status to apply to the task
    :param message: The message to apply to the task
    :return: JSON response from gman
    :raises requests.exceptions.HTTPError: if GMan answers with an error status
    :raises requests.exceptions.RequestException: on transport failures
    """
    try:
        data = {"message": message, "status": status}
        r = requests.put(f"{gman_url}/task/{task_id}", data=json.dumps(data))
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise requests.exceptions.HTTPError(
            f"GMan returned with a bad status code. \n\n{e}"
        ) from e
    except requests.exceptions.RequestException as e:
        raise requests.exceptions.RequestException(
            f"Failed to update taskID {task_id}. \n\n{e}"
        ) from e
    return r.json()
def get_task_id_events(task_id=None, gman_url=None, query_filter=None):
    """
    Get a list of taskID events from gman.

    Optionally pass a query_filter lambda expression to filter these events.
    For example, to return a list of all failed events for a particular taskID

    get_task_id_events(
        task_id='1234', gman_url=url, query_filter=lambda x: x.get('status') == 'failed'
    )

    :param task_id: taskID to query as a string
    :param gman_url: GMan endpoint as a string
    :param query_filter: lambda expression
    :return: List of events
    :raises requests.exceptions.HTTPError: if GMan answers with an error status
    :raises requests.exceptions.RequestException: on transport failures
    """
    try:
        r = requests.get(f"{gman_url}/task/{task_id}/events")
        r.raise_for_status()
        if query_filter:
            return list(filter(query_filter, r.json()))
        else:
            return r.json()
    except requests.exceptions.HTTPError as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise requests.exceptions.HTTPError(
            f"Gman returned with a bad status code. \n\n{e}"
        ) from e
    except requests.exceptions.RequestException as e:
        raise requests.exceptions.RequestException(
            f"Failed to get taskID events for task_id {task_id}. \n\n{e}"
        ) from e
def get_thread_id_tasks(thread_id=None, gman_url=None, query_filter=None):
    """
    Get a list of tasks associated with the given thread_id

    :param thread_id: The thread_id to query with as a string
    :param gman_url: The GMan endpoint as a string
    :param query_filter: lambda expression
    :return: List of tasks associated with thread_id
    :raises requests.exceptions.HTTPError: if GMan answers with an error status
    :raises requests.exceptions.RequestException: on transport failures
    """
    try:
        r = requests.get(f"{gman_url}/thread/{thread_id}")
        r.raise_for_status()
        if query_filter:
            return list(filter(query_filter, r.json()))
        else:
            return r.json()
    except requests.exceptions.HTTPError as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise requests.exceptions.HTTPError(
            f"Gman returned with a bad status code. \n\n{e}"
        ) from e
    except requests.exceptions.RequestException as e:
        raise requests.exceptions.RequestException(
            f"Failed to get tasks for thread_id {thread_id}. \n\n{e}"
        ) from e
def get_thread_id_events(thread_id=None, gman_url=None, query_filter=None):
    """
    Get list of all events for a given thread_id, optionally filtered by
    a lambda expression.

    :param thread_id: Thread ID to query for as a string.
    :param gman_url: GMan endpoint as a string.
    :param query_filter: Lambda expression
    :return: List of task events
    :raises requests.exceptions.HTTPError: if GMan answers with an error status
    :raises requests.exceptions.RequestException: on transport failures
    """
    try:
        r = requests.get(f"{gman_url}/thread/{thread_id}/events")
        r.raise_for_status()
        if query_filter:
            return list(filter(query_filter, r.json()))
        else:
            return r.json()
    except requests.exceptions.HTTPError as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise requests.exceptions.HTTPError(
            f"Gman returned with a bad status code. \n\n{e}"
        ) from e
    except requests.exceptions.RequestException as e:
        # Message fixed: this function fetches *events*, not tasks (was a
        # copy-paste of get_thread_id_tasks' message).
        raise requests.exceptions.RequestException(
            f"Failed to get events for thread_id {thread_id}. \n\n{e}"
        ) from e
def wait_for_thread_id_complete(thread_id=None, gman_url=None, retry_max=10):
    """
    Wait for all tasks under a given thread_id to return with a status of complete,
    up to the retry_max.

    :param thread_id: thread_id as a string to search for
    :param gman_url: GMan endpoint as a string
    :param retry_max: Number of times to retry as an integer
    :return: True or exception
    :raises TaskError: if any task under the thread has failed
    :raises TimeoutError: if tasks are not complete after retry_max polls
    """
    retries = 0
    while retries < retry_max:
        try:
            log.debug(f"Checking status of thread {thread_id}")
            r = requests.head(f"{gman_url}/thread/{thread_id}")
            r.raise_for_status()
            # Task counters are reported via response headers. Defaulting a
            # missing header to 0 avoids the int(None) TypeError the original
            # raised when a header was absent.
            running, completed, failed = (
                int(r.headers.get(key, 0))
                for key in (
                    "x-gman-tasks-running",
                    "x-gman-tasks-completed",
                    "x-gman-tasks-failed",
                )
            )
            if failed > 0:
                raise TaskError(f"Thread {thread_id} has failures")
            if running == 0 and completed > 0:
                return True
            # Still running (or nothing reported yet): back off and retry.
            # The original looped forever without incrementing `retries` when
            # all counters were zero.
            retries += 1
            time.sleep(1)
        except requests.exceptions.HTTPError as e:
            raise requests.exceptions.HTTPError(
                f"GMan returned with a bad status code. \n\n{e}"
            ) from e
        except requests.exceptions.RequestException as e:
            raise requests.exceptions.RequestException(
                f"Failed to check status of task. \n\n{e}"
            ) from e
    raise TimeoutError(f"Checking thread_id status timed out for thread_id {thread_id}")
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 supr3me
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from random import randint
from random import choice as randchoice
from .item import Item
class WeaponSkin(Item):
    """Represents a Weapon Skin

    CSGOStash Page: https://csgostash.com/skin/(id)/(name)
    """
    __slots__ = ('_spawned', 'weapon_type', 'title', 'description', 'lore', 'collection', 'collections',
                 'found_in', 'rarity', 'wears', 'can_be_stattrak', 'can_be_souvenir', 'is_stattrak')

    def __init__(self, weapon_type: str, title: str, desc: str, lore: str, collection: list, found_in: list, rarity: str, wears: dict):
        """Build a skin.

        :param weapon_type: e.g. 'Knife', 'Gloves' or a gun category
        :param wears: mapping of wear name -> wear image
        """
        super().__init__(title, desc, lore, collection, found_in, rarity)
        self.weapon_type = weapon_type
        self.wears = wears
        self.can_be_stattrak = False
        self.can_be_souvenir = False
        # Bug fix: these slots were never initialized here, so __eq__,
        # __repr__ and __hash__ raised AttributeError on instances that were
        # not built through _from_data.
        self.is_stattrak = False
        self._spawned = True  # assumed "custom" until _from_data overrides — TODO confirm

    @classmethod
    def __base__(cls):
        """Get base class."""
        return cls

    def __eq__(self, other):
        return (
            isinstance(other, self.__base__()) and
            all([getattr(other, key) == getattr(self, key)
                 for key in self.__slots__])
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<Item.WeaponSkin name='%s' custom='%s'>" % (self.title, self._spawned)

    def __hash__(self):
        return hash((self._spawned, self.title))

    @property
    def name(self):
        """Display name; knives and gloves get the '★' star prefix."""
        if self.weapon_type == 'Knife' or self.weapon_type == 'Gloves':
            return f'★ {self.title}'
        return self.title

    @name.setter
    def name(self, value):
        self.title = value

    def _get_random_wear(self):
        """Helper function that returns a random item wear"""
        return randchoice(list(self.wears.keys()))

    def _set_prefix(self):
        """Helper function to set StatTrak or Souvenir prefix in random skin variation"""
        if self.can_be_stattrak:
            st_odds = randint(0, 10)  # ~1 in 11 chance of a StatTrak roll
            if st_odds == 10:
                self.is_stattrak = True
                return 'StatTrak™'
        if self.can_be_souvenir:
            return 'Souvenir'
        return ''

    def get_random_variation(self):
        """Returns a random skin variation.

        This is used to return an item entity as you would have in your Steam Inventory
        An example variation would be ("AK-47 | Asiimov (Field-Tested)",<wear_img>)

        Returns a tuple of skin name and skin image
        """
        wear = self._get_random_wear()
        image = self.wears[wear]
        wear_label = '' if wear == 'vanilla' else f'({wear})'
        # Bug fix: joining only the non-empty parts avoids the double space
        # the old f-string produced after the prefix (prefix strings carried
        # their own trailing space and the format string added another).
        parts = (self._set_prefix(), self.name, wear_label)
        variation = ' '.join(part for part in parts if part)
        return (variation, image)

    @classmethod
    def _from_data(cls, d):
        """Object constructor from dictionary

        This method is called by the scraper and should not be called manually
        """
        _cls = cls(d['weapon_type'], d['title'], d['desc'], d['lore'],
                   d['collection'], d['found_in'], d['rarity'], d['wears'])
        # Scraped instances are not custom-spawned.
        _cls._spawned = False
        return _cls
|
from .hj_dict_api import HJDictApi, NotfoundException, MultiWordsException
from .tool import format_hjdict |
#!/usr/bin/python3
"""
Rename a PDF with information taken from its metadata.
Depends on PyPDF2: https://github.com/py-pdf/PyPDF2
Depends on pathvalidate: https://github.com/thombashi/pathvalidate
"""
import argparse
import os
from pathvalidate import sanitize_filename
from PyPDF2 import PdfFileReader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("target")
parser.add_argument("--date", action="store_true")
args = parser.parse_args()
target = args.target
pdf_toread = PdfFileReader(open(target, "rb"))
pdf_info = pdf_toread.getDocumentInfo()
title = pdf_info["/Title"]
author = pdf_info["/Author"]
date = pdf_info["/CreationDate"][2:10]
new_name = []
if author:
new_name.append(sanitize_filename(author).replace(" ", "_"))
if title:
new_name.append(sanitize_filename(title).replace(" ", "_"))
if args.date:
new_name.append(date)
if new_name:
os.rename(target, "{}.pdf".format("_".join(new_name)))
|
import pytest
from terraformpy import TFObject
@pytest.fixture(autouse=True, scope="function")
def reset_tfobject():
TFObject.reset()
|
import tensorflow as tf
class Net(object):
    """Thin wrapper around a TF1 ``tf.train.Saver`` scoped to one variable scope.

    Collects every global variable under ``param_scope`` and exposes save/load
    helpers that operate on the default session.
    """
    def __init__(self,param_scope):
        # Map each variable to a name *relative* to `param_scope`, so a
        # checkpoint can be restored into a differently named outer scope.
        def _sanitize_var_name(var):
            # e.g. "outer/net/w:0" -> "/w" style key (drops the ":0" suffix).
            base_scope_name = param_scope.name.split('/')[-1]
            return ('/'.join(var.name.split(base_scope_name)[1:])).split(':')[0]
        save_vars = {_sanitize_var_name(var) : var for var in
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,param_scope.name) }
        print('save vars:')
        for v in sorted(save_vars.keys()):
            print(v)
        # max_to_keep=0: the Saver never deletes old checkpoints.
        self.saver = tf.train.Saver(var_list=save_vars,max_to_keep = 0)
    def save(self,dir,step=None):
        """Write a checkpoint into directory *dir* via the default session.

        With *step*, writes dir/model.ckpt-<step>; otherwise dir/last.ckpt.
        """
        sess = tf.get_default_session()
        if(step is not None):
            self.saver.save(sess,dir+'/model.ckpt',global_step=step)
        else :
            self.saver.save(sess,dir+'/last.ckpt')
    def load(self,model):
        """Restore variables from checkpoint path *model* into the default session."""
        sess = tf.get_default_session()
        self.saver.restore(sess,model)
    @staticmethod
    def _build_net(spec,_t):
        """Feed tensor *_t* through each callable block in *spec*; return the result."""
        for block in spec:
            _t = block(_t)
        return _t
|
"""Generate the cweno extension module."""
import pathlib
import jinja2
import pyweno
# Custom jinja filters: "pm" renders a signed int ("+3"/"-2"); "pmr" makes it
# identifier-safe by mapping -/+ to m/p.
jinja2.filters.FILTERS['pm'] = lambda x: "{:+d}".format(x)
jinja2.filters.FILTERS['pmr'] = lambda x: "{:+d}".format(x).replace('-','m').replace('+','p')
# Templates live next to this script; generated C sources go to ../src.
gendir = pathlib.Path(__file__).parent
srcdir = gendir.parent / 'src'
with open(gendir / 'weno_smoothness.tmpl.c', 'r') as f:
    smoothness = jinja2.Template(f.read())
with open(gendir / 'weno_reconstruction.tmpl.c', 'r') as f:
    reconstruction = jinja2.Template(f.read())
# Only k=3 (order 2k-1 = 5) is generated for now.
for k in range(3, 4):
    print('k:', k)
    kernel = pyweno.kernels.KernelGenerator('c', order=2*k-1)
    ksmoothness = kernel.smoothness(reuse=True)
    name = f'smoothness{k:03d}'
    with open(srcdir / f'weno_{name}.c', 'w') as f:
        f.write(smoothness.render(
            name=name, k=k, burnin=kernel.burnin, kernel=ksmoothness))
    for pts in [ 'left', 'right', 'middle' ]:
        # 'gauss_legendre', 'gauss_lobatto', 'gauss_radau' ]:
        print(' point:', pts)
        # Reconstruction points xi: cell edges/centre, or (when the quadrature
        # names above are re-enabled) a pyweno.points rule with n points.
        if pts == 'left':
            func = lambda n: [ -1 ]
            N = [ 1 ]
        elif pts == 'right':
            func = lambda n: [ 1 ]
            N = [ 1 ]
        elif pts == 'middle':
            func = lambda n: [ 0 ]
            N = [ 1 ]
        else:
            # Unreachable with the current pts list; kept for the quadrature rules.
            func = getattr(pyweno.points, pts)
            N = range(2, k+1)
        xi = func
        for n in N:
            kernel = pyweno.kernels.KernelGenerator('c', order=2*k-1, xi=func(n))
            kweights = kernel.weights()
            kreconstruction = kernel.reconstruction()
            sigma = pyweno.symbols.sigma.all(k)
            omega = pyweno.symbols.omega.all(n, k, kernel.split)
            fr = pyweno.symbols.fr.all(n, k)
            fs = pyweno.symbols.fs.all(n)
            name = pts + f'{k:03d}{n:03d}'
            with open(srcdir / f'weno_{name}.c', 'w') as f:
                f.write(reconstruction.render(
                    name=name, k=k, n=n,
                    omega=omega,
                    variables={'weights': [ x['name'] for x in sigma ],
                               'reconstruct': [ x['name'] for x in sigma ]
                               + [ x['name'] for x in omega ]
                               + [ x['name'] for x in fr ]
                               + [ x['name'] for x in fs ] },
                    weights=kweights, reconstruction=kreconstruction))
|
import oneflow as flow
import argparse
import numpy as np
import time
from model.build_model import build_model
from utils.imagenet1000_clsidx_to_labels import clsidx_2_labels
from utils.numpy_data_utils import load_image
def _parse_args():
parser = argparse.ArgumentParser("flags for test ViT")
parser.add_argument(
"--model_path", type=str, default="./vit_b_16_384", help="model path",
)
parser.add_argument(
"--model_arch",
type=str,
default="vit_b_16_384",
choices=[
"vit_b_16_224",
"vit_b_16_384",
"vit_b_32_224",
"vit_b_32_384",
"vit_l_16_224",
"vit_l_16_384",
],
help="model architecture",
)
parser.add_argument("--image_size", type=int, default=384, help="input image size")
parser.add_argument("--image_path", type=str, default="", help="input image path")
return parser.parse_args()
def main(args):
    """Run single-image classification with a pretrained ViT model.

    Builds the model from ``args``, loads weights from ``args.model_path``,
    then prints the top-1 label and probability for ``args.image_path``.
    Timing for each phase is printed along the way. Requires a CUDA device.
    """
    start_t = time.time()
    model = build_model(args)
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))
    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    model.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))
    model.eval()
    model.to("cuda")
    start_t = time.time()
    # load_image resizes to (image_size, image_size) for the chosen arch.
    image = load_image(args.image_path, image_size=(args.image_size, args.image_size))
    image = flow.Tensor(image, device=flow.device("cuda"))
    predictions = model(image).softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    # argmax over the softmax output gives the ImageNet class index.
    clsidx = np.argmax(predictions)
    print(
        "predict prob: %f, class name: %s"
        % (np.max(predictions), clsidx_2_labels[clsidx])
    )
if __name__ == "__main__":
args = _parse_args()
main(args)
|
print("1")
print("2",end="")
print(3)
print("4",end="")
print("5",end="")
print("6")
print("7",end="")
print("8",end="")
print("9",end="")
print("10")
|
import depthai as dai
import time
import cv2
from pathlib import Path
# Draw ROI and class label of each detected thing if confidence>50%
def frame_process(frame, tensor):
    """Draw detections on *frame* and annotate them with spatial coordinates.

    *tensor* is the flattened "DetectionOutput" layer: 100 detections of 7
    values each (id, label, confidence, left, top, right, bottom); the box
    edges are fractions of the frame (they are scaled by frame_width/height).

    Relies on module-level globals: frame_width, frame_height, labels,
    spatial_config_input_queue and spatial_calculator_queue.
    Returns the annotated frame (also mutated in place).
    """
    color = (255,0,0)
    keeped_roi = []
    for i in range(100): # There is 100 detections, not all of them are relevant
        if (tensor[i*7 + 2] >0.5): # 3rd value of each detection is the confidence
            keeped_roi.append(tensor[i*7:i*7+7])
    spatial_calculator_config = dai.SpatialLocationCalculatorConfig()
    for id, label, confidence, left, top, right, bottom in keeped_roi:
        topleft = (int(left*frame_width), int(top*frame_height))
        bottomright = (int(right*frame_width), int(bottom*frame_height))
        cv2.rectangle(frame, topleft, bottomright, color, 2) # ROI
        cv2.putText(frame, labels[int(label)] + f" {int(confidence * 100)}%", (topleft[0] + 10, topleft[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color) # Label and confidence
        # Add ROIs to spatial location calculator config
        spatial_config_data = dai.SpatialLocationCalculatorConfigData()
        spatial_config_data.depthThresholds.lowerThreshold = 250
        spatial_config_data.depthThresholds.upperThreshold = 5000
        # NOTE(review): Point2f is fed pixel coordinates here — confirm the
        # device expects pixels rather than normalized [0,1] values.
        spatial_config_data.roi = dai.Rect(dai.Point2f(topleft[0], topleft[1]), dai.Point2f(bottomright[0], bottomright[1]))
        spatial_calculator_config.addROI(spatial_config_data)
    # Put spatial location info inside of the ROI
    if(len(keeped_roi)>0):
        spatial_config_input_queue.send(spatial_calculator_config)
        spatial_data = spatial_calculator_queue.get().getSpatialLocations()
        # NOTE(review): every coordinate label is drawn at the *last* ROI's
        # topleft; with several detections the texts overlap — looks like a bug.
        for depth_data in spatial_data:
            cv2.putText(frame, f"X: {int(depth_data.spatialCoordinates.x)} mm", (topleft[0] + 10, topleft[1] + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.putText(frame, f"Y: {int(depth_data.spatialCoordinates.y)} mm", (topleft[0] + 10, topleft[1] + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.putText(frame, f"Z: {int(depth_data.spatialCoordinates.z)} mm", (topleft[0] + 10, topleft[1] + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
    return frame
# Define program parameters
nn_path = str(Path(__file__).parent) + "/../_models/coronamask.blob" # path to the neural network compiled model (.blob)
labels = ["background", "no mask", "mask", "no mask"]
pipeline = dai.Pipeline()
frame_width = 300
frame_height = 300
fps_limit = 20
# Configure spatial location calculator
# Wait-for-config: ROIs are pushed per-frame from frame_process().
spatial_location_calculator = pipeline.createSpatialLocationCalculator()
spatial_location_calculator.setWaitForConfigInput(True)
# Prepare depth handling
depth = pipeline.createStereoDepth()
depth.setConfidenceThreshold(255)
depth.depth.link(spatial_location_calculator.inputDepth)
# Set spatial location calculator input/output stream
spatial_data_output_stream = pipeline.createXLinkOut()
spatial_data_output_stream.setStreamName("spatialData")
spatial_location_calculator.out.link(spatial_data_output_stream.input)
spatial_config_input_stream = pipeline.createXLinkIn()
spatial_config_input_stream.setStreamName("spatialCalcConfig")
spatial_config_input_stream.out.link(spatial_location_calculator.inputConfig)
# Set rgb camera source
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(frame_width, frame_height)
cam_rgb.setInterleaved(False)
cam_rgb.setFps(fps_limit)
# Set depth source
left = pipeline.createMonoCamera()
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
right = pipeline.createMonoCamera()
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# Configure neural network settings
nn = pipeline.createNeuralNetwork()
nn.setBlobPath(nn_path)
cam_rgb.preview.link(nn.input) # link cam_rgb to nn input layer
# Set rgb output stream
rgb_output_stream = pipeline.createXLinkOut()
rgb_output_stream.setStreamName("rgb")
nn.passthrough.link(rgb_output_stream.input)
# Set depth output stream
left.out.link(depth.left)
right.out.link(depth.right)
# Set neural network output stream
nn_output_stream = pipeline.createXLinkOut()
nn_output_stream.setStreamName("nn")
nn.out.link(nn_output_stream.input)
# Run the pipeline on the first available device and display annotated frames.
with dai.Device(pipeline) as device:
    spatial_config_input_queue = device.getInputQueue("spatialCalcConfig")
    rgb_queue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    nn_queue = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    spatial_calculator_queue = device.getOutputQueue(name="spatialData", maxSize=4, blocking=False)
    frame = None
    startTime = time.monotonic() # To determined FPS
    counter = 0
    while True:
        rgb_current_output = rgb_queue.get()
        nn_current_output = nn_queue.get()
        if rgb_current_output is not None:
            frame = rgb_current_output.getCvFrame()
            cv2.putText(frame, "NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color=(255, 255, 255))
        # Process the data thanks to the NNData object
        if nn_current_output is not None:
            tensor = nn_current_output.getLayerFp16("DetectionOutput") # Get detection tensor (output layer "DetectionOutput" with this model)
            frame_process(frame, tensor)
            counter += 1
        if frame is not None:
            cv2.imshow("output", frame)
        # Quit on 'q'.
        if cv2.waitKey(1) == ord('q'):
            break
# Created by: Aaron Baker&Elliot Kjerstad
import pyshark
import sys
import os
import time
import multiprocessing
from pathlib import Path
from optparse import OptionParser
import colorama
from colorama import Fore, Back, Style
from classes.Collector import Collector
from classes.Print import Print
from classes.Totals import Totals
from classes.Writer import Writer
from classes.Saver import Saver
from classes.FolderStruct import FolderStruct
#region Option Parse
# Usage banner shown by -h/--help and on argument errors.
help_text = """
\tpython3.7 bustaPcap.py [options]
Example:\n
\tpython3.7 bustaPcap.py -p ./single.pcap -q -o
\tpython3.7 bustaPcap.py -d ./dir -q True -o
\tpython3.7 bustaPcap.py -d ./dir -q True -o -q -v
"""
parser = OptionParser(usage=help_text, version="%prog 1.0 -- beta")
parser.add_option("-d", "--DIR", dest="dir_path", metavar="DIR",
                  help="Usage: -d|--DIR <DIR> Directory path that holds all PCAP files for parsing. Allowed files within are .pcap, .cap, .pcapng")
parser.add_option("-p", "--PCAP", dest="pcap_file", metavar="FILENAME",
                  help="Usage: -p|--PCAP <PCAPFILE> PCAP File that will be parsed. Include whole destination path: Allowed file types are: .pcap, .cap, .pcapng")
parser.add_option("-q", "--FQDN", dest="do_fqdn", action="store_true",
                  help="Usage: -q|--FQDN This option finds Fully Qualified Domain Names with each IP found")
parser.add_option("-v", "--VERBOSE", dest="verbose", action="store_true",
                  help="Usage: -v|--VERBOSE Verbose setting allowing for optional printing to screen")
parser.add_option("-o", "--OUTPUT", dest="save_file", action="store_true",
                  help="Usage: -o|--OUTPUT This option saves allows to save the output")
# Parsed once at import; the rest of the module reads the `options` global.
options, args = parser.parse_args()
#endregion
#region Usage
def Usage():
    """Print the OptionParser help text to stdout."""
    parser.print_help()
    return
#endregion
#region Print Title
def Print_Title():
    """Print the ASCII-art program banner in green, then reset the style."""
    print(Fore.LIGHTGREEN_EX + "\n\t::::::::: ::: ::: :::::::: ::::::::::: ::: ::::::::: :::::::: ::: :::::::::")
    print("\t:+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+:")
    print("\t+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+")
    print("\t+#++:++#+ +#+ +:+ +#++:++#++ +#+ +#++:++#++: +#++:++#+ +#+ +#++:++#++: +#++:++#+")
    print("\t+#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+")
    print("\t#+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+#")
    print("\t######### ######## ######## ### ### ### ### ######## ### ### ###\n")
    print("\n\t================================================================================================")
    print("\t= Zedo & elliotKeen =")
    print("\t================================================================================================\n\n" + Style.RESET_ALL)
    return
#endregion
#region Arg Checker
def Arg_Check():
    """Validate the parsed command-line options, exiting on any bad input.

    Checks the PCAP file extension and the directory path, normalizes the
    store_true flags to real booleans, and requires -p or -d to be present.
    """
    if options.pcap_file:
        # endswith() accepts a tuple; the original chained bitwise `|` between
        # the boolean results, which works by accident but reads wrong.
        if not options.pcap_file.endswith(('.pcap', '.cap', '.pcapng')):
            print(Fore.RED + "\t[!] " + Style.RESET_ALL + "File type is not correct")
            print(Fore.YELLOW + "\t[-] " + Style.RESET_ALL + "Allowed file types are: .pcap, .cap, .pcapng")
            exit()
    if options.dir_path:
        if not os.path.isdir(options.dir_path):
            print(Fore.RED + "\t[!] " + Style.RESET_ALL + "Directory path is not correct")
            exit()
    # store_true options are either True or None: coerce to real booleans.
    # The original's error branches were unreachable and referenced a
    # misspelled `Stlye` name that would have raised NameError if hit.
    options.do_fqdn = bool(options.do_fqdn)
    options.verbose = bool(options.verbose)
    if not options.pcap_file and not options.dir_path:
        print(Fore.RED + "\t[!] " + Style.RESET_ALL + "Please use -p <pcap> or -d <directory>")
        exit()
    return
#endregion
#region Check Folders
def Check_Folders():
    """Ensure the expected output folder structure exists next to this script."""
    print(Fore.LIGHTGREEN_EX + "\t[-] " + Style.RESET_ALL + "Checking Folders")
    folders = FolderStruct(os.path.dirname(os.path.abspath(__file__)))
    folders.Check_Folders()
    return
#endregion
#region Single PCAP
def Single_PCAP():
    """Parse the single PCAP given via -p, printing results when -v is set.

    Returns the Collector when -o/--OUTPUT was requested, otherwise None.
    """
    now = time.time()
    captures = pyshark.FileCapture(options.pcap_file)
    # Report folder is named after the file's base name without extension.
    folders = FolderStruct(os.path.dirname(os.path.abspath(__file__)))
    folders.Create_Report_Folder((os.path.basename(options.pcap_file)).split('.')[0])
    capture = Collector(captures, FileName=(os.path.basename(options.pcap_file)), FolderName = os.path.dirname(os.path.abspath(__file__))).Rake()
    #caps = Print(capture, options.do_fqdn)
    if bool(options.verbose) is True:
        Print(capture, options.do_fqdn).Print_All()
    print(Fore.LIGHTCYAN_EX + "\n\t [?] " + Fore.LIGHTGREEN_EX + "Total Time Spent: " + Fore.LIGHTYELLOW_EX + "{0:.2f}".format(time.time() - now) + " seconds.." + Style.RESET_ALL)
    if options.save_file:
        return capture
    else:
        return None
#endregion
#region Directory PCAP
def Dir_PCAPS():
    """Parse every file in the -d directory, aggregating results into a Totals.

    Prints per-file and total timings; returns the Totals collection when
    -o/--OUTPUT was requested, otherwise None.
    """
    folders = []
    files = []
    correct_path = Path(options.dir_path)
    for entry in os.scandir(correct_path):
        if entry.is_dir():
            # Bug fix: was `folder.append(...)` — a NameError; the list is `folders`.
            folders.append(entry.path)
        elif entry.is_file():
            files.append(entry.path)
    total_collection = Totals()
    totaltime = time.time()
    print(Fore.GREEN + "\n\t[+] " + Style.RESET_ALL + "Initializing Dictionary\n")
    for pcap in files:
        now = time.time()
        captures = pyshark.FileCapture(pcap)
        file = os.path.basename(pcap)
        print(Fore.LIGHTGREEN_EX + "\n\tProcessing File: " + Fore.LIGHTYELLOW_EX + file + Style.RESET_ALL)
        capture = Collector(captures, FileName=file, FolderName = os.path.dirname(os.path.abspath(__file__))).Rake()
        total_collection.Add_Collector(capture)
        print(Fore.LIGHTCYAN_EX + "\n\t [?] " + Style.RESET_ALL + "Time Spent: " + Fore.LIGHTYELLOW_EX + "{0:.2f}".format(time.time() - now) + " seconds.." + Style.RESET_ALL)
    if bool(options.verbose) is True:
        Print(total_collection, options.do_fqdn).Print_All()
    print(Fore.LIGHTCYAN_EX + "\n\t [?] " + Fore.LIGHTGREEN_EX + "Total Time Spent: " + Fore.LIGHTYELLOW_EX + "{0:.2f}".format(time.time() - totaltime) + " seconds.." + Style.RESET_ALL)
    if options.save_file:
        return total_collection
    else:
        return None
#endregion
#region Main
def Main():
    """Program entry point: validate args, print the banner, parse the
    PCAP(s), and optionally write reports to disk (-o)."""
    colorama.init()
    Arg_Check()
    Print_Title()
    Check_Folders()
    print(Fore.LIGHTGREEN_EX + "\n\t[-] " + Style.RESET_ALL + "Processing file(s). Please Wait...")
    # -d takes precedence over -p when both are supplied.
    if options.dir_path:
        collected = Dir_PCAPS()
    else:
        collected = Single_PCAP()
    # A non-None result means -o/--OUTPUT was requested.
    if collected is not None:
        print(Fore.GREEN + "\n\t[-] " + Fore.LIGHTYELLOW_EX + "Writing to file" + Style.RESET_ALL)
        print(Fore.LIGHTGREEN_EX + "\t-----------------" + Style.RESET_ALL)
        if type(collected) is Totals:
            # Directory run: save each capture, then append the totals report.
            folder = FolderStruct(os.path.dirname(os.path.abspath(__file__)))
            for pkt in collected.All_Collected():
                folders = FolderStruct(os.path.dirname(os.path.abspath(__file__)))
                folders.Create_Report_Folder(pkt.Get_Name().split('.')[0])
                print("\t\t- %s : %s" % ("Saving data from", pkt.Get_Name()))
                Saver(pkt, options.do_fqdn, FileName=pkt.Get_Name().split('.')[0], Folders=folders, Path=folders.Get_Path()).Save()
            fileWriter = Writer(os.path.basename(options.dir_path), Saver(collected, options.do_fqdn), "a", path = folder.Get_Path())
            fileWriter.Save_Totals()
        else:
            # Single-file run: one report folder, one save.
            folders = FolderStruct(os.path.dirname(os.path.abspath(__file__)))
            folders.Create_Report_Folder(collected.Get_Name().split('.')[0])
            Saver(collected, options.do_fqdn, FileName=collected.Get_Name(), Folders=folders, Path=folders.Get_Path()).Save()
    return
#endregion
#region Main named if for keyboard interrupt
if __name__ == "__main__":
try:
Main()
except KeyboardInterrupt:
print(Fore.RED + "\n\t[!] Forced Termination!! " + Style.RESET_ALL)
exit()
except Exception as e:
print(e)
finally:
exit()
#endregion |
import os
def get_path(datafile):
    """Return the absolute path of *datafile* located next to this module."""
    module_dir = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    return os.path.join(module_dir, datafile)
# coding: utf-8
import sys
import unittest
sys.path.append('../yurlungur')
import yurlungur as yr
from yurlungur.core.env import _Substance
@unittest.skipUnless(_Substance(), "Substance is not found")
class TestSubstance(unittest.TestCase):
@unittest.skip("only runtime")
def test_cmds(self):
with yr.UndoGroup("undo"):
print(dir(yr))
node = yr.Node().create("uniform")
print(node, node.attr("$pixelsize"), node.attrs)
normal = yr.Node().create("normal")
print(normal)
node.connect("unique_filter_output", normal, "inputNodeOutput.connector")
print(node.parent())
node.disconnect("unique_filter_output")
print(node.parent())
@unittest.skip("only runtime")
def test_file(self):
yr.File.open("")
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
|
import pytest
from users.models import User
pytestmark = [pytest.mark.django_db]
def get_user():
    """Return the most recently created User record."""
    return User.objects.last()
@pytest.fixture
def lead_data():
    """A valid lead payload; '__TESTING__' appears to be the recaptcha test
    token accepted while DRF_RECAPTCHA_TESTING_PASS is on (see
    test_recaptcha_fail)."""
    return {
        'name': 'Monty Python',
        'email': 'monty@python.org',
        'recaptcha': '__TESTING__',
    }
def test_creating(api, lead_data):
    """Posting a lead creates a user with the name split into first/last."""
    api.post('/api/v2/leads/email/eggs/', lead_data, format='multipart')
    created = get_user()
    assert created.first_name == 'Monty'
    assert created.last_name == 'Python'
    assert created.email == 'monty@python.org'
def test_creating_response(api, lead_data):
    """The endpoint acknowledges a valid lead with ok=True and a message."""
    got = api.post('/api/v2/leads/email/eggs/', lead_data, format='multipart')
    assert got['ok'] is True
    assert got['message'] == 'No spam, only ham'
def test_nameless(api, lead_data):
    """The name field is optional; the user is still created."""
    del lead_data['name']
    api.post('/api/v2/leads/email/eggs/', lead_data, format='multipart')
    created = get_user()
    assert created.email == 'monty@python.org'
def test_emailless_should_fail(api, lead_data):
    """Email is required: the endpoint must answer 400 without it."""
    del lead_data['email']
    api.post('/api/v2/leads/email/eggs/', lead_data, format='multipart', expected_status_code=400)
def test_recaptcha_fail(api, lead_data, settings):
    """A failing recaptcha check yields 400 with an error on the recaptcha field."""
    settings.DRF_RECAPTCHA_TESTING_PASS = False
    got = api.post('/api/v2/leads/email/eggs/', lead_data, format='multipart', expected_status_code=400)
    assert 'Error' in got['recaptcha'][0]
|
class Solution:
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of *s* with no
        repeating characters.

        Sliding window over *s*: extend the right edge one character at a
        time, shrinking from the left whenever the new character is already
        inside the window. O(n) time, O(k) space for the window set.
        """
        window = set()
        left = 0
        best = 0
        for right, ch in enumerate(s):
            # Evict from the left until `ch` can join without a duplicate.
            while ch in window:
                window.remove(s[left])
                left += 1
            window.add(ch)
            best = max(best, right - left + 1)
        return best
|
# -*- coding: utf-8 -*-
"""
.. module:: openbread
:platform: Unix, Windows
:synopsis: OPENFPM based brownian reaction dynamics
.. moduleauthor:: openbread team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import numpy as np
from ..utils.constants import AVOGADRO_NUMBER
from ..utils.probabilities import calc_collision_success_probability
class ParticleModelSolution(object):
    """
    Solution of a particle (Brownian reaction dynamics) simulation run,
    post-processed from the raw data returned by the C++ interface.

    Exposes per-species particle counts, mean squared displacements,
    collision/acceptance statistics, and effective macroscopic rate
    constants together with a 95% Wald confidence error estimate.
    """

    def __init__(self, results, reactions, species, volume, delta_t, n_log,
                 is_hardsphere=True):
        """
        :param results: list of per-log-step dicts from the C++ wrapper
                        (keys: 'species', 'sq_disp', 'collisions',
                        'acceptance', 'time')
        :param reactions: reaction_name -> dict with 'educts', 'products', 'rates'
        :param species: species_name -> dict with 'id', 'diff' (diffusion
                        coefficient) and 'rad' (radius)
        :param volume: simulation volume (assumed mu^3 — see unit TODO below)
        :param delta_t: simulation time step
        :param n_log: number of simulation steps between log entries
        :param is_hardsphere: whether hard-sphere collision probabilities apply
        """
        # Save the raw output
        self._interface_output = results

        delta_t_log = delta_t * n_log

        # Map numeric species ids back to species names.
        species_dict = {}
        for this_species_name, this_species_dict in species.items():
            this_species_key = this_species_dict['id']
            species_dict[this_species_key] = this_species_name

        collision_dict = {}
        acceptance_dict = {}
        for this_reaction_name, reaction_dict in reactions.items():
            if len(reaction_dict['educts']) == 2:
                # Calculate the collision key from the two educt ids
                # (symmetric pairing, independent of educt order).
                a = reaction_dict['educts'][0]
                b = reaction_dict['educts'][1]
                if a < b:
                    collision_key = (a + b) * (a + b + 1) + b
                else:
                    collision_key = (a + b) * (a + b + 1) + a
                collision_dict[collision_key] = this_reaction_name

            if len(reaction_dict['educts']) == 1:
                acceptance_key = int((reaction_dict['educts'][0] + 1) * 1e6)
                counter = 0
                for product_id in reaction_dict['products']:
                    acceptance_key += product_id * 10**(counter * 2)
                    # BUGFIX: was "counter =+ 1" (i.e. counter = +1), which
                    # gave every product after the first the same weight.
                    counter += 1
                acceptance_dict[acceptance_key] = this_reaction_name

        # Per-species particle counts over time.
        self.species = OrderedDict([])
        for this_key in results[0]["species"].keys():
            this_species_name = species_dict[this_key]
            self.species[this_species_name] = [result['species'][this_key]
                                               for result in results]

        # Mean squared displacement per particle (0.0 when no particles left).
        self.mean_squared_disp = OrderedDict([])
        for this_key in results[0]["species"].keys():
            this_species_name = species_dict[this_key]
            self.mean_squared_disp[this_species_name] = [result['sq_disp'][this_key]
                                                         / result['species'][this_key]
                                                         if result['species'][this_key] > 0 else 0.0
                                                         for result in results]

        # Collision counts per bimolecular reaction.
        self.collisions = OrderedDict([])
        for this_key, this_reaction_name in collision_dict.items():
            self.collisions[this_reaction_name] = []
            for result in results:
                try:
                    self.collisions[this_reaction_name].append(result['collisions'][this_key])
                except (KeyError, IndexError):
                    # ROBUSTNESS: also catch KeyError — if the wrapper returns
                    # dicts rather than lists, IndexError alone never fires.
                    self.collisions[this_reaction_name].append(0.0)

        # Acceptance ratios per unimolecular reaction, normalized per log step.
        self.acceptance = OrderedDict([])
        for this_key, this_reaction_name in acceptance_dict.items():
            self.acceptance[this_reaction_name] = []
            for result in results:
                try:
                    self.acceptance[this_reaction_name].append(
                        result['acceptance'][this_key] / float(n_log))
                except (KeyError, IndexError):
                    self.acceptance[this_reaction_name].append(0.0)

        self.time = [result['time'][0] for result in results]

        # Effective macroscopic rate constants
        self.effective_rate_constants = OrderedDict([])
        self.error_effective_rate_constants = OrderedDict([])

        # Calculate the effective rate constants
        for this_reaction_name, reaction_dict in reactions.items():
            if len(reaction_dict['educts']) == 2:
                # Discard the first 10% of measurements as equilibration.
                n_msrmts = round(len(self.collisions[this_reaction_name]) * 0.1)
                mean_collisions = np.mean(self.collisions[this_reaction_name][n_msrmts:])
                sample_size = len(self.collisions[this_reaction_name]) - n_msrmts

                first_species_id = reaction_dict['educts'][0]
                second_species_id = reaction_dict['educts'][1]

                num_first_educt = results[0]['species'][first_species_id]
                num_second_educt = results[0]['species'][second_species_id]
                possible_collisions = float(num_first_educt * num_second_educt)
                total_possible_collisions = possible_collisions * sample_size

                first_species_name = species_dict[first_species_id]
                second_species_name = species_dict[second_species_id]
                sum_diffusion = species[first_species_name]['diff'] \
                                + species[second_species_name]['diff']
                sum_radius = species[first_species_name]['rad'] \
                             + species[second_species_name]['rad']

                micro_rate_constant = reaction_dict['rates'][0]
                avg_succes = calc_collision_success_probability(micro_rate_constant,
                                                                sum_diffusion,
                                                                sum_radius,
                                                                delta_t,
                                                                is_hardsphere)

                probablity_estimator = mean_collisions / possible_collisions
                scaling_factor = avg_succes / delta_t_log * volume * AVOGADRO_NUMBER
                this_effective_rate_constant = probablity_estimator * scaling_factor

                correction_factor = np.sqrt(probablity_estimator
                                            * (1 - probablity_estimator)
                                            / total_possible_collisions)
                # 95% confidence level for the Wald estimation of the
                # confidence interval for binomial distributions
                this_error_effective_rate_constants = 1.96 * correction_factor

                # TODO Implementation using a proper unit converter
                # Scale to 1/Ms assumed input in mu^3/s
                self.effective_rate_constants[this_reaction_name] = \
                    this_effective_rate_constant
                self.error_effective_rate_constants[this_reaction_name] = \
                    this_error_effective_rate_constants

            if len(reaction_dict['educts']) == 1:
                micro_rate_constant = reaction_dict['rates'][0]
                n_msrmts = round(len(self.acceptance[this_reaction_name]) * 0.1)
                avg_succes = np.mean(self.acceptance[this_reaction_name][n_msrmts:])
                this_effective_rate_constant = micro_rate_constant * avg_succes
                self.effective_rate_constants[this_reaction_name] = \
                    this_effective_rate_constant
|
from flask import Flask
from flask import request
from flask import jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String, Float
from flask_marshmallow import Marshmallow
from flask_jwt_extended import JWTManager,jwt_required,create_access_token
from flask_mail import Mail, Message
import os
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
# SQLite database file lives next to this module.
app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///'+os.path.join(basedir,'plantes.db')
# NOTE(review): hard-coded JWT secret — should come from the environment in production.
app.config['JWT_SECRET_KEY'] = 'super-secret'
# SMTP settings (Mailtrap sandbox); credentials presumably supplied elsewhere — verify.
app.config['MAIL_SERVER']='smtp.mailtrap.io'
app.config['MAIL_PORT'] = 2525
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
# Flask extension instances bound to this app.
db = SQLAlchemy(app)
ma = Marshmallow(app)
jwt = JWTManager(app)
mail = Mail(app)
# commands for command line
@app.cli.command('db_create')
def db_create():
    """CLI command: create all database tables (``flask db_create``)."""
    db.create_all()
    print('Database created!')
@app.cli.command('db_drop')
def db_drop():
    """CLI command: drop all database tables (``flask db_drop``)."""
    db.drop_all()
    print('Database dropped')
@app.cli.command('db_seed')
def db_seed():
    """CLI command: insert three sample planets and one test user, then commit."""
    sample_planets = [
        Planet(planet_name='Mercury',
               planet_type='Class D',
               home_star='Sol',
               mass=3.258e23,
               radius=1516,
               distance=35.98e6),
        Planet(planet_name='Venus',
               planet_type='Class K',
               home_star='Sol',
               mass=4.867e24,
               radius=3760,
               distance=67.24e6),
        Planet(planet_name='Earth',
               planet_type='Class K',
               home_star='Sol',
               mass=5.972e24,
               radius=3959,
               distance=92.96e6),
    ]
    for planet in sample_planets:
        db.session.add(planet)

    test_user = User(first_name='William',
                     last_name='Herschel',
                     email='test@test.com',
                     password='Password')
    db.session.add(test_user)
    db.session.commit()
    print("Database seeded")
@app.route('/planets', methods=['GET'])
def planets():
    """Return every planet in the database as a JSON list."""
    all_planets = Planet.query.all()
    serialized = planets_schema.dump(all_planets)
    print(serialized)  # debug output, kept to preserve behaviour
    return jsonify(serialized)
@app.route('/register', methods=['POST'])
def register():
    """Create a new user from form data; 409 when the email is already taken."""
    email = request.form['email']
    existing = User.query.filter_by(email=email).first()
    if existing:
        return jsonify(message="Email already exist"), 409
    new_user = User(first_name=request.form['first_name'],
                    last_name=request.form['last_name'],
                    email=email,
                    password=request.form['password'])
    db.session.add(new_user)
    db.session.commit()
    return jsonify(message='User created Succesfully'), 201
@app.route('/')
def hello_world():
    """Root endpoint: simple liveness/greeting message."""
    return jsonify(message="hello its a planetary api"),200
@app.route('/not_found')
def test_not_found():
    """Demo endpoint that always returns a 404 JSON response."""
    return jsonify(message="not found"), 404
@app.route('/parameters')
def parameters():
    """Age-gate demo reading ``name`` and ``age`` from the query string."""
    name = request.args.get('name')
    # NOTE(review): int(None) raises (HTTP 500) when 'age' is missing — confirm intended.
    age = int(request.args.get('age'))
    if age < 18:
        return jsonify(message="sorry "+name+" your are not old enough"), 401
    return jsonify(message="Welcome "+name+" your are old enough"), 200
@app.route('/url_variable/<string:name>/<int:age>')
def url_variables(name: str, age: int):
    """Same age-gate demo as /parameters, but using typed URL path variables."""
    if age >= 18:
        return jsonify(message="Welcome " + name + " your are old enough"), 200
    return jsonify(message="sorry " + name + " your are not old enough"), 401
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user from JSON or form credentials and return a JWT."""
    credentials = request.json if request.is_json else request.form
    email = credentials['email']
    password = credentials['password']
    user = User.query.filter_by(email=email, password=password).first()
    if user:
        access_token = create_access_token(identity=email)
        return jsonify(message='Login Succeeded', access_token=access_token)
    return jsonify(message='Bad email or password'), 401
@app.route('/retrieve_password/<string:email>', methods=['GET'])
def retrieve_password(email: str):
    """E-mail the stored password to the given address.

    NOTE(review): passwords are stored and mailed in plain text — a real
    deployment should use hashed passwords plus a reset-token flow.
    """
    user = User.query.filter_by(email=email).first()
    if not user:
        return jsonify(message="That email doesn't exist")
    msg = Message("Your planetary API password is " + user.password,
                  sender="admin@planetary-api.com",
                  recipients=[email])
    mail.send(message=msg)
    return jsonify(message="Password send to " + email)
@app.route('/planet_details/<int:planet_id>', methods=['GET'])
def planet_details(planet_id: int):
    """Return the serialized details of one planet, or 404 when unknown."""
    planet = Planet.query.filter_by(planet_id=planet_id).first()
    if not planet:
        return jsonify(message="That planet not exist"), 404
    return jsonify(planet_schema.dump(planet))
@app.route('/add_planet', methods=['POST'])
@jwt_required
def add_planet():
    """Create a new planet from form data (JWT-protected); 409 on duplicate name.

    NOTE(review): flask_jwt_extended >= 4 requires ``@jwt_required()`` with
    parentheses — confirm against the pinned library version.
    """
    planet_name = request.form['planet_name']
    test = Planet.query.filter_by(planet_name=planet_name).first()
    if test:
        return jsonify("there is already a planet with that name"), 409
    else:
        planet_type = request.form['planet_type']
        home_star = request.form['home_star']
        mass = float(request.form['mass'])
        radius = float(request.form['radius'])
        distance = float(request.form['distance'])
        # BUGFIX: 'radius' was parsed but never passed to the Planet
        # constructor, so every new planet was stored with a NULL radius.
        new_planet = Planet(planet_name=planet_name,
                            planet_type=planet_type,
                            home_star=home_star,
                            mass=mass,
                            radius=radius,
                            distance=distance)
        db.session.add(new_planet)
        db.session.commit()
        return jsonify(message="You Added a planet"), 201
@app.route('/update_planet', methods=['POST'])
def update_planet():
    """Update an existing planet in place from form data; 404 when unknown."""
    planet_id = int(request.form['planet_id'])
    planet = Planet.query.filter_by(planet_id=planet_id).first()
    if not planet:
        return jsonify(message="The planet does not exist"), 404
    planet.planet_name = request.form['planet_name']
    planet.planet_type = request.form['planet_type']
    planet.home_star = request.form['home_star']
    for numeric_field in ('mass', 'radius', 'distance'):
        setattr(planet, numeric_field, float(request.form[numeric_field]))
    db.session.commit()
    return jsonify(message="you updated the planet"), 202
@app.route('/remove_planet/<int:planet_id>', methods=['DELETE'])
def remove_planet(planet_id: int):
    """Delete a planet by id; 404 when it does not exist."""
    planet = Planet.query.filter_by(planet_id=planet_id).first()
    if planet:
        db.session.delete(planet)
        db.session.commit()
        return jsonify(message="You deleted the planet"), 202
    else:
        # BUGFIX: user-facing message said "planed" instead of "planet".
        return jsonify(message="The planet does not exist"), 404
# database models
class User(db.Model):
    """Application user. NOTE(review): password is stored in plain text."""
    __tablename__ = 'users'
    id = Column(Integer,primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    email = Column(String)
    password = Column(String)
class Planet(db.Model):
    """A planet record: classification, parent star, and physical properties."""
    __tablename__ = 'planets'
    planet_id = Column(Integer,primary_key=True)
    planet_name = Column(String)
    planet_type = Column(String)
    home_star = Column(String)
    mass = Column(Float)
    radius = Column(Float)
    distance = Column(Float)
class UserSchema(ma.Schema):
    """Marshmallow schema for User. NOTE(review): serializes the password field."""
    class Meta:
        fields = ('id','first_name','last_name','email','password')
class PlanetSchema(ma.Schema):
    """Marshmallow schema exposing all Planet columns."""
    class Meta:
        fields = ('planet_id','planet_name','planet_type','home_star','mass','radius','distance')
# Single-object and list serializers for each model.
user_schema = UserSchema()
users_schema = UserSchema(many=True)
planet_schema = PlanetSchema()
planets_schema = PlanetSchema(many=True)
if __name__=='__main__':
    # Run the Flask development server when executed directly.
    app.run()
import datetime
import socket
import socketserver
import struct
from json import dumps
from time import time
import pymongo
from contentmanager import BoardManager, MailManager, PostManager
from user import User
from utils import *
class Server(socketserver.StreamRequestHandler):
    """Per-connection request handler for the bulletin-board server.

    Commands arrive as <u16 little-endian length><utf-8 payload> frames and
    are dispatched to handler methods via the `self.function` table.
    Replies are JSON dicts (or plain strings) framed the same way.
    """

    def __init__(self, *args):
        # Command word -> bound handler. Built before super().__init__
        # because StreamRequestHandler.__init__ invokes handle().
        self.function = {
            "register": self.register,
            "login": self.login,
            "logout": self.logout,
            "whoami": self.whoami,
            "create-board": self.create_board,
            "create-post": self.create_post,
            "list-board": self.list_board,
            "list-post": self.list_post,
            "read": self.read,
            "delete-post": self.delete_post,
            "update-post": self.update_post,
            "mail-to": self.mailto,
            "list-mail": self.listmail,
            "retr-mail": self.retrieve_mail,
            "delete-mail": self.delete_mail,
            "comment": self.comment
        }
        super().__init__(*args)

    def reply(self, response, *args):
        """Serialize `response` (dict/str/bytes) and send it length-prefixed."""
        if args:
            response = response.format(*args)
        if isinstance(response, dict):
            response = dumps(response)
        if isinstance(response, str):
            response = response.encode()
        self.wfile.write(struct.pack('<H', len(response)) + response)

    def recv_command(self):
        """Read one framed command; a broken/closed stream maps to "exit"."""
        try:
            length = struct.unpack('<H', self.rfile.read(2))[0]
        except (struct.error, OSError):
            return "exit"
        return self.rfile.read(length).decode()

    def register(self):
        """Register a new user and hand back a freshly named storage bucket."""
        username = self.commands[0]
        # NOTE(review): password comes from commands[2]; commands[1] is
        # presumably another client-protocol field (e.g. email) — confirm.
        password = self.commands[2]
        ret = {
            "type": "register",
            "bucket_name": "0716061-{}-{}".format(username.lower(), int(time())),
            "success": False
        }
        if self.user.register(username, password, ret['bucket_name']):
            ret['msg'] = "Register successfully."
            ret['success'] = True
        else:
            ret['bucket_name'] = None
            ret['msg'] = "Username is already used."
        self.reply(ret)

    def login(self):
        """Authenticate the connection; only one login per session."""
        ret = {
            "type": "login",
            "bucket_name": None,
            "success": False}
        if self.user.is_unauthorized():
            username, password = self.commands
            if self.user.login(username, password):
                ret['bucket_name'] = self.user.bucket_name
                ret['msg'] = "Welcome, {}.".format(username)
                ret['success'] = True
            else:
                ret['msg'] = "Login failed."
        else:
            ret['msg'] = "Please logout first."
        self.reply(ret)

    def logout(self):
        """Log the current user out (no-op message when not logged in)."""
        ret = {}
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            ret['msg'] = "Bye, {}.".format(self.user.username)
            self.user.logout()
        self.reply(ret)

    def whoami(self):
        """Reply with the current username."""
        ret = {}
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            ret['msg'] = self.user.username
        self.reply(ret)

    def create_board(self):
        """Create a board moderated by the current user."""
        ret = {}
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            if self.board.not_exist(self.commands[0]):
                document = {"board_name": self.commands[0],
                            "mod": self.user.username
                            }
                self.board.add_board(document)
                ret['msg'] = "Create board successfully."
            else:
                ret['msg'] = "Board already exist."
        self.reply(ret)

    def create_post(self):
        """Create a post on a board; the content is uploaded by the client."""
        ret = {
            "type": "create_post",
            "bucket_name": None,
            "success": False
        }
        board_name = self.commands[0]
        extracted = extract_post(self.raw_command)
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        elif self.board.not_exist(board_name):
            ret['msg'] = "Board does not exist."
        else:
            title = extracted.group(1)
            content = extracted.group(2).replace('<br>', '\r\n')
            ret['bucket_name'] = self.user.bucket_name
            ret['success'] = True
            ret['msg'] = "Create post successfully."
            ret['content'] = content
            date = list(datetime.datetime.now(TIMEZONE).timetuple()[:3])
            document = {'board_name': board_name,
                        'title': title,
                        'owner': self.user.username,
                        'date': date,
                        'post_id': None,
                        'bucket_name': self.user.bucket_name
                        }
            ret['id'] = self.post.add_post(document)
        self.reply(ret)

    def list_board(self):
        """List boards, optionally filtered by a ##keyword regex."""
        output = ['Index\tName\tModerator']
        extracted = extract_keyword(self.raw_command)
        document = {}
        if extracted is not None:
            keyword = extracted.group(1)
            document["board_name"] = {"$regex": keyword}
        for idx, doc in enumerate(self.board.list_all(document), start=1):
            output.append('{}\t{}\t{}'.format(
                idx,
                doc['board_name'],
                doc['mod']))
        self.reply({"msg": '\n'.join(output)})

    def list_post(self):
        """List posts on a board, optionally filtered by title keyword."""
        board_name = self.commands[0]
        if self.board.not_exist(board_name):
            self.reply({"msg": "Board does not exist."})
            return
        output = ['ID\tTitle\tAuthor\tDate']
        extracted = extract_keyword(self.raw_command)
        document = {"board_name": board_name}
        if extracted is not None:
            keyword = extracted.group(1)
            document["title"] = {"$regex": keyword}
        for doc in self.post.list_all(document):
            output.append('{}\t{}\t{}\t{:02d}/{:02d}'.format(
                doc['post_id'],
                doc['title'],
                doc['owner'],
                *doc['date'][1:]))
        self.reply({"msg": '\n'.join(output)})

    def read(self):
        """Return a post's header plus comment locations for client download."""
        ret = {
            "type": "read",
            "success": False
        }
        postid = int(self.commands[0])
        if self.post.not_exist(postid):
            ret['msg'] = "Post does not exist."
        else:
            doc = self.post.read(postid)
            cmts = []
            head = "Author\t:{}\r\nTitle\t:{}\r\nDate\t:{:04d}-{:02d}-{:02d}".format(
                doc['owner'],
                doc['title'],
                *doc['date']
            )
            for comment in self.post.list_comment(postid):
                cmts.append([comment['bucket_name'],
                             comment['key'],
                             comment['owner']])
            ret['comments'] = cmts
            ret['msg'] = head
            ret['id'] = postid
            ret["bucket_name"] = doc['bucket_name']
            ret['success'] = True
        self.reply(ret)

    def delete_post(self):
        """Delete a post owned by the current user (plus its comment keys)."""
        # BUGFIX: was "ret = ret = {...}" — a redundant double assignment.
        ret = {
            "type": "delete_post",
            "bucket_name": None,
            "success": False}
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            postid = int(self.commands[0])
            if self.post.not_exist(postid):
                ret['msg'] = "Post does not exist."
            else:
                document = {
                    "post_id": postid,
                    "owner": self.user.username
                }
                delete_result = self.post.delete(document)
                if delete_result[0]:
                    ret['bucket_name'] = self.user.bucket_name
                    ret['success'] = True
                    ret['id'] = postid
                    ret['comments'] = delete_result[1]
                    ret['msg'] = "Delete successfully."
                else:
                    ret['msg'] = "Not the post owner."
        self.reply(ret)

    def update_post(self):
        """Update a post's title (in the DB) or content (client re-uploads)."""
        ret = {
            "type": "update_post",
            "bucket_name": None,
            "success": False}
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            postid = int(self.commands[0])
            if self.post.not_exist(postid):
                ret['msg'] = "Post does not exist."
            else:
                title, content = extract_title_content(self.raw_command)
                document = {"post_id": postid,
                            "owner": self.user.username
                            }
                if title is not None:
                    result = self.post.update(
                        document, {"$set": {"title": title.group(1)}})
                    if result:
                        ret['msg'] = "Update successfully."
                    else:
                        ret['msg'] = "Not the post owner."
                else:
                    # Content update: verify ownership before telling the
                    # client where to upload the new content.
                    try:
                        next(self.post.list_all(document))
                    except StopIteration:
                        ret['msg'] = "Not the post owner."
                    else:
                        ret['bucket_name'] = self.user.bucket_name
                        ret['msg'] = "Update successfully."
                        ret['success'] = True
                        ret['content'] = content.group(
                            1).replace('<br>', '\r\n')
                        ret['id'] = postid
        self.reply(ret)

    def comment(self):
        """Attach a comment to a post; content is stored by the client."""
        ret = {
            "type": "comment",
            "bucket_name": None,
            "success": False
        }
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            postid = int(self.commands[0])
            if self.post.not_exist(postid):
                ret['msg'] = "Post does not exist."
            else:
                comment = extract_comment(self.raw_command)
                if comment is not None:
                    t = str(int(time()))
                    document = {"post_id": postid,
                                "owner": self.user.username,
                                "bucket_name": self.user.bucket_name,
                                "key": str(postid) + '_' + t}
                    self.post.comment(document)
                    ret['success'] = True
                    ret['bucket_name'] = self.user.bucket_name
                    ret['msg'] = "Comment successfully."
                    ret['content'] = comment.group(1)
                    ret['id'] = str(postid)
                    # NOTE(review): client presumably joins id + key to
                    # rebuild the stored "<postid>_<t>" key — confirm.
                    ret['key'] = '_' + t
        self.reply(ret)

    def mailto(self):
        """Send a mail to another user; content goes into their bucket."""
        ret = {
            "type": "mailto",
            "bucket_name": None,
            "success": False
        }
        username = self.commands[0]
        extracted = extract_mail(self.raw_command)
        subject = extracted.group(1)
        content = extracted.group(2).replace('<br>', '\r\n')
        date = list(datetime.datetime.now(TIMEZONE).timetuple()[:3])
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            bkt_name = self.user.get_bucket(username)
            if bkt_name is None:
                ret['msg'] = username + " does not exist."
            else:
                seq = self.mail.mailto(
                    self.user.username, username, subject, date)
                ret['msg'] = "Sent successfully."
                ret['success'] = True
                ret['bucket_name'] = bkt_name
                ret['content'] = content
                ret['key'] = 'mail_' + str(seq)
        self.reply(ret)

    def listmail(self):
        """List the current user's mailbox as an indexed table."""
        if self.user.is_unauthorized():
            self.reply({'msg': "Please login first."})
        else:
            out = ["ID\tSubject\tFrom\tDate"]
            mails = self.mail.list_all(self.user.username)
            for idx, mail in enumerate(mails, start=1):
                out.append('{}\t{}\t{}\t{:02d}/{:02d}'.format(
                    idx,
                    mail['subject'],
                    mail['from'],
                    *mail['date'][1:]
                ))
            self.reply({'msg': '\n'.join(out)})

    def retrieve_mail(self):
        """Return one mail's metadata plus its bucket key for download."""
        mail_num = int(self.commands[0])
        ret = {
            "type": "retrieve_mail",
            "bucket_name": None,
            "success": False
        }
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            mail_metadata = self.mail.exist(self.user.username, mail_num)
            if mail_metadata is None:
                ret['msg'] = "No such mail."
            else:
                ret['bucket_name'] = self.user.bucket_name
                ret['success'] = True
                ret['msg'] = 'Subject :' + mail_metadata['subject']
                ret['from'] = mail_metadata['from']
                ret['date'] = '{:04d}-{:02d}-{:02d}'.format(
                    *mail_metadata['date'])
                ret['key'] = "mail_" + str(mail_num)
        self.reply(ret)

    def delete_mail(self):
        """Delete one mail and tell the client which bucket key to remove."""
        mail_num = int(self.commands[0])
        ret = {
            "type": "delete_mail",
            "bucket_name": None,
            "success": False
        }
        if self.user.is_unauthorized():
            ret['msg'] = "Please login first."
        else:
            isdeleted = self.mail.delete(self.user.username, mail_num)
            if isdeleted:
                ret['bucket_name'] = self.user.bucket_name
                ret['success'] = True
                ret['key'] = 'mail_' + str(mail_num)
                ret['msg'] = 'Mail deleted.'
            else:
                ret['msg'] = "No such mail."
        self.reply(ret)

    def handle(self):
        """Main per-connection loop: greet, then dispatch commands until exit."""
        print("New connection.")
        print(ONLINE.format(*self.client_address))
        self.reply(''.join(WELCOME))
        mongoclient = pymongo.MongoClient()
        self.user = User(mongoclient)
        self.board = BoardManager(mongoclient)
        self.post = PostManager(mongoclient)
        self.mail = MailManager(mongoclient)
        while True:
            self.raw_command = self.recv_command()
            self.commands = self.raw_command.split()
            if not self.commands:
                continue
            if self.commands[0] == "exit":
                print(OFFLINE.format(*self.client_address))
                break
            func = self.function.get(self.commands[0])
            if func is None:
                # BUGFIX: the command word was previously deleted *before*
                # this log call, so "Unknown command" never showed which
                # command was unknown.
                error("Unknown command:", self.commands)
                del self.commands[0]
                self.reply('')
            else:
                del self.commands[0]
                func()
        self.request.shutdown(socket.SHUT_RDWR)
        self.request.close()
|
import os
from parallelHillClimber import PARALLEL_HILL_CLIMBER
# Run the parallel hill-climber evolutionary search, then display the best
# individual found.
phc = PARALLEL_HILL_CLIMBER()
phc.Evolve()
phc.Show_Best()
|
"""
make_train_val_test_splits.py
Load a csv file into a dataframe containing Leavesdb metadata, split it into train, val, and test subsets, then write to new location as csv files.
Created on: Wednesday April 6th, 2022
Created by: Jacob A Rose
python "/media/data_cifs/projects/prj_fossils/users/jacob/github/image-utils/imutils/big/make_train_val_test_splits.py" \
--root_dir "/media/data_cifs/projects/prj_fossils/users/jacob/data/leavesdb-v1_1" \
--label_col "family" \
--splits "0.5,0.2,0.3" \
--seed 14 \
--run_all
"""
import argparse
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
## Can we stratify by genus or Family while classifying species?
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import os
from typing import *
import json
import pandas as pd
from pathlib import Path
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from pprint import pprint as pp
# from imutils.big.make_herbarium_2022_catalog_df import (read_all_from_csv,
# read_train_df_from_csv,
# read_test_df_from_csv)
from imutils import catalog_registry
from imutils.ml.utils import template_utils
log = template_utils.get_logger(__file__)
# def train_val_split(df: pd.DataFrame,
# label_col = "scientificName",
# train_size=0.7,
# seed = 14
# ) -> Tuple[pd.DataFrame]:
# num_samples = df.shape[0]
# x = np.arange(num_samples)
# y = df[label_col].values
# x_train, x_val, _, _ = train_test_split(x, y,
# stratify=y,
# train_size=train_size,
# random_state=seed)
# train_data = df.iloc[x_train,:]
# val_data = df.iloc[x_val,:]
# return train_data, val_data
####################################################
def trainvaltest_split(df: pd.DataFrame,
                       label_col="family",
                       splits: List[float] = (0.5, 0.2, 0.3),
                       seed=14,
                       stratify: bool = True
                       ) -> Dict[str, pd.DataFrame]:
    """
    Split `df` into 3 (optionally stratified) subsets specified by `splits`.

    User specifies the absolute fraction of the total for each subset
    (e.g. splits=[0.5, 0.2, 0.3]). The function computes the adjusted
    relative fractions needed to apply sklearn's train_test_split twice:
        Step 1: separate the test set from the rest (train + val).
        Step 2: separate train and val from the remainder of step 1.

    :param df: source DataFrame.
    :param label_col: column used for stratification (required when stratify=True).
    :param splits: absolute (train, val, test) fractions; must sum to 1.0.
    :param seed: random_state forwarded to both train_test_split calls.
    :param stratify: whether to stratify both splits on `label_col`.
    :returns: {"train": df_train, "val": df_val, "test": df_test}
    """
    assert len(splits) == 3, "Must provide exactly 3 float values for `splits`"
    assert np.isclose(np.sum(splits), 1.0), f"Sum of all splits values {splits} = {np.sum(splits)} must be 1.0"
    train_split, val_split, test_split = splits
    val_relative_split = val_split/(train_split + val_split)
    train_relative_split = train_split/(train_split + val_split)
    if stratify and (label_col is None):
        raise ValueError("If label_col is not provided, stratify must be set to False.")
    num_samples = df.shape[0]
    x = np.arange(num_samples)
    y = df[label_col].values
    # BUGFIX: the `stratify` flag was previously ignored — both calls passed
    # the labels unconditionally. Stratification can now be disabled.
    stratify_y = y if stratify else None
    x_train_val, x_test, y_train_val, y_test = train_test_split(x, y,
                                                                test_size=test_split,
                                                                random_state=seed,
                                                                stratify=stratify_y)
    stratify_y_train = y_train_val if stratify else None
    x_train, x_val, y_train, y_val = train_test_split(x_train_val, y_train_val,
                                                      test_size=val_relative_split,
                                                      random_state=seed,
                                                      stratify=stratify_y_train)
    # Sanity check: every sample index must appear exactly once across subsets.
    x = np.concatenate((x_train, x_val, x_test)).tolist()
    assert len(set(x)) == len(x), f"[Warning] Check for possible data leakage. len(set(x))={len(set(x))} != len(x)={len(x)}"
    train_data = df.iloc[x_train,:]
    val_data = df.iloc[x_val,:]
    test_data = df.iloc[x_test,:]
    log.debug(f"x_train.shape={x_train.shape}, y_train.shape={y_train.shape}")
    log.debug(f"x_val.shape={x_val.shape}, y_val.shape={y_val.shape}")
    log.debug(f"x_test.shape={x_test.shape}, y_test.shape={y_test.shape}")
    log.debug(f'Absolute splits: {[train_split, val_split, test_split]}')
    log.debug(f'Relative splits: [{train_relative_split:.2f}, {val_relative_split:.2f}, {test_split}]')
    return {"train":train_data,
            "val":val_data,
            "test":test_data}
def fit_and_encode_labels(train_data,
                          val_data,
                          test_data=None,
                          label_col: str="family"
                          ) -> Tuple[LabelEncoder, pd.DataFrame]:
    """Fit a LabelEncoder on the train labels and add an encoded 'y' column.

    The encoder is fit on the train subset only; val (and test, when it
    carries the label column) are transformed with the same mapping.
    Returns the fitted encoder and a dict of the encoded subsets.
    """
    encoder = LabelEncoder()
    encoder.fit(train_data[label_col])

    def _with_encoded_labels(subset):
        # Attach the integer-encoded labels as a categorical 'y' column.
        return subset.assign(y=encoder.transform(subset[label_col])).astype({"y": "category"})

    split_data = {"train": _with_encoded_labels(train_data),
                  "val": _with_encoded_labels(val_data)}
    if test_data is not None:
        if label_col in test_data.columns:
            test_data = _with_encoded_labels(test_data)
        split_data["test"] = test_data
    return encoder, split_data
def save_label_encoder(encoder,
                       root_dir: str,
                       label_col: str) -> str:
    """Pickle `encoder` to `<root_dir>/<label_col>-encoder.pkl` and return that path."""
    out_path = Path(root_dir, f"{label_col}-encoder.pkl")
    with open(out_path, mode="wb") as fp:
        pickle.dump(encoder, fp)
    return out_path
def read_label_encoder(label_encoder_path) -> LabelEncoder:
    """Load and return the pickled label encoder stored at `label_encoder_path`."""
    with open(label_encoder_path, mode="rb") as fp:
        return pickle.load(fp)
def format_output_cols(df: pd.DataFrame):
    """Reorder df's columns into the canonical output order, keeping only those present."""
    preferred_order = ('path', 'y', "family", "genus", "species", "collection", "catalog_number")
    present = [col for col in preferred_order if col in df.columns]
    return df[present]
def optimize_dtypes(df: pd.DataFrame) -> pd.DataFrame:
    """
    Convert column dtypes to the optimal type for a Leavesdb metadata df.

    Categorical metadata columns become 'category' and identifier columns
    become 'string'; columns absent from `df` are skipped.
    """
    # Reduce total df size by optimizing dtypes per column.
    cat_cols = ["y", "family", "genus", "species", "collection"]
    str_cols = ["path", "catalog_number"]
    col_dtypes = {c: "category" for c in cat_cols if c in df.columns}
    # BUGFIX: string columns were converted without an existence check, so
    # .astype() raised KeyError whenever 'path' or 'catalog_number' was
    # missing. (Also removed a redundant duplicate append of 'y'.)
    col_dtypes.update({c: "string" for c in str_cols if c in df.columns})
    df = df.astype(col_dtypes)
    return df
def read_df_from_csv(path,
                     nrows: Optional[int]=None,
                     index_col=None
                     ) -> pd.DataFrame:
    """Read a metadata csv into a DataFrame and optimize its column dtypes."""
    raw = pd.read_csv(path, index_col=index_col, nrows=nrows)
    return optimize_dtypes(raw)
def make_splits(df: pd.DataFrame,
                label_col = "scientificName",
                splits: List[float]=(0.5, 0.2, 0.3),
                seed = 14
                ) -> Tuple[LabelEncoder, pd.DataFrame]:
    """Stratified train/val/test split of `df`, with labels encoded and columns formatted."""
    split_dfs = trainvaltest_split(df=df,
                                   label_col=label_col,
                                   splits=splits,
                                   seed=seed,
                                   stratify=True)
    encoder, split_dfs = fit_and_encode_labels(train_data=split_dfs["train"],
                                               val_data=split_dfs["val"],
                                               test_data=split_dfs["test"],
                                               label_col=label_col)
    for subset in ("train", "val", "test"):
        split_dfs[subset] = format_output_cols(split_dfs[subset])
    return encoder, split_dfs
def make_encode_save_splits(source_csv_path: str, #DATA_DIR,
                            save_dir: str, #=None,
                            label_col: str="family",
                            splits: List[float]=(0.5, 0.2, 0.3),
                            seed = 14):
    """Read the source csv, build encoded splits, and write encoder + subset csvs to `save_dir`."""
    save_dir = Path(save_dir)
    os.makedirs(save_dir, exist_ok=True)

    source_df = read_df_from_csv(path=source_csv_path)
    encoder, split_dfs = make_splits(df=source_df,
                                     label_col=label_col,
                                     splits=splits,
                                     seed=seed)
    save_label_encoder(encoder=encoder,
                       root_dir=save_dir,
                       label_col=label_col)
    # Insertion order of split_dfs is train, val, test.
    for subset_name, subset_df in split_dfs.items():
        subset_df.to_csv(save_dir / f"{subset_name}_metadata.csv")
    return {"label_encoder": encoder,
            "subsets": {**split_dfs}
            }
# def read_train_df_from_csv(train_path,
# nrows: Optional[int]=None
# ) -> pd.DataFrame:
# df = pd.read_csv(train_path, index_col=0, nrows=nrows)
# df = optimize_dtypes_train(df)
# return df
# def read_test_df_from_csv(test_path,
# nrows: Optional[int]=None
# ) -> pd.DataFrame:
# df = pd.read_csv(test_path, index_col=0, nrows=nrows)
# df = optimize_dtypes_test(df)
# return df
def find_label_encoder_path(source_dir: str, label_col: Optional[str]=None) -> Path:
    """
    Locate the pickled label-encoder file inside `source_dir`.

    Filters directory entries whose name contains "encoder" (and `label_col`
    when given). Returns None when nothing matches; asserts the match is
    unique otherwise.
    """
    candidates = [f for f in os.listdir(source_dir) if "encoder" in f]
    if isinstance(label_col, str):
        candidates = [f for f in candidates if label_col in f]
    if not candidates:
        return None
    assert len(candidates) == 1, "Warning: found ambiguous label encoder files in data splits directory or failed to specify label_col for dataset w/ more than 1 encoder. Please inspect contents of directory and try again.\n" + f"{source_dir}"
    return Path(source_dir, candidates[0])
def find_data_splits_dir(source_dir: str,
                         splits: List[float]=(0.5, 0.2, 0.3)
                         ) -> Path:
    """
    Given a base path `source_dir`, construct the data-split directory path
    for the chosen split fractions.
    """
    splits_subdir = f"splits=({splits[0]:.1f},{splits[1]:.1f},{splits[2]:.1f})"
    # Already pointing at the right split dir: return unchanged.
    if splits_subdir in str(source_dir):
        return source_dir
    out_dir = Path(source_dir)
    # Insert an intermediate "splits" level unless the path already names one.
    needs_splits_level = ("splits" in os.listdir(str(out_dir))) or ("splits" not in str(out_dir))
    if needs_splits_level:
        out_dir = out_dir / "splits"
    return out_dir / splits_subdir
def check_already_built(splits_dir: str, label_col: str="family") -> bool:
    """
    Return True when `splits_dir` already holds an encoder plus all three
    split csvs — evidence that a previous run completed successfully.
    """
    splits_dir = Path(splits_dir)
    encoder_path = find_label_encoder_path(splits_dir, label_col=label_col)
    if not os.path.isfile(str(encoder_path)):
        return False
    expected_files = ("train_metadata.csv", "val_metadata.csv", "test_metadata.csv")
    return all(os.path.isfile(splits_dir / name) for name in expected_files)
def read_encoded_splits(source_dir: str,
                        label_encoder_path: str=None,
                        label_col: str="family",
                        include=("train", "val", "test"),
                        index_col: int=None):
    """
    Read the label encoder and the requested split csvs from `source_dir`.

    :param source_dir: directory containing the encoder pickle and split csvs.
    :param label_encoder_path: explicit encoder path; located automatically when None.
    :param label_col: label column name used to disambiguate encoder files.
    :param include: which subsets to load (any of "train", "val", "test").
        BUGFIX: default was a mutable list — now an immutable tuple.
    :param index_col: forwarded to pd.read_csv.
    :returns: {"label_encoder": encoder, "subsets": {subset_name: DataFrame}}
    """
    source_dir = Path(source_dir)
    if label_encoder_path is None:
        label_encoder_path = find_label_encoder_path(source_dir, label_col=label_col)
    encoder = read_label_encoder(label_encoder_path=label_encoder_path)
    data = {"label_encoder": encoder,
            "subsets": {}}
    for subset in ("train", "val", "test"):
        if subset in include:
            data["subsets"][subset] = read_df_from_csv(
                source_dir / f"{subset}_metadata.csv", index_col=index_col)
    return data
def main(source_dir: str, #=DATA_DIR,
         splits_dir: Optional[str]=None,
         label_col: str="family",
         splits: List[float]=(0.5, 0.2, 0.3),
         seed: int=14):
    """
    If splits already exist on disk, read and return their contents.
    If they dont, read original herbarium train metadata, apply train val split, and write contents to new directory
    Returns a fitted LabelEncoder object + 3 DataFrames,
    - The train & val DataFrames contain labeled/supervised datasets
    - The test DataFrames contain unlabeled/unsupervised datasets

    NOTE(review): despite the default, this function assumes `splits_dir` is
    provided by the caller (the __main__ block always passes it); a None
    splits_dir would fail at os.path.exists below — confirm before reuse.
    """
    # if splits_dir is None:
    #     splits_dir = Path(source_dir, "splits", f"train_size-{train_size}")
    # os.makedirs(splits_dir, exist_ok=True)
    # import pdb; pdb.set_trace()
    # NOTE(review): the resume check is disabled (`if True:`), so every call
    # rebuilds the splits and the `else` branch below is unreachable dead
    # code. Confirm whether `not check_already_built(splits_dir)` should be
    # restored.
    if True: #not check_already_built(splits_dir):
        print(f"Making & saving train-val-test splits in the following directory:",
              '\n' + str(splits_dir))
        if not os.path.exists(splits_dir):
            print(f"Creating directory structure")
            os.makedirs(splits_dir, exist_ok=True)
        # Picks the first csv found in source_dir as the source catalog —
        # assumes exactly one csv lives there; TODO confirm.
        csv_filename = [f for f in os.listdir(source_dir) if f.endswith(".csv")][0]
        source_csv_path = os.path.join(source_dir, csv_filename)
        print(f"Reading from: {csv_filename}")
        data = make_encode_save_splits(source_csv_path=source_csv_path,
                                       save_dir=splits_dir,
                                       label_col=label_col,
                                       splits=splits,
                                       seed=seed)
        return data
    else:
        # Unreachable while the check above is hard-coded to True.
        print(f"Already completed previous run. train-val-test splits are in the following directory:",
              '\n' + str(splits_dir))
        return None
# HERBARIUM_ROOT = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize"
# WORKING_DIR = "/media/data/jacob/GitHub/image-utils/notebooks/herbarium_2022/"
# OUTPUT_DIR = os.path.join(WORKING_DIR, "outputs")
# DATA_DIR = os.path.join(WORKING_DIR, "data")
# from dotenv import load_dotenv
# load_dotenv()
# HERBARIUM_ROOT_DEFAULT = os.environ.get("HERBARIUM_ROOT_DEFAULT")
# CATALOG_DIR = os.environ.get("CATALOG_DIR")
# SPLITS_DIR = os.environ.get("SPLITS_DIR")
# Root directory holding one subdirectory per dataset catalog.
CATALOG_ROOT_DIR = "/media/data_cifs/projects/prj_fossils/users/jacob/data/leavesdb-v1_1"
# Catalog subdirs processed when --run_all is passed. The commented-out
# entries are the unfiltered/full datasets, currently excluded — presumably
# deliberate; confirm before re-enabling.
available_catalogs = [
    # 'Extant_Leaves_1024',
    # 'Extant_Leaves_512',
    'Extant_Leaves_family_100_1024',
    'Extant_Leaves_family_100_512',
    'Extant_Leaves_family_10_1024',
    'Extant_Leaves_family_10_512',
    'Extant_Leaves_family_3_1024',
    'Extant_Leaves_family_3_512',
    # 'Fossil_1024',
    # 'Fossil_512',
    'Fossil_family_3_1024',
    'Fossil_family_3_512',
    'PNAS_family_100_1024',
    'PNAS_family_100_512'
]
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for generating train-val-test splits.

    Returns:
        argparse.Namespace with `splits` already converted from the
        comma-separated string into a 3-tuple of floats.
    """
    parser = argparse.ArgumentParser("""Generate train-val-test splits dataset from Leavesdb v1.1 catalogs.""")
    parser.add_argument(
        "--root_dir", default=CATALOG_ROOT_DIR, help="Root directory where individual datasets each have their own subdir, within which lie the catalog files."
    )
    # Fixed: help text was copy-pasted from --root_dir and described the wrong flag.
    parser.add_argument(
        "--sub_dir", default="Extant_Leaves_family_10_512", help="Name of the single dataset subdir (within --root_dir) to process when --run_all is not passed."
    )
    parser.add_argument(
        "--splits_dir",
        default=None, #SPLITS_DIR,
        help="Target directory in which to save the csv files for each split. ",
    )
    parser.add_argument(
        "--label_col",
        default="family", #SPLITS_DIR,
        help="The column to encode as labels & use for stratification.",
    )
    parser.add_argument(
        "--splits",
        default="0.5,0.2,0.3",
        type=str,
        help="3 floats representing the train, val, and test fractions of the data catalogs to be made.",
    )
    parser.add_argument(
        "--seed", default=14, type=int, help="Random seed."
    )
    parser.add_argument(
        "--run_all", action="store_true", help="Flag to create splits for all subdirs in args.root_dir."
    )
    parser.add_argument(
        "--info", action="store_true", help="Flag to print execution variables then quit without execution."
    )
    parser.add_argument(
        "--force_overwrite", action="store_true", help="Flag to allow removal of pre-existing output files if they already exist, instead of skipping creation during execution."
    )
    args = parser.parse_args()
    # Convert e.g. "0.5,0.2,0.3" -> (0.5, 0.2, 0.3).
    args.splits = tuple(float(frac) for frac in args.splits.split(","))
    if args.info:
        print("User passed --info, displaying execution args then exiting")
        pp(args)
        sys.exit(0)
    # Fail fast with a proper CLI error instead of `assert`, which is
    # silently stripped under `python -O`.
    if not os.path.isdir(args.root_dir):
        parser.error(f"--root_dir is not an existing directory: {args.root_dir}")
    return args
if __name__ == "__main__":
args = parse_args()
if args.run_all:
print(f"--run_all was passed, generating train-val-test splits for {len(available_catalogs)} datasets.")
for sub_dir in tqdm(available_catalogs):
source_dir = os.path.join(args.root_dir, sub_dir)
args.splits_dir = find_data_splits_dir(source_dir=source_dir,
splits=args.splits)
print(f"Using args.splits_dir: {args.splits_dir}")
os.makedirs(args.splits_dir, exist_ok=True)
main(source_dir=source_dir,
splits_dir=args.splits_dir,
label_col=args.label_col,
splits=args.splits,
seed=args.seed)
else:
source_dir = os.path.join(args.root_dir, args.sub_dir)
if args.splits_dir is None:
args.splits_dir = find_data_splits_dir(source_dir=source_dir,
splits=args.splits)
print(f"Using args.splits_dir: {args.splits_dir}")
os.makedirs(args.splits_dir, exist_ok=True)
main(source_dir=source_dir,
splits_dir=args.splits_dir,
label_col=args.label_col,
splits=args.splits,
seed=args.seed) |
"""Tests for the object inspection functionality.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
# Third-party imports
import nose.tools as nt
# Our own imports
from .. import oinspect
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Module-level Inspector instance shared by all calltip checks below.
inspector = oinspect.Inspector()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# A few generic objects we can then inspect in the tests below
# NOTE: the docstrings in this fixture class are test data — the calltip
# tests below assert their exact text, so do not edit them.
class Call(object):
    """This is the class docstring."""
    def __init__(self, x, y=1):
        """This is the constructor docstring."""
    def __call__(self, *a, **kw):
        """This is the call docstring."""
    def method(self, x, z=2):
        """Some method's docstring"""
# Fixture: signature and docstring are asserted verbatim by test_calltip_function.
def f(x, y=2, *a, **kw):
    """A simple function."""
# Fixture: deliberately lacks a docstring — test_calltip_function2 expects
# the '<no docstring>' placeholder for it.
def g(y, z=3, *a, **kw):
    pass # no docstring
def check_calltip(obj, name, call, docstring):
    """Generic check pattern all calltip tests will use"""
    actual_call, actual_doc = oinspect.call_tip(inspector.info(obj, name))
    nt.assert_equal(actual_call, call)
    nt.assert_equal(actual_doc, docstring)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# Each test feeds one kind of object through check_calltip and pins the
# exact call-tip line and docstring that oinspect.call_tip should produce.
def test_calltip_class():
    check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
def test_calltip_instance():
    # Calling an instance routes through __call__, so the tip shows *a, **kw.
    c = Call(1)
    check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
def test_calltip_method():
    c = Call(1)
    check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
def test_calltip_function():
    check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
def test_calltip_function2():
    check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
def test_calltip_builtin():
    # Builtins expose no Python-level signature, hence call_line is None.
    check_calltip(sum, 'sum', None, sum.__doc__)
|
import sys
import random

# Answers in draw order: index = (random draw - 1). Text kept verbatim.
RESPOSTAS = (
    'Com Certeza',
    'Perspectiva Boa',
    'Você pode contar com ele',
    'Pergunte novamente mais tarde',
    'Concentre-se e pergunte novamente',
    'Resposta nebuloza, tente denovo',
    'Minha resposta é não',
    'Minhas fontes dizer que não',
)

# Prompt in a loop until the user submits an empty line, then exit.
while True:
    pergunta = input("Pergunte algo para Magic8Ball: (Pressione enter para sair) ")
    if pergunta == '':
        sys.exit()
    print(RESPOSTAS[random.randint(1, 8) - 1])
|
# Copyright 2020 The Nadi Data Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import readtextfile
import readjsonfile
import readparquetfile
import writeavrofile
import writetextfile
import writeparquetfile
import ray
from timeit import default_timer as timer
import time
import pandas as pd
import dask.dataframe as dd
import uuid
# BUG FIX: `ray.is_initialized` is a function; referenced as a bare attribute
# it is always truthy, so the original condition both skipped the call and
# had the logic inverted. Connect to the cluster only when not yet connected.
if not ray.is_initialized():
    ray.init(address="auto")

# Load the source csv as a dask dataframe split into 6 partitions.
df = readtextfile.ReadTextFile(ipfile='/tmp/data/5m_Sales_Records.csv',
                               ipschemafile='/Users/sriyan/Documents/dataprocessor/schema/sample_csv_file.schema',
                               delimiter=',',
                               skiprows=1,
                               parallel=6).read_using_dask()
class Utils():
    """Small helpers shared by the pipeline: date parsing and partitioning."""

    def __init__(self):
        pass

    def parse_dates_using_lookup(self, date_column, format_to='%Y%m%d', infer_datetime_format=False):
        """
        This is an extremely fast approach to datetime parsing.
        For large data, the same dates are often repeated. Rather than
        re-parse these, we store all unique dates, parse them, and
        use a lookup to convert all dates.
        """
        # NOTE: `format_to` is currently unused (inference is used instead);
        # kept for interface compatibility.
        lookup = {}
        for raw_date in date_column.unique():
            lookup[raw_date] = pd.to_datetime(raw_date, infer_datetime_format=infer_datetime_format)
        return date_column.map(lookup)

    def define_partitions(self, seq, num_of_partitions):
        """
        Split `seq` into `num_of_partitions` contiguous chunks of
        near-equal length. (This needs to be modified to reduce skew.)
        """
        total = len(seq)
        chunks = []
        stop = 0
        for part in range(num_of_partitions):
            start, stop = stop, stop + (total + part) // num_of_partitions
            chunks.append(seq[start:stop])
        return chunks
@ray.remote
class Pipeline():
    """Ray actor wrapping the per-partition transform and repartition/write steps."""

    def transform(self, df, partition_no):
        '''All transformations before repartition'''
        start = timer()
        # Materialize one dask partition into a pandas DataFrame.
        df = df.get_partition(partition_no).compute()
        #df['Order_Date'] = Utils().parse_dates_using_lookup(df['Order_Date'])
        #df['Ship_Date'] = Utils().parse_dates_using_lookup(df['Ship_Date'])
        #df = df[df['Region'] == 'Europe']
        print("duration =", timer() - start, " seconds for transform")
        return df

    def re_partition_sort_data(self, df, partition_metadata, partition_keys, sort_keys, partition_no):
        '''Sort one repartitioned frame and write it out as pipe-delimited text.'''
        start = timer()
        '''Prepare partitions based on partition metadata'''
        #df = df[df[partition_keys].isin(partition_metadata)]
        if sort_keys is not None:
            '''Sort data based on sort keys'''
            # BUG FIX: DataFrame.sort_values returns a new frame (it is not
            # in-place); the original discarded the result, so the data was
            # written unsorted.
            df = df.sort_values(by=sort_keys)
        print("duration =", timer() - start, " seconds for preparing partition based on keys")
        start = timer()
        filename = "/tmp/data/sample_textfile." + str(partition_no) + ".txt"
        df.to_csv(filename, sep='|', header=None, encoding='utf-8')
        #writeavrofile.WriteAvroFile(df, partition_no, '/Users/sriyan/Downloads/sample_avro_file').write_using_fastavro()
        print("duration =", timer() - start, " seconds for writing")
if __name__ == "__main__":
start = timer()
try:
actors = {}
result_ids_step1 =[]
completed_dfs_step1 = []
completed_ids_step1 = []
result_ids_step2 = []
completed_ids_step2 = []
npartitions = df.npartitions
parallel = df.npartitions//2
temp_list = []
################################STEP1##################################
'''Workers for executing step1 tasks'''
for i in range(parallel):
x = temp_list[-1] if temp_list else 0
y = [x, x+1] if x not in temp_list else [x+1, x+2]
temp_list.extend(y)
actors[i] = Pipeline.remote()
result_ids_step1.append(actors[i].transform.remote(df, y[0]))
result_ids_step1.append(actors[i].transform.remote(df, y[1]))
'''Get step1 status'''
while len(result_ids_step1):
done_ids, result_ids_step1 = ray.wait(result_ids_step1)
completed_ids_step1.extend(done_ids if isinstance(done_ids, list) else [done_ids])
'''Prepare list of data frames to be merged '''
dflist = [ray.get(objref_id) for objref_id in completed_ids_step1]
'''Get the merged data frame'''
df_concat = pd.concat(dflist)
################################STEP3##################################
'''Get the partitions metadata based on keys'''
partition_keys = 'Region'
sort_keys = ['Order_Date']
partition_metadata = Utils().define_partitions(seq = list(df_concat[partition_keys].value_counts(dropna=False).keys()),
num_of_partitions = parallel)
print(partition_metadata)
'''Repartition data: partition using metadata >> sort >> send to workers'''
for i, x in enumerate(partition_metadata):
#actors[x] = Pipeline.remote()
result_ids_step2.append(actors[i].re_partition_sort_data.remote(df_concat[df_concat[partition_keys].isin(x)], x, partition_keys, sort_keys, i))
#result_ids_step2.append(actors[i].re_partition_sort_data.remote(df_concat, x, partition_keys, sort_keys, i))
'''Get step2 status'''
while len(result_ids_step2):
done_ids, result_ids_step2 = ray.wait(result_ids_step2)
completed_ids_step2.extend(done_ids if isinstance(done_ids, list) else [done_ids])
finally:
pass
'''for x in actors.keys():
ray.kill(actors[x])'''
print("duration =", timer() - start, " seconds") |
# pylint: disable=W0212
# W0212: It is fine to access protected members for test purposes.
#
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Test module for the property type registry.
"""
import unittest
from datafinder.core.configuration.properties import constants
from datafinder.core.configuration.properties.property_definition import PropertyDefinition, PropertyDefinitionFactory
from datafinder.core.configuration.properties.registry import PropertyDefinitionRegistry
from datafinder.core.configuration.properties import property_type
from datafinder.core.error import ConfigurationError
__version__ = "$Revision-Id:$"
class PropertyTypeRegistryTestCase(unittest.TestCase):
    """ Tests for the module property_type_registry.

    Modernized: the deprecated unittest aliases (assertEquals,
    assertNotEquals, failIf) were removed in Python 3.12; replaced with
    assertEqual / assertNotEqual / assertFalse (behavior unchanged).
    """

    def setUp(self):
        """ Creates the required test environment. """
        self._propDef = PropertyDefinition(
            "testID", constants.USER_PROPERTY_CATEGORY, property_type.StringType())
        self._registry = PropertyDefinitionRegistry(PropertyDefinitionFactory(), True)
        self._regPropsNumber = len(self._registry.registeredPropertyDefinitions)

    def tearDown(self):
        """ Cleans up test environment. """
        self._registry.unregister([self._propDef])

    def testRemovingOfNonExistingPropertyType(self):
        """ Tests the removing of a non-existing property type definition. """
        self._registry.unregister([self._propDef])
        self.assertEqual(len(self._registry.registeredPropertyDefinitions),
                         self._regPropsNumber)

    def testMuliplePropertyTypeAdding(self):
        """ Tests the multiple adding of the identical property type definition. """
        self._registry.register([self._propDef, self._propDef])
        self.assertEqual(
            len(self._registry.registeredPropertyDefinitions), self._regPropsNumber + 1)

    def testRegisterPropertyType(self):
        """ Tests the registering of a property type. """
        self._registry.register([self._propDef])
        self.assertFalse(
            not self._registry.isPropertyDefinitionRegistered(self._propDef),
            "The property was not registered.")
        # Trying to register a system-specific properties again
        systemPropDef = self._registry.systemPropertyDefinitions[0]
        self.assertTrue(self._registry.existsSystemPropertyDefinition(systemPropDef.identifier))
        self.assertRaises(ConfigurationError, self._registry.register, [systemPropDef])
        self._registry.clear()
        self.assertEqual(len(self._registry.registeredPropertyDefinitions),
                         self._regPropsNumber)
        self.assertEqual(len(self._registry.systemPropertyDefinitions),
                         self._regPropsNumber)

    def testUnregisterPropertyType(self):
        """ Tests the unregistering of a property type. """
        self._registry.register([self._propDef])
        self._registry.unregister([self._propDef])
        # Fixed copy-pasted message: the failure here means the property is
        # still registered after unregister().
        self.assertFalse(
            self._registry.isPropertyDefinitionRegistered(self._propDef),
            "The property was not unregistered.")

    def testPropertyTypeMappingUnmodifiable(self):
        """ Tests that property type mapping cannot be changed from outside. """
        registeredProperties = self._registry.registeredPropertyDefinitions
        for propertyDef in registeredProperties:
            if not propertyDef in self._registry.registeredPropertyDefinitions:
                self.fail("Property definition not available.")
        registeredProperties[self._propDef.identifier] = self._propDef
        self.assertEqual(
            len(self._registry.registeredPropertyDefinitions), self._regPropsNumber)
        equal = True
        for propertyDef in registeredProperties:
            if not propertyDef in self._registry.registeredPropertyDefinitions:
                equal = False
        if equal:
            self.fail("Property definition changed from outside.")

    def testUpdateRegisteredPropertyType(self):
        """ Tests the update of a registered property type definition. """
        self._registry.register([self._propDef])
        self._propDef.description = "New Description"
        self._registry.register([self._propDef])
        propertyDefRegistry = self._registry.registeredPropertyDefinitions\
                              [(self._propDef.namespace, self._propDef.identifier)]
        self.assertEqual(propertyDefRegistry.description, self._propDef.description)

    def testGetPropertyDefinition(self):
        """ Tests retrieval (copy semantics) and on-the-fly creation. """
        self._registry.register([self._propDef])
        retrievedPropDef = self._registry.getPropertyDefinition(self._propDef.identifier)
        self.assertEqual(retrievedPropDef, self._propDef)
        # Equal but not identical: the registry hands out copies.
        self.assertNotEqual(id(retrievedPropDef), id(self._propDef))
        # Non-registered properties are created
        newPropDef = self._registry.getPropertyDefinition("new")
        self.assertTrue(not self._registry.isPropertyDefinitionRegistered(newPropDef)) # but not registered

    def testDataFields(self):
        """ Checks the different attributes data / fields. """
        # Each accessor must return content equal to the backing field but a
        # distinct object (defensive copies).
        self.assertEqual(self._registry.defaultArchivePropertyDefinitions,
                         self._registry._defaultArchivePropertyDefinitions)
        self.assertNotEqual(id(self._registry.defaultArchivePropertyDefinitions),
                            id(self._registry._defaultArchivePropertyDefinitions))
        self.assertEqual(self._registry.defaultResourcePropertyDefinitions,
                         self._registry._defaultResourcePropertyDefinitions)
        self.assertNotEqual(id(self._registry.defaultResourcePropertyDefinitions),
                            id(self._registry._defaultResourcePropertyDefinitions))
        self.assertEqual(self._registry.defaultCollectionPropertyDefinitions,
                         self._registry._defaultCollectionPropertyDefinitions)
        self.assertNotEqual(id(self._registry.defaultCollectionPropertyDefinitions),
                            id(self._registry._defaultCollectionPropertyDefinitions))
        self.assertEqual(self._registry.registeredPropertyDefinitions,
                         self._registry._registeredPropertyDefinitions)
        self.assertNotEqual(id(self._registry.registeredPropertyDefinitions),
                            id(self._registry._registeredPropertyDefinitions))
        self.assertEqual(self._registry.systemPropertyDefinitions,
                         self._registry._systemPropertyDefinitions)
        self.assertNotEqual(id(self._registry.systemPropertyDefinitions),
                            id(self._registry._systemPropertyDefinitions))

    def testPropertyNameValidationFunction(self):
        """ Ensures that the extended check function works as expected. """
        # Check for non-existing ID
        testFunction = self._registry.propertyNameValidationFunction
        self.assertEqual(testFunction("unknownId"), True)
        # Check for existing ID
        self._registry.register([self._propDef])
        testFunction = self._registry.propertyNameValidationFunction
        self.assertEqual(testFunction(self._propDef.identifier), False)
|
from datetime import datetime, timedelta
import numpy as np
# Sentinel used for missing values in downstream frames.
NA = float(np.nan)
# Valid values for the Spotify charts query parameters.
FILTERS = {
    "chart_type": ["regional", "viral"],
    "period": ["daily", "weekly"],
    # Two-letter country codes (plus "global") supported by the charts API.
    "region": [
        "global",
        "us",
        "gb",
        "ar",
        "at",
        "au",
        "be",
        "bg",
        "bo",
        "br",
        "ca",
        "ch",
        "cl",
        "co",
        "cr",
        "cz",
        "de",
        "dk",
        "do",
        "ec",
        "ee",
        "es",
        "fi",
        "fr",
        "gr",
        "gt",
        "hk",
        "hn",
        "hu",
        "id",
        "ie",
        "il",
        "in",
        "is",
        "it",
        "jp",
        "lt",
        "lv",
        "mx",
        "my",
        "ni",
        "nl",
        "no",
        "nz",
        "pa",
        "pe",
        "ph",
        "pl",
        "pt",
        "py",
        "ro",
        "ru",
        "se",
        "sg",
        "sk",
        "sv",
        "th",
        "tr",
        "tw",
        "ua",
        "uy",
        "vn",
        "za",
    ],
}
# Earliest dates for which daily / weekly charts exist.
START_DAILY = datetime(2017, 1, 1)
START_WEEKLY = datetime(2017, 1, 5)
# Yesterday at midnight: the latest complete chart day.
END_DATE = datetime.combine(datetime.today().date(), datetime.min.time()) - timedelta(
    days=1
)
# SECURITY NOTE(review): API credentials are hardcoded in source. They should
# be rotated and loaded from the environment or a secret store instead.
SPOTIFY_API_CLIENT_ID = "dfe9ee5938724c5bb285d31cf44dd315"
SPOTIFY_API_CLIENT_SECRET = "cba0228465254950a1209e573c81650e"
|
from office365.runtime.paths.resource_path import ResourcePath
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.sharing.picker_settings import PickerSettings
class SharingInformation(BaseEntity):
    """Represents a response for Microsoft.SharePoint.Client.Sharing.SecurableObjectExtensions.GetSharingInformation.
    The accessRequestSettings, domainRestrictionSettings and permissionsInformation properties are not included in
    the default scalar property set for this type.
    """

    @property
    def picker_settings(self):
        """PickerSettings used by the PeoplePicker Control."""
        # Fall back to a lazily-resolved entity when the scalar property has
        # not been loaded yet.
        fallback = PickerSettings(self.context,
                                  ResourcePath("pickerSettings", self.resource_path))
        return self.properties.get('pickerSettings', fallback)
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is to process the code coverage metadata."""
import collections
import json
import logging
import re
import urlparse
import zlib
import cloudstorage
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import ndb
from google.protobuf.field_mask_pb2 import FieldMask
from google.protobuf import json_format
from common import monitoring
from common.findit_http_client import FinditHttpClient
from common.waterfall.buildbucket_client import GetV2Build
from gae_libs.appengine_util import IsInternalInstance
from gae_libs.caches import PickledMemCache
from gae_libs.dashboard_util import GetPagedResults
from gae_libs.handlers.base_handler import BaseHandler, Permission
from gae_libs.gitiles.cached_gitiles_repository import CachedGitilesRepository
from libs.cache_decorator import Cached
from libs.deps import chrome_dependency_fetcher
from libs.time_util import ConvertUTCToPST
from model import entity_util
from model.proto.gen.code_coverage_pb2 import CoverageReport
from model.code_coverage import DependencyRepository
from model.code_coverage import PostsubmitReport
from model.code_coverage import FileCoverageData
from model.code_coverage import PresubmitCoverageData
from model.code_coverage import SummaryCoverageData
from waterfall import waterfall_config
# List of Gerrit projects that the Code Coverage service supports.
_PROJECTS_WHITELIST = set(['chromium/src'])
# Gitiles hosts from which source files may be fetched and cached.
_ALLOWED_GITILES_HOST = set([
    'android.googlesource.com',
    'aomedia.googlesource.com',
    'boringssl.googlesource.com',
    'chromium.googlesource.com',
    'dawn.googlesource.com',
    'pdfium.googlesource.com',
    'quiche.googlesource.com',
    'skia.googlesource.com',
    'swiftshader.googlesource.com',
    'webrtc.googlesource.com',
])
# The regex to extract the build id from the url path.
_BUILD_ID_REGEX = re.compile(r'.*/build/(\d+)$')
# Cloud storage bucket used to store the source files fetched from gitile.
_SOURCE_FILE_GS_BUCKET = 'source-files-for-coverage'
# Dependencies to skip adding to manifest. Maps root repo url to list of
# dependency paths (relative to the root of the checkout).
_BLACKLISTED_DEPS = {
    'https://chromium.googlesource.com/chromium/src.git': [
        'src/ios/third_party/webkit/src'
    ],
}
# A mapping from platform to related info such as builder name and ui name.
# An entry with 'hidden': True is excluded from the platform dropdown for
# non-admin users (see _GetSameOrMostRecentReportForEachPlatform).
_POSTSUBMIT_PLATFORM_INFO_MAP = {
    'linux': {
        'bucket': 'coverage',
        'builder': 'linux-code-coverage',
        'ui_name': 'Linux',
    },
    'chromeos-vm': {
        'bucket': 'ci',
        'builder': 'chromeos-vm-code-coverage',
        'ui_name': 'ChromeOS VM',
    },
    'linux-chromeos': {
        'bucket': 'ci',
        'builder': 'linux-chromeos-code-coverage',
        'ui_name': 'ChromeOS on Linux',
    },
    'oobe-code-mauve': {
        'bucket': 'ci',
        'builder': 'linux-chromeos-oobe-code-coverage',
        'ui_name': 'ChromeOS on Linux for OOBE',
        'hidden': True,
    },
}
def _GetSameOrMostRecentReportForEachPlatform(host, project, ref, revision):
  """Find the matching report on other platforms, or the most recent.
  The intent of this function is to help the UI list the platforms that are
  available, and let the user switch. If a report with the same revision exists
  and is supposed to be visible to the public users, use it, otherwise use the
  most recent visible one.

  Args:
    host: Gitiles server host of the commit.
    project: Gitiles project name.
    ref: Git ref of the commit.
    revision: Git revision the currently-viewed report was built at.

  Returns:
    dict mapping platform name -> PostsubmitReport entity (platforms with no
    visible report are omitted).
  """
  result = {}
  platforms = _POSTSUBMIT_PLATFORM_INFO_MAP.keys()
  for platform in platforms:
    # Some 'platforms' are hidden from the selection to avoid confusion, as they
    # may be custom reports that do not make sense outside a certain team.
    # They should still be reachable via a url.
    if (_POSTSUBMIT_PLATFORM_INFO_MAP[platform].get('hidden') and
        not users.is_current_user_admin()):
      continue
    bucket = _POSTSUBMIT_PLATFORM_INFO_MAP[platform]['bucket']
    builder = _POSTSUBMIT_PLATFORM_INFO_MAP[platform]['builder']
    # Prefer the report built at exactly the same revision, if visible.
    same_report = PostsubmitReport.Get(
        server_host=host,
        project=project,
        ref=ref,
        revision=revision,
        bucket=bucket,
        builder=builder)
    if same_report and same_report.visible:
      result[platform] = same_report
      continue
    # Otherwise fall back to the most recent visible report for the builder.
    query = PostsubmitReport.query(
        PostsubmitReport.gitiles_commit.server_host == host,
        PostsubmitReport.gitiles_commit.project == project,
        PostsubmitReport.bucket == bucket, PostsubmitReport.builder == builder,
        PostsubmitReport.visible == True).order(
            -PostsubmitReport.commit_position).order(
                -PostsubmitReport.commit_timestamp)
    entities = query.fetch(limit=1)
    if entities:
      result[platform] = entities[0]
  return result
def _MakePlatformSelect(host, project, ref, revision, path, current_platform):
  """Populate values needed to render a form to let the user switch platforms.
  This will produce parameters needed for the form to post to the same page so
  that upon submission it loads the report at the same path, and it will also
  provide the options that can be selected in the dropdown.

  Returns:
    dict with 'params' (hidden form fields) and 'options' (dropdown entries
    of {'value', 'ui_name', 'selected'}).
  """
  result = {
      'params': {
          'host': host,
          'project': project,
          'ref': ref,
      },
      'options': [],
  }
  if path:
    result['params']['path'] = path
  # Python 2 dict iteration (iteritems); one dropdown option per platform
  # that has a usable report.
  for platform, report in _GetSameOrMostRecentReportForEachPlatform(
      host, project, ref, revision).iteritems():
    value = platform
    if report.gitiles_commit.revision == revision:
      # If the same revision is available in the target platform, append it to
      # the platform name s.t. the form can populate this revision field before
      # submission.
      value = '%s#%s' % (platform, revision)
    result['options'].append({
        'value': value,
        'ui_name': _POSTSUBMIT_PLATFORM_INFO_MAP[platform]['ui_name'],
        'selected': platform == current_platform,
    })
  return result
def _GetValidatedData(gs_path):  # pragma: no cover.
  """Returns the json data from the given GS path after validation.
  Args:
    gs_path (str): Path to the file, in the format /bucket/object.
  Returns:
    json_data (dict): the json data of the file pointed by the given GS url, or
    None if the data can't be retrieved.
  """
  logging.info('Fetching data from %s', gs_path)
  compressed = _GetFileContentFromGs(gs_path)
  assert compressed, 'Failed to fetch coverage json data from %s' % gs_path

  logging.info('Decompressing and loading coverage data...')
  raw_json = zlib.decompress(compressed)
  del compressed  # Explicitly release memory.
  data = json.loads(raw_json)
  del raw_json  # Explicitly release memory.
  logging.info('Finished decompressing and loading coverage data.')

  # Validate that the data is in good format: parsing into the proto raises
  # when the structure does not match the CoverageReport schema.
  logging.info('Validating coverage data...')
  validation_report = CoverageReport()
  json_format.ParseDict(data, validation_report, ignore_unknown_fields=False)
  del validation_report  # Explicitly delete the proto message to release memory.
  logging.info('Finished validating coverage data.')
  return data
def _DecompressLines(line_ranges): # pragma: no cover.
"""Decompress the lines data to a flat format.
For example:
[
{
"count": 1,
"first": 165, // inclusive
"last": 166 // inclusive
}
]
After decompressing, it becomes:
[
{
"line": 165,
"count": 1
},
{
"line": 166,
"count": 1
}
]
Args:
line_ranges: A list of dict, with format
[{"first": int, "last": int, "count": int}, ...], and note that
the [first, last] are both inclusive.
Returns:
A list of dict, with format
[{"line": int, "count": int}].
"""
decompressed_lines = []
for line_range in line_ranges:
for line_num in range(line_range['first'], line_range['last'] + 1):
decompressed_lines.append({
'line': line_num,
'count': line_range['count']
})
return decompressed_lines
def _RetrieveManifest(repo_url, revision, os_platform):  # pragma: no cover.
  """Returns the manifest of all the dependencies for the given revision.
  Args:
    repo_url (str): The url to the Gitiles project of the root repository.
    revision (str): The revision of the root repository.
    os_platform (str): The platform of the code checkout.
  Returns:
    A list of DependencyRepository instances ordered reversely by the relative
    path of each dependency checkout in the checkout of the root repository.
    The longer the relative path, the smaller index in the returned list.
    The reverse order is to make it easy to reliably determine which dependency
    a file is from, when given a file path relative to the root repository.
  """
  manifest = []
  root_dir = 'src/'
  # Normalizes a checkout-relative path to the '//path/' form used by
  # coverage data and appends the dependency to `manifest`.
  def AddDependencyToManifest(path, url, revision):  # pragma: no cover.
    if path.startswith(root_dir):
      path = path[len(root_dir):]
    assert not path.startswith('//')
    path = '//' + path
    if not path.endswith('/'):
      path = path + '/'
    # Parse the url to extract the hostname and project name.
    # For "https://chromium.google.com/chromium/src.git", we get
    # ParseResult(netloc='chromium.google.com', path='/chromium/src.git', ...)
    result = urlparse.urlparse(url)
    assert result.path, 'No project extracted from %s' % url
    manifest.append(
        DependencyRepository(
            path=path,
            server_host=result.netloc,
            project=result.path[1:],  # Strip the leading '/'.
            revision=revision))
  # Add the root repository.
  AddDependencyToManifest('src/', repo_url, revision)
  # Add all the dependent repositories.
  # DEPS fetcher now assumes chromium/src and master branch.
  dep_fetcher = chrome_dependency_fetcher.ChromeDependencyFetcher(
      CachedGitilesRepository.Factory(FinditHttpClient()))
  deps = dep_fetcher.GetDependency(revision, os_platform)
  for path, dep in deps.iteritems():
    # Remove clause when crbug.com/929315 gets fixed.
    if path in _BLACKLISTED_DEPS.get(repo_url, []):
      continue
    AddDependencyToManifest(path, dep.repo_url, dep.revision)
  # Longest path first so prefix matching finds the most specific dependency.
  manifest.sort(key=lambda x: len(x.path), reverse=True)
  return manifest
def _GetMatchedDependencyRepository(report, file_path):  # pragma: no cover.
  """Gets the matched dependency in the manifest of the report.
  Args:
    report (PostsubmitReport): The report that the file is associated with.
    file_path (str): Source absolute path to the file.
  Returns:
    A DependencyRepository if a matched one is found and it is whitelisted,
    otherwise None.
  """
  assert file_path.startswith('//'), 'All file path should start with "//".'
  # The manifest is sorted longest-path-first, so the first prefix match is
  # the most specific dependency.
  matched = next(
      (dep for dep in report.manifest if file_path.startswith(dep.path)), None)
  if matched is None or matched.server_host not in _ALLOWED_GITILES_HOST:
    return None
  return matched
def _ComposeSourceFileGsPath(report, file_path, revision):
  """Composes a cloud storage path for a specific revision of a source file.

  Args:
    report (PostsubmitReport): The report that the file is associated with.
    file_path (str): Source absolute path to the file.
    revision (str): The gitile revision of the file in its own repo.

  Returns:
    Cloud storage path to the file, in the format /bucket/object. For example,
    /source-files-for-coverage/chromium.googlesource.com/v8/v8/src/date.cc/1234.
  """
  assert file_path.startswith('//'), 'All file path should start with "//".'
  assert revision, 'A valid revision is required'

  dependency = _GetMatchedDependencyRepository(report, file_path)
  assert dependency, (
      '%s file does not belong to any dependency repository' % file_path)

  # Drop the dependency checkout prefix to get the path relative to the root
  # of the dependency repository itself.
  components = (_SOURCE_FILE_GS_BUCKET, dependency.server_host,
                dependency.project, file_path[len(dependency.path):], revision)
  return '/' + '/'.join(components)
def _IsFileAvailableInGs(gs_path):  # pragma: no cover.
  """Returns True if the specified object exists, otherwise False.

  Args:
    gs_path (str): Path to the file, in the format /bucket/object.

  Returns:
    True if the object exists, otherwise False.
  """
  # A successful stat means the object is present; a NotFoundError is the
  # normal "absent" signal, not an error condition.
  try:
    cloudstorage.stat(gs_path)
  except cloudstorage.NotFoundError:
    return False
  return True
def _GetFileContentFromGs(gs_path):  # pragma: no cover.
  """Reads the content of a file in cloud storage.

  This method is more expensive than |_IsFileAvailableInGs|, so if the goal is
  to check if a file exists, |_IsFileAvailableInGs| is preferred.

  Args:
    gs_path (str): Path to the file, in the format /bucket/object.

  Returns:
    The content of the file if it exists, otherwise None."""
  # Both the open and the read may raise NotFoundError; treat either as
  # "file absent" and return None.
  try:
    gs_file = cloudstorage.open(gs_path)
    try:
      return gs_file.read()
    finally:
      gs_file.close()
  except cloudstorage.NotFoundError:
    return None
def _WriteFileContentToGs(gs_path, content):  # pragma: no cover.
  """Writes the content of a file to cloud storage.

  Args:
    gs_path (str): Path to the file, in the format /bucket/object.
    content (str): Content of the file.
  """
  # Retry transient GS failures with exponential backoff.
  retry_params = cloudstorage.RetryParams(backoff_factor=2)
  gs_file = cloudstorage.open(
      gs_path, 'w', content_type='text/plain', retry_params=retry_params)
  try:
    gs_file.write(content)
  finally:
    gs_file.close()
def _GetFileContentFromGitiles(report, file_path,
                               revision):  # pragma: no cover.
  """Fetches the content of a specific revision of a file from gitiles.

  Args:
    report (PostsubmitReport): The report that the file is associated with.
    file_path (str): Source absolute path to the file.
    revision (str): The gitile revision of the file.

  Returns:
    The content of the source file."""
  assert file_path.startswith('//'), 'All file path should start with "//".'
  assert revision, 'A valid revision is required'

  dependency = _GetMatchedDependencyRepository(report, file_path)
  assert dependency, (
      '%s file does not belong to any dependency repository' % file_path)

  repo = CachedGitilesRepository(FinditHttpClient(), dependency.project_url)
  # Strip the dependency checkout prefix to get the repo-relative path.
  return repo.GetSource(file_path[len(dependency.path):], revision)
def _IsReportSuspicious(report):
  """Returns True if the newly generated report is suspicious to be incorrect.

  A report is determined to be suspicious if and only if the absolute difference
  between its line coverage percentage and the most recent visible report is
  greater than 1.00%.

  Args:
    report (PostsubmitReport): The report to be evaluated.

  Returns:
    True if the report is suspicious, otherwise False.
  """

  def _GetLineCoveragePercentage(report):  # pragma: no cover
    # Extracts the covered/total line ratio (a fraction in [0, 1]) from the
    # report's summary metrics.
    line_coverage_percentage = None
    summary = report.summary_metrics
    for feature_summary in summary:
      if feature_summary['name'] == 'line':
        line_coverage_percentage = float(
            feature_summary['covered']) / feature_summary['total']
        break

    assert line_coverage_percentage is not None, (
        'Given report has invalid summary')
    return line_coverage_percentage

  target_server_host = report.gitiles_commit.server_host
  target_project = report.gitiles_commit.project
  target_bucket = report.bucket
  target_builder = report.builder
  most_recent_visible_reports = PostsubmitReport.query(
      PostsubmitReport.gitiles_commit.server_host == target_server_host,
      PostsubmitReport.gitiles_commit.project == target_project,
      PostsubmitReport.bucket == target_bucket,
      PostsubmitReport.builder == target_builder, PostsubmitReport.visible ==
      True).order(-PostsubmitReport.commit_position).order(
          -PostsubmitReport.commit_timestamp).fetch(1)
  if not most_recent_visible_reports:
    # Fix: `logging.warn` is a deprecated alias of `logging.warning`.
    logging.warning('No existing visible reports to use for reference, the new '
                    'report is determined as not suspicious by default')
    return False

  most_recent_visible_report = most_recent_visible_reports[0]
  # The percentages are fractions, so 0.01 corresponds to 1.00%.
  if abs(
      _GetLineCoveragePercentage(report) -
      _GetLineCoveragePercentage(most_recent_visible_report)) > 0.01:
    return True

  return False
class FetchSourceFile(BaseHandler):
  """Task handler: fetches one source file from gitiles and caches it in GS."""

  PERMISSION_LEVEL = Permission.APP_SELF

  def HandlePost(self):
    """Fetches the requested file revision and writes it to cloud storage.

    Expects 'report_key', 'path' and 'revision' request parameters.
    """
    report_key = self.request.get('report_key')
    path = self.request.get('path')
    revision = self.request.get('revision')

    assert report_key, 'report_key is required'
    assert path, 'path is required'
    assert revision, 'revision is required'

    report = entity_util.GetEntityFromUrlsafeKey(report_key)
    # Fix: the original message had no '%s' placeholder, so evaluating
    # `'...' % report_key` on assertion failure raised a TypeError instead of
    # producing the intended AssertionError message.
    assert report, (
        'Postsubmit report does not exist for urlsafe key %s' % report_key)

    file_content = _GetFileContentFromGitiles(report, path, revision)
    if not file_content:
      logging.error(
          'Failed to get file from gitiles for %s@%s' % (path, revision))
      return

    gs_path = _ComposeSourceFileGsPath(report, path, revision)
    _WriteFileContentToGs(gs_path, file_content)
class ProcessCodeCoverageData(BaseHandler):
  """Loads coverage data uploaded to GS by coverage builders into datastore.

  Handles both presubmit (per CL/patchset) and postsubmit (per revision)
  coverage data, keyed off the buildbucket build referenced in the request.
  """

  PERMISSION_LEVEL = Permission.APP_SELF

  def _ProcessFullRepositoryData(self, commit, data, full_gs_metadata_dir,
                                 builder, build_id):
    """Persists postsubmit coverage data for a full repository checkout."""
    # Load the commit log first so that we could fail fast before redo all.
    repo_url = 'https://%s/%s.git' % (commit.host, commit.project)
    change_log = CachedGitilesRepository(FinditHttpClient(),
                                         repo_url).GetChangeLog(commit.id)
    assert change_log is not None, 'Failed to retrieve the commit log'

    # Load the manifest based on the DEPS file.
    # TODO(crbug.com/921714): output the manifest as a build output property.
    manifest = _RetrieveManifest(repo_url, commit.id, 'unix')

    # The report starts out invisible and is only flipped to visible after all
    # entities are stored and the report passes the sanity check below.
    report = PostsubmitReport.Create(
        server_host=commit.host,
        project=commit.project,
        ref=commit.ref,
        revision=commit.id,
        bucket=builder.bucket,
        builder=builder.builder,
        commit_position=change_log.commit_position,
        commit_timestamp=change_log.committer.time,
        manifest=manifest,
        summary_metrics=data.get('summaries'),
        build_id=build_id,
        visible=False)
    report.put()

    # Save the file-level, directory-level and line-level coverage data.
    for data_type in ('dirs', 'components', 'files', 'file_shards'):
      sub_data = data.get(data_type)
      if not sub_data:
        continue

      logging.info('Processing %d entries for %s', len(sub_data), data_type)

      # 'file_shards' is just sharded 'files' data; store it under 'files'.
      actual_data_type = data_type
      if data_type == 'file_shards':
        actual_data_type = 'files'

      def FlushEntries(entries, total, last=False):
        # Flush the data in a batch and release memory.
        if len(entries) < 100 and not (last and entries):
          return entries, total

        ndb.put_multi(entries)
        total += len(entries)
        logging.info('Dumped %d coverage data entries of type %s', total,
                     actual_data_type)

        return [], total

      def IterateOverFileShards(file_shards):
        for file_path in file_shards:
          url = '%s/%s' % (full_gs_metadata_dir, file_path)
          # Download data one by one.
          yield _GetValidatedData(url).get('files', [])

      if data_type == 'file_shards':
        data_iterator = IterateOverFileShards(sub_data)
      else:
        data_iterator = [sub_data]

      entities = []
      total = 0

      component_summaries = []
      for dataset in data_iterator:
        for group_data in dataset:
          if actual_data_type == 'components':
            component_summaries.append({
                'name': group_data['path'],
                'path': group_data['path'],
                'summaries': group_data['summaries'],
            })

          if actual_data_type == 'files' and 'revision' in group_data:
            self._FetchAndSaveFileIfNecessary(report, group_data['path'],
                                              group_data['revision'])

          if actual_data_type == 'files':
            coverage_data = FileCoverageData.Create(
                server_host=commit.host,
                project=commit.project,
                ref=commit.ref,
                revision=commit.id,
                path=group_data['path'],
                bucket=builder.bucket,
                builder=builder.builder,
                data=group_data)
          else:
            coverage_data = SummaryCoverageData.Create(
                server_host=commit.host,
                project=commit.project,
                ref=commit.ref,
                revision=commit.id,
                data_type=actual_data_type,
                path=group_data['path'],
                bucket=builder.bucket,
                builder=builder.builder,
                data=group_data)
          entities.append(coverage_data)
          entities, total = FlushEntries(entities, total, last=False)
        del dataset  # Explicitly release memory.
      FlushEntries(entities, total, last=True)

      if component_summaries:
        # Store one aggregate entity covering all components under '>>'.
        component_summaries.sort(key=lambda x: x['path'])
        SummaryCoverageData.Create(
            server_host=commit.host,
            project=commit.project,
            ref=commit.ref,
            revision=commit.id,
            data_type='components',
            path='>>',
            bucket=builder.bucket,
            builder=builder.builder,
            data={
                'dirs': component_summaries,
                'path': '>>'
            }).put()
        component_summaries = []
        logging.info('Summary of all components are saved to datastore.')

    if not _IsReportSuspicious(report):
      report.visible = True
      report.put()

      monitoring.code_coverage_full_reports.increment({
          'host':
              commit.host,
          'project':
              commit.project,
          'ref':
              commit.ref or 'refs/heads/master',
          'builder':
              '%s/%s/%s' % (builder.project, builder.bucket, builder.builder),
      })

  def _FetchAndSaveFileIfNecessary(self, report, path, revision):
    """Fetches the file from gitiles and store to cloud storage if not exist.

    Args:
      report (PostsubmitReport): The report that the file is associated with.
      path (str): Source absolute path to the file.
      revision (str): The gitile revision of the file in its own repo.
    """
    # Due to security concerns, don't cache source files for internal projects.
    if IsInternalInstance():
      return

    assert path.startswith('//'), 'All file path should start with "//"'
    assert revision, 'A valid revision is required'

    gs_path = _ComposeSourceFileGsPath(report, path, revision)
    if _IsFileAvailableInGs(gs_path):
      return

    # Fetch the source files from gitile and save it in gs so that coverage
    # file view can be quickly rendered.
    url = ('/coverage/task/fetch-source-file')
    params = {
        'report_key': report.key.urlsafe(),
        'path': path,
        'revision': revision
    }
    taskqueue.add(
        method='POST',
        url=url,
        target='code-coverage-backend',
        queue_name='code-coverage-fetch-source-file',
        params=params)

  def _ProcessCLPatchData(self, patch, data, build_id):
    """Processes and updates coverage data for per-cl build.

    Part of the responsibility of this method is to calculate per-file coverage
    percentage for the following use cases:
    1. Surface them on Gerrit to provide an overview of the test coverage of
       the CL for authors and reviewers.
    2. For metrics tracking to understand the impact of the coverage data.

    Args:
      patch (tuple): A tuple of two properties: change (int) and patchset (int).
      data (list): A list of dicts with the following properties:
        'path': Source absolute path to the source file.
        'lines': A list of dicts with the following properties:
          'count': #times the range is executed.
          'first': Starting line number of the range (inclusive).
          'last': Ending line number of the range (inclusive).
      build_id (int): Id of the build to process coverage data for.
    """
    # Calculate absolute coverage percentage.
    for per_file_data in data:
      num_covered_lines = 0
      num_total_lines = 0
      for range_data in per_file_data['lines']:
        num_lines = range_data['last'] - range_data['first'] + 1
        num_total_lines += num_lines
        if range_data['count'] > 0:
          num_covered_lines += num_lines

      # NOTE(review): on Python 2 (this file uses `iteritems`/`unicode`
      # elsewhere) this `/` is integer division, so the percentage is
      # truncated to a whole number — confirm that is intended.
      absolute_percentage = (100 * num_covered_lines) / num_total_lines
      per_file_data['covered_lines'] = num_covered_lines
      per_file_data['total_lines'] = num_total_lines
      per_file_data['absolute_coverage_percentage'] = absolute_percentage

    # For a CL/patch, we save the entire data in one entity.
    PresubmitCoverageData.Create(
        server_host=patch.host,
        change=patch.change,
        patchset=patch.patchset,
        build_id=build_id,
        data=data).put()

  def _processCodeCoverageData(self, build_id):
    """Routes one build's coverage output to presubmit/postsubmit processing."""
    build = GetV2Build(
        build_id,
        fields=FieldMask(paths=['id', 'output.properties', 'input', 'builder']))

    if not build:
      return BaseHandler.CreateError(
          'Could not retrieve build #%d from buildbucket, retry' % build_id,
          404)

    if not self._IsCoverageBuild(build.builder.project, build.builder.bucket,
                                 build.builder.builder):
      return

    # Convert the Struct to standard dict, to use .get, .iteritems etc.
    properties = dict(build.output.properties.items())
    gs_bucket = properties.get('coverage_gs_bucket')
    gs_metadata_dir = properties.get('coverage_metadata_gs_path')

    if properties.get('process_coverage_data_failure'):
      monitoring.code_coverage_cq_errors.increment({
          'project': build.builder.project,
          'bucket': build.builder.bucket,
          'builder': build.builder.builder,
      })

    # Ensure that the coverage data is ready.
    if not gs_bucket or not gs_metadata_dir:
      # Fix: `logging.warn` is a deprecated alias of `logging.warning`.
      logging.warning('coverage GS bucket info not available in %r', build.id)
      return

    full_gs_metadata_dir = '/%s/%s' % (gs_bucket, gs_metadata_dir)
    all_json_gs_path = '%s/all.json.gz' % full_gs_metadata_dir
    data = _GetValidatedData(all_json_gs_path)

    # For presubmit coverage, save the whole data in json.
    if build.builder.bucket == 'try':
      # Assume there is only 1 patch which is true in CQ.
      assert len(build.input.gerrit_changes) == 1, 'Expect only one patchset'
      patch = build.input.gerrit_changes[0]
      self._ProcessCLPatchData(patch, data['files'], build_id)
    # For postsubmit coverage, we save the data by file and directory.
    else:
      if not self._IsGitilesCommitAvailable(build.input.gitiles_commit):
        self._SetGitilesCommitFromOutputProperty(build, properties)
      assert self._IsGitilesCommitAvailable(build.input.gitiles_commit), (
          'gitiles commit information is expected to be available either in '
          'input properties or output properties')
      self._ProcessFullRepositoryData(build.input.gitiles_commit, data,
                                      full_gs_metadata_dir, build.builder,
                                      build_id)

  def _IsGitilesCommitAvailable(self, gitiles_commit):
    """Returns True if gitiles_commit is available in the input property."""
    return (gitiles_commit.host and gitiles_commit.project and
            gitiles_commit.ref and gitiles_commit.id)

  def _SetGitilesCommitFromOutputProperty(self, build, output_properties):
    """Set gitiles_commit of the build from output properties."""
    logging.info('gitiles_commit is not available in the input properties, '
                 'set them from output properties.')
    build.input.gitiles_commit.host = output_properties.get(
        'gitiles_commit_host')
    build.input.gitiles_commit.project = output_properties.get(
        'gitiles_commit_project')
    build.input.gitiles_commit.ref = output_properties.get('gitiles_commit_ref')
    build.input.gitiles_commit.id = output_properties.get('gitiles_commit_id')

  # TODO(crbug.com/965559): Move this to a config, which can be easily changed
  # without commit/deployment cycles.
  def _IsCoverageBuild(self, project, bucket, builder):
    """Returns True if the given build is related to code coverage.

    Args:
      project (str): buildbucket project name.
      bucket (str): buildbucket bucket name.
      builder (str): buildbucket builder name.

    Returns:
      True if the given build is related to code coverage, otherwise False.
    """
    # The internal instance of the coverage service is deployed and used ONLY by
    # the cast and libassistant team.
    if IsInternalInstance():
      # Fix: the originals were parenthesized *strings*, e.g.
      # ('cast-chromecast-internal'), so `in` performed substring matching
      # instead of tuple membership. Trailing commas make them real tuples.
      return (project in ('cast-chromecast-internal',) and
              bucket in ('master.tryserver.cast-chromecast-internal.gce',) and
              builder in ('libassistant-absolute_coverage',))

    return (project in ('chromium', 'chrome') and
            bucket in ('coverage', 'ci', 'try') and
            builder in ('chromeos-vm-code-coverage',
                        'linux-chromeos-code-coverage',
                        'linux-chromeos-oobe-code-coverage',
                        'linux-code-coverage', 'linux-coverage-rel'))

  def HandlePost(self):
    """Loads the data from GS bucket, and dumps them into ndb."""
    logging.info('Processing: %s', self.request.path)
    match = _BUILD_ID_REGEX.match(self.request.path)
    if not match:
      logging.info('Build id not found')
      return

    build_id = int(match.group(1))
    return self._processCodeCoverageData(build_id)

  def HandleGet(self):
    return self.HandlePost()  # For local testing purpose.
def _IsServePresubmitCoverageDataEnabled():
  """Returns True if the feature to serve presubmit coverage data is enabled.

  Returns:
    Returns True if it is enabled, otherwise, False.
  """
  # Unless the flag is explicitly set, assume the feature is disabled.
  settings = waterfall_config.GetCodeCoverageSettings()
  return settings.get('serve_presubmit_coverage_data', False)
def _GetBanner(project):
  """If there is a service banner for a given project landing page, return it.

  E.g. a maintenance announcement or outage acknowledgement, etc.

  The setting is expected to be a dict mapping a project to the contents of the
  div tag for the banner. If no project banner is defined, return the default
  one.

  This expected to be None if no banner is to be shown.
  """
  settings = waterfall_config.GetCodeCoverageSettings()
  banners = settings.get('project_banners', {})
  default_banner = banners.get('default')
  return banners.get(project, default_banner)
def _GetPathRootAndSeparatorFromDataType(data_type):
"""Returns the path of the root and path separator for the given data type."""
if data_type in ('files', 'dirs'):
return '//', '/'
elif data_type == 'components':
return '>>', '>'
return None, None
def _GetNameToPathSeparator(path, data_type):
  """Returns a list of [name, sub_path] for the given path.

  Example:
  1. //root/src/file.cc -> [
       ['root/', '//root/'],
       ['src/', '//root/src/'],
       ['file.cc', '//root/src/file.cc']
     ]
  2. //root/src/path1/ -> [
       ['root/', '//root/'],
       ['src/', '//root/src/'],
       ['path1/', '//root/src/path1/']
     ]
  3. component1>component2 -> [
       ['component1', 'component1'],
       ['component2', 'component1>component2'],
     ]
  """
  parts = []
  if not path:
    return parts

  path_root, separator = _GetPathRootAndSeparatorFromDataType(data_type)
  if path == path_root:
    return parts

  # Component paths have no leading marker; file/dir paths start with '//'.
  pos = 0 if data_type == 'components' else 2
  while pos >= 0:
    sep_pos = path.find(separator, pos)
    if sep_pos < 0:
      # No further separator: the remainder (possibly empty, for a path that
      # ends with the separator) maps to the full path.
      parts.append([path[pos:], path])
      break
    name = path[pos:sep_pos + 1]
    if data_type == 'components':
      sub_path = path[:sep_pos]
    else:
      sub_path = path[:sep_pos + 1]
    parts.append([name, sub_path])
    pos = sep_pos + 1
  return parts
def _SplitLineIntoRegions(line, uncovered_blocks):
"""Returns a list of regions for a line of code.
The structure of the output is as follows:
[
{
'covered': True/False # Whether this region is actually covered.
'text': string # The source text for this region.
}
]
The regions in the output list are in the order they appear in the line.
For example, the following loop reconstructs the entire line:
text = ''
for region in _SplitLineIntoRegions(line, uncovered_blocks):
text += region['text']
assert text == line
"""
if not uncovered_blocks:
return [{'is_covered': True, 'text': line}]
regions = []
region_start = 0
for block in uncovered_blocks:
# Change from 1-indexing to 0-indexing
first = block['first'] - 1
last = block['last']
if last < 0:
last = len(line)
else:
last -= 1
# Generate the covered region that precedes this uncovered region.
preceding_text = line[region_start:first]
if preceding_text:
regions.append({'is_covered': True, 'text': preceding_text})
regions.append({
'is_covered': False,
# `last` is inclusive
'text': line[first:last + 1]
})
region_start = last + 1
# If there is any text left on the line, it must be covered. If it were
# uncovered, it would have been part of the final entry in uncovered_blocks.
remaining_text = line[region_start:]
if remaining_text:
regions.append({'is_covered': True, 'text': remaining_text})
return regions
class ServeCodeCoverageData(BaseHandler):
  """Serves coverage data: presubmit (per-CL JSON) and postsubmit (HTML views)."""
  PERMISSION_LEVEL = Permission.ANYONE

  def HandleGet(self):
    """Handles one coverage request.

    Routing, based on query parameters:
      - change & patchset present: return presubmit per-CL coverage as JSON.
      - list_reports: render the list of postsubmit reports for a project.
      - otherwise: render a postsubmit summary/dir/component/file view for a
        single report (latest one when no revision is given).
    """
    host = self.request.get('host', 'chromium.googlesource.com')
    project = self.request.get('project', 'chromium/src')
    ref = self.request.get('ref', 'refs/heads/master')

    change = self.request.get('change')
    patchset = self.request.get('patchset')

    revision = self.request.get('revision')
    path = self.request.get('path')
    data_type = self.request.get('data_type')
    platform = self.request.get('platform', 'linux')
    list_reports = self.request.get('list_reports', False)
    if isinstance(list_reports, str):
      list_reports = (list_reports.lower() == 'true')

    cursor = self.request.get('cursor', None)
    page_size = int(self.request.get('page_size', 100))
    direction = self.request.get('direction', 'next').lower()

    next_cursor = ''
    prev_cursor = ''

    # Infer the data type from the shape of the path when it is not given:
    # trailing '/' -> directory, '>' present -> component, otherwise file.
    if not data_type and path:
      if path.endswith('/'):
        data_type = 'dirs'
      elif path and '>' in path:
        data_type = 'components'
      else:
        data_type = 'files'

    logging.info('host=%s', host)
    logging.info('project=%s', project)
    logging.info('ref=%s', ref)
    logging.info('change=%s', change)
    logging.info('patchset=%s', patchset)
    logging.info('revision=%s', revision)
    logging.info('data_type=%s', data_type)
    logging.info('path=%s', path)
    logging.info('platform=%s', platform)

    if change and patchset:
      # Presubmit (per-CL) branch: serve the stored per-patchset data as JSON.
      logging.info('Servicing coverage data for presubmit')
      if project not in _PROJECTS_WHITELIST:
        kwargs = {'is_project_supported': False}
        return BaseHandler.CreateError(
            error_message='Project "%s" is not supported.' % project,
            return_code=404,
            allowed_origin='*',
            **kwargs)

      if not _IsServePresubmitCoverageDataEnabled():
        # TODO(crbug.com/908609): Switch to 'is_service_enabled'.
        kwargs = {'is_project_supported': False}
        # NOTE(review): 'temporarity' below is a typo for 'temporarily' in a
        # user-facing message; left unchanged in this documentation-only pass.
        return BaseHandler.CreateError(
            error_message=('The functionality has been temporarity disabled.'),
            return_code=404,
            allowed_origin='*',
            **kwargs)

      entity = PresubmitCoverageData.Get(
          server_host=host, change=change, patchset=patchset)
      if not entity:
        return BaseHandler.CreateError(
            'Requested coverage data is not found.', 404, allowed_origin='*')

      data = entity.data
      formatted_data = {'files': []}
      for file_data in data:
        path = file_data['path']
        if path.startswith('//'):  # Check for safe. Old data don't have '//'.
          path = path[2:]

        # TODO(crbug.com/967057): Due to that per-cl coverage bot runs with
        # sandbox enabled and coverage build doesn't work with sandbox yet, the
        # coverage data for blink code are wrong, so skip serving coverage data
        # for those files. Remove this once the bug is fixed.
        if project == 'chromium/src' and (path.startswith('third_party/blink')
                                          or path.startswith('content/renderer')
                                          or path.startswith('content/gpu')):
          continue

        formatted_data['files'].append({
            'path': path,
            'lines': _DecompressLines(file_data['lines']),
        })

      return {
          'data': {
              'host': host,
              'project': project,
              'change': change,
              'patchset': patchset,
              'data': formatted_data,
          },
          'allowed_origin': '*'
      }
    elif project:
      # Postsubmit branch: either a project report list or a single report view.
      logging.info('Servicing coverage data for postsubmit')
      template = None
      warning = None

      if platform not in _POSTSUBMIT_PLATFORM_INFO_MAP:
        return BaseHandler.CreateError(
            'Platform: %s is not supported' % platform, 404)
      bucket = _POSTSUBMIT_PLATFORM_INFO_MAP[platform]['bucket']
      builder = _POSTSUBMIT_PLATFORM_INFO_MAP[platform]['builder']

      if list_reports:
        query = PostsubmitReport.query(
            PostsubmitReport.gitiles_commit.server_host == host,
            PostsubmitReport.gitiles_commit.project == project,
            PostsubmitReport.bucket == bucket,
            PostsubmitReport.builder == builder)
        order_props = [(PostsubmitReport.commit_position, 'desc'),
                       (PostsubmitReport.commit_timestamp, 'desc')]
        entities, prev_cursor, next_cursor = GetPagedResults(
            query, order_props, cursor, direction, page_size)

        # TODO(crbug.com/926237): Move the conversion to client side and use
        # local timezone.
        data = []
        for entity in entities:
          data.append({
              'gitiles_commit': {
                  'revision': entity.gitiles_commit.revision,
              },
              'commit_position': entity.commit_position,
              'commit_timestamp': ConvertUTCToPST(entity.commit_timestamp),
              'summary_metrics': entity.summary_metrics,
              'build_id': entity.build_id,
              'visible': entity.visible,
          })
        template = 'coverage/project_view.html'
        data_type = 'project'
      else:
        warning = None
        if not data_type:
          data_type = 'dirs'
        if not revision:
          # No revision given: fall back to the most recent report.
          query = PostsubmitReport.query(
              PostsubmitReport.gitiles_commit.server_host == host,
              PostsubmitReport.gitiles_commit.project == project,
              PostsubmitReport.bucket == bucket, PostsubmitReport.builder ==
              builder).order(-PostsubmitReport.commit_position).order(
                  -PostsubmitReport.commit_timestamp)
          entities = query.fetch(limit=1)
          # NOTE(review): this assumes at least one report exists for the
          # builder; an empty result raises IndexError here — confirm upstream
          # guarantees, or whether a 404 would be more appropriate.
          report = entities[0]
          revision = report.gitiles_commit.revision
        else:
          report = PostsubmitReport.Get(
              server_host=host,
              project=project,
              ref=ref,
              revision=revision,
              bucket=bucket,
              builder=builder)
          if not report:
            return BaseHandler.CreateError('Report record not found', 404)

        template = 'coverage/summary_view.html'
        if data_type == 'dirs':
          default_path = '//'
        elif data_type == 'components':
          default_path = '>>'
        else:
          if data_type != 'files':
            return BaseHandler.CreateError(
                'Expected data_type to be "files", but got "%s"' % data_type,
                400)
          template = 'coverage/file_view.html'

        path = path or default_path

        if data_type == 'files':
          entity = FileCoverageData.Get(
              server_host=host,
              project=project,
              ref=ref,
              revision=revision,
              path=path,
              bucket=bucket,
              builder=builder)
          if not entity:
            # Unknown file: degrade gracefully to the root directory view.
            warning = (
                'File "%s" does not exist in this report, defaulting to root' %
                path)
            logging.warning(warning)
            path = '//'
            data_type = 'dirs'
            template = 'coverage/summary_view.html'

        if data_type != 'files':
          entity = SummaryCoverageData.Get(
              server_host=host,
              project=project,
              ref=ref,
              revision=revision,
              data_type=data_type,
              path=path,
              bucket=bucket,
              builder=builder)
          if not entity:
            # Unknown dir/component: degrade gracefully to the root path.
            warning = (
                'Path "%s" does not exist in this report, defaulting to root' %
                path)
            logging.warning(warning)
            path = default_path
            entity = SummaryCoverageData.Get(
                server_host=host,
                project=project,
                ref=ref,
                revision=revision,
                data_type=data_type,
                path=path,
                bucket=bucket,
                builder=builder)

        metadata = entity.data
        data = {
            'commit_position': report.commit_position,
            'metadata': metadata,
        }

        line_to_data = None
        if data_type == 'files':
          # Build per-line render data: source text, execution count and,
          # where applicable, partially-covered sub-line regions.
          line_to_data = collections.defaultdict(dict)

          if 'revision' in metadata:
            gs_path = _ComposeSourceFileGsPath(report, path,
                                               metadata['revision'])
            file_content = _GetFileContentFromGs(gs_path)
            if not file_content:
              # Fetching files from Gitiles is slow, only use it as a backup.
              file_content = _GetFileContentFromGitiles(report, path,
                                                        metadata['revision'])
          else:
            # If metadata['revision'] is empty, it means that the file is not
            # a source file.
            file_content = None

          if not file_content:
            line_to_data[1]['line'] = '!!!!No source code available!!!!'
            line_to_data[1]['count'] = 0
          else:
            file_lines = file_content.splitlines()
            for i, line in enumerate(file_lines):
              # According to http://jinja.pocoo.org/docs/2.10/api/#unicode,
              # Jinja requires passing unicode objects or ASCII-only bytestring,
              # and given that it is possible for source files to have non-ASCII
              # chars, thus converting lines to unicode.
              line_to_data[i + 1]['line'] = unicode(line, 'utf8')
              line_to_data[i + 1]['count'] = -1

          uncovered_blocks = {}
          if 'uncovered_blocks' in metadata:
            for line_data in metadata['uncovered_blocks']:
              uncovered_blocks[line_data['line']] = line_data['ranges']

          for line in metadata['lines']:
            for line_num in range(line['first'], line['last'] + 1):
              line_to_data[line_num]['count'] = line['count']
              if line_num in uncovered_blocks:
                # Partially-covered line: split into covered/uncovered regions.
                text = line_to_data[line_num]['line']
                regions = _SplitLineIntoRegions(text,
                                                uncovered_blocks[line_num])
                line_to_data[line_num]['regions'] = regions
                line_to_data[line_num]['is_partially_covered'] = True
              else:
                line_to_data[line_num]['is_partially_covered'] = False

          line_to_data = list(line_to_data.iteritems())
          line_to_data.sort(key=lambda x: x[0])
          data['line_to_data'] = line_to_data

      # Compute the mapping of the name->path mappings in order.
      path_parts = _GetNameToPathSeparator(path, data_type)
      path_root, _ = _GetPathRootAndSeparatorFromDataType(data_type)

      return {
          'data': {
              'host':
                  host,
              'project':
                  project,
              'ref':
                  ref,
              'revision':
                  revision,
              'path':
                  path,
              'platform':
                  platform,
              'platform_ui_name':
                  _POSTSUBMIT_PLATFORM_INFO_MAP[platform]['ui_name'],
              'path_root':
                  path_root,
              'data':
                  data,
              'data_type':
                  data_type,
              'path_parts':
                  path_parts,
              'platform_select':
                  _MakePlatformSelect(host, project, ref, revision, path,
                                      platform),
              'banner':
                  _GetBanner(project),
              'warning':
                  warning,
              'next_cursor':
                  next_cursor,
              'prev_cursor':
                  prev_cursor,
          },
          'template': template,
      }
    else:
      return BaseHandler.CreateError('Invalid request', 400)
|
"""VaeRnn implementation."""
from argparse import ArgumentParser
from typing import Any, Dict, Tuple
import torch
from torch import nn
from ....arg_parser.utils import str2bool
from ....tokenizer import Tokenizer
from ..base_model import GranularEncoderDecoderModel
from ..loss import LOSS_FACTORY
from ..module import RnnDecoder, RnnEncoder
from ..utils import KLAnnealer
class VaeRnn(GranularEncoderDecoderModel):
"""VaeRnn - variational encoder using RNN with Gaussian prior and approximate posterior."""
def __init__(
self,
name: str,
position: int,
data: Dict[str, str],
vocab_size: int,
embedding_size: int,
tokenizer: Tokenizer,
hidden_size_enc: int = 265,
n_layers_enc: int = 2,
hidden_size_dec: int = 265,
n_layers_dec: int = 2,
bidirectional: bool = False,
latent_size: int = 196,
teacher_forcing: bool = True,
loss_function: str = "ce",
kl_low: float = 0.0,
kl_high: float = 0.1,
kl_n_epochs: int = 100,
kl_start_epoch: int = 0,
inference_check_frequency: int = 50,
**kwargs,
) -> None:
"""Construct VaeRnn.
Args:
name: model name.
position: position of the model.
data: data name mappings.
vocab_size: size of the vocabulary.
embedding_size: size of the embedding vectors.
tokenizer: tokenizer.
hidden_size_enc: encoder hidden size. Defaults to 256.
n_layers_enc: number of layers for the encoder. Defaults to 2.
hidden_size_dec: decoder hidden size. Defaults to 256.
n_layers_dec: number of layers for the decoder. Defaults to 2.
bidirectional: whether the RNN cell is bidirectional. Defaults to False.
latent_size: latent size. Defaults to 196.
teacher_forcing: whether to teacher forcing. Defaults to True.
loss_function: loss function. Defaults to "ce".
kl_low: low KL weight. Defaults to 0.0.
kl_high: high KL weight. Defaults to 0.1.
kl_n_epochs: KL number of epochs. Defaults to 100.
kl_start_epoch: KL starting epoch. Defaults to 0.
inference_check_frequency: frequency for checking inference quality. Defaults to 50.
Raises:
ValueError: in case the provided loss function is not supported.
"""
super().__init__(name=name, data=data)
self.position = position
self.input_key = f"{name}_{data['input']}"
self.target_key = f"{name}_{data['target']}"
self.latent_size = latent_size
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.teacher_forcing = teacher_forcing
self.tokenizer = tokenizer
self.hidden_size_enc = hidden_size_enc
self.n_layers_enc = (n_layers_enc,)
self.hidden_size_dec = hidden_size_dec
self.n_layers_dec = (n_layers_dec,)
self.hidden_factor = (2 if bidirectional else 1) * n_layers_enc
self.loss_function_name = loss_function.lower()
if self.loss_function_name not in LOSS_FACTORY:
raise ValueError(
f"loss_function={self.loss_function_name} not supported. Pick a valid one: {sorted(list(LOSS_FACTORY.keys()))}"
)
self.loss_function = LOSS_FACTORY[self.loss_function_name]
self.fc_mu = nn.Linear(self.hidden_factor * hidden_size_enc, self.latent_size)
self.fc_var = nn.Linear(self.hidden_factor * hidden_size_enc, self.latent_size)
self.encoder = RnnEncoder(
vocab_size=vocab_size,
embedding_size=embedding_size,
hidden_size=hidden_size_enc,
n_layers=n_layers_enc,
bidirectional=bidirectional,
)
self.decoder = RnnDecoder(
vocab_size=vocab_size,
embedding_size=embedding_size,
hidden_size=hidden_size_dec,
n_layers=n_layers_dec,
latent_size=latent_size,
)
self.epoch_counter = 0
self.klannealer = KLAnnealer(
kl_low=kl_low,
kl_high=kl_high,
n_epochs=kl_n_epochs,
start_epoch=kl_start_epoch,
)
self.inference_check_frequency = inference_check_frequency
def decode(self, z: Any, max_len: int = 127, *args, **kwargs) -> Any:
"""Decode a latent space point.
Args:
z: latent point.
max_len: maximum sequence length. Defaults to 127.
Returns:
tuple with decoded texts and token indices.
"""
decoded_texts, token_indices = self.decoder.inference_direct(
z, self.encoder.embedding, self.tokenizer, max_len=max_len
)
return decoded_texts, token_indices
def _sampling_step(self, x: Any, *args, **kwargs) -> Any:
"""Run a sampling step in the model.
Args:
x: model input.
Returns:
model sampling step output.
"""
x, input_embedding = self.encoder(x)
mu = self.fc_mu(x)
log_var = self.fc_var(x)
p, q, z = self.sample(mu, log_var)
return p, q, z, input_embedding
def encode(self, x: Any, *args, **kwargs) -> Any:
"""Encode a sample.
Args:
x: input sample.
Returns:
latent encoding.
"""
_, _, z, _ = self._sampling_step(x)
return z
def encode_decode(self, x: Any, max_len: int = 127, *args, **kwargs) -> Any:
"""Encode and decode a sample.
Args:
x: input sample.
max_len: maximum sequence length. Defaults to 127.
Returns:
decoded sample.
"""
z = self.encode(x)
return self.decode(z, max_len=max_len)
def inference(self, x: Any, *args, **kwargs) -> Any: # type:ignore
"""Run the model in inference mode.
Args:
x: sample.
Returns:
generated output.
"""
max_len = x.size(1)
_, _, z, _ = self._sampling_step(x)
return self.decode(z, max_len=max_len)
def _run_step(self, x: Any, *args, **kwargs) -> Any:
"""Run a step in the model.
Args:
x: model input.
Returns:
model step output.
"""
p, q, z, input_embedding = self._sampling_step(x)
return z, self.decoder(z, input_embedding), p, q
def step(
self,
input_data: Any,
target_data: Any,
device: str = "cpu",
current_epoch: int = 0,
*args,
**kwargs,
) -> Tuple[Any, Any, Any]:
"""Training step for the model.
Args:
input_data: input for the step.
target_data: target for the step.
device: string representing the device to use. Defaults to "cpu".
current_epoch: current epoch. Defaults to 0.
Returns:
a tuple containing the step output, the loss and the logs for the module.
"""
x = input_data
x_out = target_data
# teacher forcing
if self.teacher_forcing:
x_out = x_out[:, 1:].long()
x = x[:, :-1]
z, x_hat, p, q = self._run_step(x)
x_hat = x_hat.view(-1, x_hat.size(-1))
x_target = x_out.contiguous().view(-1)
reconstruction_loss = self.loss_function(x_hat, x_target)
log_qz = q.log_prob(z)
log_pz = p.log_prob(z)
kl_scaling_factor = self.klannealer(current_epoch)
kl = log_qz - log_pz
kl = kl.mean()
kl_scaled = kl * kl_scaling_factor
loss = kl_scaled + reconstruction_loss
logs = {
"reconstruction_loss": reconstruction_loss,
"kl_scaled": kl_scaled,
"kl_unscaled": kl,
"kl_scaling_factor": kl_scaling_factor,
"loss": loss,
}
return z, loss, logs
def val_step(
self,
input_data: Any,
target_data: Any,
device: str = "cpu",
current_epoch: int = 0,
*args,
**kwargs,
) -> Any:
"""Validation step for the model.
Args:
input_data: input for the step.
target_data: target for the step.
device: string representing the device to use. Defaults to "cpu".
current_epoch: current epoch. Defaults to 0.
Returns:
a tuple containing the step output, the loss and the logs for the module.
"""
x = input_data
z, loss, logs = self.step(
input_data=input_data,
target_data=target_data,
device=device,
current_epoch=current_epoch,
)
if current_epoch % self.inference_check_frequency == 0 and current_epoch > 0:
decoded_texts, token_indices = self.inference(x)
reconstructed_texts = 0
decoded_splitted_texts = [
text.split(self.tokenizer.eos_token, 1)[0] for text in decoded_texts
]
for _, text in enumerate(decoded_splitted_texts):
if self.tokenizer.pad_token not in text:
reconstructed_texts += 1
valid_percentage = float(reconstructed_texts) / x.size(0)
reconstructed_bits = torch.sum(
x[:, 1:] == token_indices[:, : x[:, 1:].size(1)]
).item()
reconstructed_bits_percentage = reconstructed_bits / x.numel()
logs.update(
{
"reconstructed_bits": reconstructed_bits_percentage,
"validity": valid_percentage,
}
)
return z, loss, logs
@staticmethod
def add_model_specific_args(
parent_parser: ArgumentParser, name: str, *args, **kwargs
) -> ArgumentParser:
"""Adding to a parser model specific arguments.
Args:
parent_parser: patent parser.
name: model name.
Returns:
updated parser.
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument(f"--data_path_{name}", type=str)
parser.add_argument(f"--data_file_{name}", type=str)
parser.add_argument(f"--dataset_type_{name}", type=str)
parser.add_argument(f"--position_{name}", type=int, nargs="+")
parser.add_argument(f"--build_vocab{name}", type=str2bool)
parser.add_argument(f"--vocab_file{name}", type=str)
parser.add_argument(f"--input_{name}", type=str)
parser.add_argument(f"--target_{name}", type=str)
parser.add_argument(f"--checkpoint_path_{name}", type=str)
parser.add_argument(f"--checkpoint_model_name_{name}", type=str)
parser.add_argument(f"--start_from_checkpoint_{name}", type=str2bool)
parser.add_argument(f"--freeze_weights_{name}", type=str2bool)
parser.add_argument(f"--hidden_size_enc_{name}", type=int)
parser.add_argument(f"--hidden_size_dec_{name}", type=int)
parser.add_argument(f"--n_layers_enc_{name}", type=int)
parser.add_argument(f"--n_layers_dec_{name}", type=int)
parser.add_argument(f"--bidirectional_{name}", type=str2bool)
parser.add_argument(f"--latent_size_{name}", type=int)
parser.add_argument(f"--kl_low_{name}", type=float)
parser.add_argument(f"--kl_high_{name}", type=float)
parser.add_argument(f"--kl_n_epochs_{name}", type=int)
parser.add_argument(f"--kl_start_epoch_{name}", type=int)
parser.add_argument(f"--inference_check_frequency_{name}", type=int)
return parser
|
'''Implementation for routing addremove triggers'''
# import genie.libs
from genie.libs.sdk.triggers.addremove.addremove import TriggerAddRemove
class TriggerAddRemoveIpv4StaticRoutes(TriggerAddRemove):
    """Add-then-remove trigger for IPv4 static routes.

    All behavior is inherited from TriggerAddRemove; no overrides here.
    """
    pass
class TriggerAddRemoveIpv6StaticRoutes(TriggerAddRemoveIpv4StaticRoutes):
    """Add-then-remove trigger for IPv6 static routes.

    NOTE(review): inherits from the IPv4 trigger rather than directly from
    TriggerAddRemove — presumably to share its setup; confirm intentional.
    """
    pass
"""Train an LBPH face recognizer from labeled images under ./img/<person>/."""
import numpy as np
from PIL import Image
import os, cv2, pickle

# Haar cascade for face detection; the XML file must sit next to this script.
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()

mainDir = os.path.dirname(os.path.abspath(__file__))
imageDir = os.path.join(mainDir, "img")

current_id = 0
label_dec = {}   # maps label name -> numeric id
y_labels = []    # numeric label per detected face
x_train = []     # grayscale face regions (ROIs)

for root, dirs, files in os.walk(imageDir):
    for file in files:
        path = os.path.join(root, file)
        # The containing directory name is the person's label.
        labels = os.path.basename(root).replace(" ", "-").lower()
        # Fixed anti-idiom: was `if labels in label_dec: pass else: ...`.
        if labels not in label_dec:
            label_dec[labels] = current_id
            current_id += 1
        labelID = label_dec[labels]
        pilImage = Image.open(path).convert("L")  # gray scale
        image_array = np.array(pilImage, "uint8")
        faces = faceCascade.detectMultiScale(image_array, 1.3, 2)
        for (x, y, w, h) in faces:
            roi = image_array[y:y + h, x:x + w]
            x_train.append(roi)
            y_labels.append(labelID)

# Persist the label mapping for use at recognition time.
with open("labels.pkl", "wb") as f:
    pickle.dump(label_dec, f)

recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainner.yml")
|
from formencode import ForEach, Schema, NestedVariables, validators
from pyramid.testing import DummySession
# This always stays the same.
dummy_csrf_token = DummySession().get_csrf_token()
class DummySchema(Schema):
    """Minimal test schema: one required string field ``foo``;
    any unexpected field makes validation fail."""
    allow_extra_fields = False
    foo = validators.String(not_empty=True)
class LooseDummySchema(DummySchema):
    """Same as DummySchema but tolerates unexpected extra fields."""
    allow_extra_fields = True
class DummyObject(object):
    """Empty placeholder object used by the tests."""
    pass
class NestedDummySchema(Schema):
    """Schema exercising nested structures: a repeated sub-schema (``items``),
    a single nested sub-schema (``subfields``) and two scalar fields."""
    allow_extra_fields = False
    # Unflatten dotted/bracketed field names before validation.
    pre_validators = [NestedVariables]
    items = ForEach(DummySchema)
    subfields = DummySchema
    name = validators.String(not_empty=True)
    # qty must be an int between 4 and 100 inclusive.
    qty = validators.Int(min=4, max=100)
|
#!/usr/bin/env python
"""Bootstrap a MySQL server from settings in bootconf.json (Debian/Ubuntu)."""
import sys
import os
import simplejson as json

# Fixed: use a context manager instead of manual open/close, and Python 3
# print() calls (the original used Python 2 print statements, which are a
# syntax error on any modern interpreter).
with open("bootconf.json", "r") as f:
    vals_dict = json.load(f)

# Run apt/mysql tools non-interactively.
os.putenv('DEBIAN_FRONTEND', 'noninteractive')
os.putenv('TERM', 'dumb')

password = vals_dict['dbpassword']
dbname = vals_dict['dbname']

# NOTE(review): the password/dbname are interpolated straight into shell
# commands; values containing shell metacharacters will break (or abuse)
# them. Consider subprocess.run([...], shell=False) with argument lists.
commands = []
commands.append('sudo -E apt-get -y -q install mysql-server-5.1')
commands.append('sudo -E mysqladmin -u root password %s' % (password))
commands.append('sudo -E mysqladmin --password=%s create %s' % (password, dbname))
commands.append("sudo -E mysql --password=%s -e \"GRANT Select, Insert, Update, Create, Delete ON *.* TO 'root'@'%%' IDENTIFIED BY '%s';\"" % (password, password))
commands.append("sudo -E sed -i 's/bind-address.*/bind-address = 0.0.0.0/' /etc/mysql/my.cnf")
commands.append("sudo -E restart mysql")

# Stop at the first failing command, propagating its exit code.
for cmd in commands:
    print(cmd)
    rc = os.system(cmd)
    if rc != 0:
        print("ERROR! %d" % (rc))
        sys.exit(rc)

print("SUCCESS")
sys.exit(0)
|
"""
"""
from vyapp.plugins import ENV
from vyapp.areavi import AreaVi
# Expose copy/cut-selection and mode-change helpers on the active AreaVi.
ENV['cpsel'] = lambda sep='\n': AreaVi.ACTIVE.cpsel(sep)
ENV['ctsel'] = lambda sep='\n': AreaVi.ACTIVE.ctsel(sep)
# Parameter renamed from 'id' to avoid shadowing the builtin id().
ENV['chmode'] = lambda mode: AreaVi.ACTIVE.chmode(mode)
|
from pprint import pprint

from finnews.client import News

# Create a new instance of the News Client and grab the CNN Finance client.
news_client = News()
cnn_finance_client = news_client.cnn_finance

# Every CNN Finance feed to demo, in the order they should be fetched.
# NOTE: 'techonology' is spelled exactly as the client method is named.
_FEEDS = [
    'all_stories',
    'top_stories',
    'most_popular',
    'companies',
    'international',
    'economy',
    'video_news',
    'media',
    'markets',
    'techonology',
    'personal_finance',
    'autos',
    'colleges',
    'taxes',
    'funds',
    'insurance',
    'retirement',
    'luxury',
    'lifestyle',
    'real_estate',
    'small_business',
]

# # Grab the Morning Buzz Feed - DOES NOT WORK.
# content = cnn_finance_client.morning_buzz()
# news_client.save_to_file(content=content, file_name='cnn_finance_morning_buzz')
# pprint(content)

# Fetch each feed and pretty-print its content.
for _feed in _FEEDS:
    content = getattr(cnn_finance_client, _feed)()
    pprint(content)
|
"""
Copyright (c) 2015-2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from atomic_reactor.constants import IMAGE_TYPE_DOCKER_ARCHIVE
from atomic_reactor.dirs import BuildDir
from atomic_reactor.plugin import Plugin
from atomic_reactor.util import get_exported_image_metadata
class FetchDockerArchivePlugin(Plugin):
    """Download the per-platform docker-archive image and collect its metadata."""

    key = 'fetch_docker_archive'
    is_allowed_to_fail = False

    def __init__(self, workflow):
        """
        :param workflow: DockerBuildWorkflow instance
        """
        super(FetchDockerArchivePlugin, self).__init__(workflow)

    def download_image(self, build_dir: BuildDir):
        """Fetch the docker-archive tarball for one platform build dir."""
        tag_conf = self.workflow.data.tag_conf
        image = tag_conf.get_unique_images_with_platform(build_dir.platform)[0]
        archive_path = str(build_dir.exported_squashed_image)

        self.log.info('fetching image %s', image)
        self.workflow.imageutil.download_image_archive_tarball(image, archive_path)

        metadata = get_exported_image_metadata(archive_path, IMAGE_TYPE_DOCKER_ARCHIVE)
        self.log.info('image for platform:%s available at %s',
                      build_dir.platform, archive_path)
        return metadata

    def run(self):
        """Download the archive for every platform; return per-platform metadata."""
        return self.workflow.build_dir.for_each_platform(self.download_image)
|
import numpy as np
from tensorflow.keras.utils import Sequence
from ulaw import lin2ulaw
def lpc2rc(lpc):
    """Convert LPC coefficients to reflection coefficients.

    Runs the Levinson-Durbin recursion backwards along the last axis.

    Args:
        lpc: array of shape (batch, time, order) of LPC coefficients.

    Returns:
        Array of the same shape holding the reflection coefficients.
    """
    order = lpc.shape[-1]
    rc = np.zeros_like(lpc)
    coeffs = lpc
    for step in range(order, 0, -1):
        # The last remaining coefficient is the reflection coefficient.
        rc[:, :, step - 1] = coeffs[:, :, -1]
        k = rc[:, :, step - 1:step].repeat(step - 1, axis=2)
        # Step the recursion down one order.
        coeffs = (coeffs[:, :, :-1] - k * coeffs[:, :, -2::-1]) / (1 - k * k)
    return rc
class LPCNetLoader(Sequence):
    """Keras Sequence yielding shuffled LPCNet training batches.

    Trims all inputs to a whole number of batches and reshuffles the
    sample order at the end of every epoch.
    """

    def __init__(self, data, features, periods, batch_size, e2e=False):
        self.batch_size = batch_size
        usable = min(data.shape[0], features.shape[0], periods.shape[0])
        self.nb_batches = usable // self.batch_size
        keep = self.nb_batches * self.batch_size
        self.data = data[:keep, :]
        self.features = features[:keep, :]
        self.periods = periods[:keep, :]
        self.e2e = e2e
        self.on_epoch_end()

    def on_epoch_end(self):
        # Draw a new random sample order for the next epoch.
        self.indices = np.arange(self.nb_batches * self.batch_size)
        np.random.shuffle(self.indices)

    def __getitem__(self, index):
        batch = self.indices[index * self.batch_size:(index + 1) * self.batch_size]
        data = self.data[batch, :, :]
        in_data = data[:, :, :1]
        out_data = data[:, :, 1:]
        features = self.features[batch, :, :-16]
        periods = self.periods[batch, :, :]
        # The last 16 feature channels hold the LPC coefficients; drop the
        # two-frame padding at each end of the time axis.
        lpc = self.features[batch, 2:-2, -16:]
        inputs = [in_data, features, periods]
        outputs = [out_data]
        if self.e2e:
            # End-to-end training predicts reflection coefficients instead.
            outputs.append(lpc2rc(lpc))
        else:
            inputs.append(lpc)
        return (inputs, outputs)

    def __len__(self):
        return self.nb_batches
|
import os
import numpy as np
from scipy import stats
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline
from sklearn.isotonic import IsotonicRegression
from statsmodels.base.model import GenericLikelihoodModel
def logspace_diagmean(mat, start=1, stop=None, num=100):
    """Nan-mean of off-diagonals of ``mat`` at log-spaced offsets.

    Args:
        mat: square matrix.
        start: first diagonal offset. Defaults to 1.
        stop: last diagonal offset; defaults to n - 1.
        num: number of log-spaced offsets requested (duplicates merged).

    Returns:
        Tuple (means, offsets): nan-mean per sampled diagonal and the
        unique, sorted integer offsets actually used.
    """
    if stop is None:
        stop = mat.shape[0] - 1
    offsets = np.unique(np.logspace(np.log10(start), np.log10(stop),
                                    num=num, dtype=int))
    means = np.array([np.nanmean(np.diag(mat, k)) for k in offsets])
    return means, offsets
def diagmean(mat, start=1, stop=None):
    """Nan-mean of every off-diagonal of ``mat`` from ``start`` to ``stop - 1``.

    Args:
        mat: square matrix.
        start: first diagonal offset. Defaults to 1.
        stop: one past the last offset; defaults to n.

    Returns:
        Tuple (means, offsets).
    """
    if stop is None:
        stop = mat.shape[0]
    offsets = np.arange(start, stop, dtype=int)
    means = np.array([np.nanmean(np.diag(mat, k)) for k in offsets])
    return means, offsets
def estimate_alpha(c, d, start, plot=False, savefile=None):
    """Estimate the exponent alpha relating contacts to distance.

    Fits power laws to the diagonal means of the distance matrix ``d``
    (d ~ A1*s^B1) and the contact matrix ``c`` (c ~ A2*s^B2); combining
    them gives c ~ beta * d^alpha with alpha = B2/B1.

    Args:
        c: contact-count matrix.
        d: spatial-distance matrix.
        start: first diagonal offset to include in the fits.
        plot: save diagnostic log-log plots when True. Defaults to False.
        savefile: figure path (required when plot=True).

    Returns:
        The estimated exponent alpha = B2/B1.
    """
    dmean, seq = diagmean(d, start=start)
    # Diagonals that are entirely nan are excluded from the fit.
    idx = ~np.isnan(dmean)
    # fit relation between d = A1*s^B1
    p1, _ = curve_fit(lambda s, A, B: A*np.power(s, B),
                      seq[idx], dmean[idx])
    A1, B1 = p1
    if plot:
        fig, axes = plt.subplots(1, 2, figsize=(24, 8))
        axes[0].loglog(seq, dmean, label='distance')
        axes[0].loglog(seq, A1 * np.power(seq, B1), label='curve fit')
        axes[0].set_xlabel('diagonals')
        axes[0].set_ylabel('average distance')
        axes[0].legend()
    cmean, seq = diagmean(c, start=start)
    idx = ~np.isnan(cmean)
    # fit relation between c = A2*s^B2
    p2, _ = curve_fit(lambda s, A, B: A*np.power(s, B),
                      seq[idx], cmean[idx])
    A2, B2 = p2
    if plot:
        # Reuses the axes created above; the figure is saved only here.
        axes[1].loglog(seq, cmean, label='contact')
        axes[1].loglog(seq, A2 * np.power(seq, B2), label='curve fit')
        axes[1].set_xlabel('diagonals')
        axes[1].set_ylabel('average contact')
        axes[1].legend()
        plt.savefig(savefile)
    # d=A1*s^B1 => s=(d/A1)^(1/B1)
    # c=A2*s^B2
    # c=[A2*A1^(-B2/B1)]*d^(B2/B1)
    # c=beta*d^alpha
    alpha = B2/B1
    return alpha
def estimate_beta(conmat, conpat, dmat, dpat, alpha_mat, alpha_pat, mask):
    """Estimate the scale factor beta in c = beta * d**alpha.

    Pools both haplotypes: total observed contacts over the masked entries
    divided by the total predicted (unscaled) contacts.

    Args:
        conmat, conpat: maternal/paternal contact matrices.
        dmat, dpat: maternal/paternal distance matrices.
        alpha_mat, alpha_pat: per-haplotype exponents.
        mask: boolean index selecting the entries to use.

    Returns:
        Scalar beta.
    """
    observed = np.sum(conmat[mask]) + np.sum(conpat[mask])
    predicted = (np.sum(np.power(dmat[mask], alpha_mat))
                 + np.sum(np.power(dpat[mask], alpha_pat)))
    return observed / predicted
def zip_pmf(x, pi, lambda_):
    """Zero-inflated Poisson pmf evaluated element-wise at ``x``.

    Args:
        x: array of counts.
        pi: zero-inflation probability (extra mass at zero).
        lambda_: Poisson rate.

    Returns:
        Array of probabilities; all zeros for out-of-range parameters.
    """
    # Out-of-range parameters get zero probability everywhere (the
    # optimizer treats the resulting -log(0) as a barrier).
    if not (0 <= pi <= 1) or lambda_ <= 0:
        return np.zeros_like(x)
    return (x == 0) * pi + (1 - pi) * stats.poisson.pmf(x, lambda_)
class ZeroInflatedPoisson(GenericLikelihoodModel):
    """Maximum-likelihood fit of a zero-inflated Poisson distribution.

    Parameters are packed as ``params = [pi, lambda_]`` where ``pi`` is the
    zero-inflation probability and ``lambda_`` the Poisson rate.
    """
    def __init__(self, endog, exog=None, **kwds):
        # GenericLikelihoodModel expects an exog array even though this
        # model has no covariates; supply a dummy of matching shape.
        if exog is None:
            exog = np.zeros_like(endog)
        super(ZeroInflatedPoisson, self).__init__(endog, exog, **kwds)
    def nloglikeobs(self, params):
        """Per-observation negative log-likelihood at ``params``."""
        pi = params[0]
        lambda_ = params[1]
        # zip_pmf returns 0 for out-of-range params, so this becomes
        # -log(0) = inf, acting as a barrier for the optimizer.
        return -np.log(zip_pmf(self.endog, pi=pi, lambda_=lambda_))
    def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
        """Fit the model; derives start values from the data when none given."""
        if start_params is None:
            # Start pi at half the observed zero fraction, and lambda at the
            # count mean adjusted for those assumed excess zeros.
            p0 = (self.endog == 0).mean()
            excess_zeros = p0 * 0.5
            lambda_start = self.endog.sum() * 1. / (self.endog.size * (1 - excess_zeros))
            start_params = np.array([excess_zeros, lambda_start])
        return super(ZeroInflatedPoisson, self).fit(start_params=start_params,
                                                    maxiter=maxiter, maxfun=maxfun, **kwds)
def fit_zip(mat, seq):
    """Fit a zero-inflated Poisson to each diagonal of ``mat`` in ``seq``.

    Args:
        mat: square count matrix.
        seq: iterable of diagonal offsets.

    Returns:
        Tuple (mus, gammas): Poisson rate and non-inflation probability
        (1 - pi) per diagonal; nan where a diagonal has no finite entries.
    """
    mus = []
    gammas = []
    for offset in seq:
        diag = np.diag(mat, offset)
        valid = diag[~np.isnan(diag)]
        if valid.size > 0:
            results = ZeroInflatedPoisson(valid).fit()
            gammas.append(1. - results.params[0])
            mus.append(results.params[1])
        else:
            gammas.append(np.nan)
            mus.append(np.nan)
    return np.array(mus), np.array(gammas)
def estimate_gamma(conmat, conpat, start, plot=False, outdir=None):
    """Estimate a monotone gamma(s) curve from both haplotype contact maps.

    Fits a zero-inflated Poisson per diagonal, averages the resulting
    gammas in log-spaced bins, smooths them with a spline and enforces
    monotonicity via isotonic regression.

    Args:
        conmat: maternal contact matrix.
        conpat: paternal contact matrix.
        start: first diagonal offset to fit.
        plot: save a diagnostic plot when True. Defaults to False.
        outdir: output directory for the plot (required when plot=True).

    Returns:
        Array of length n-1 with gamma per diagonal offset (nan below start).
    """
    # TODO remove outliers
    n = conmat.shape[0]
    x = np.arange(start, n)
    _, gammas_mat = fit_zip(conmat, x)
    _, gammas_pat = fit_zip(conpat, x)
    bins = np.unique(np.logspace(np.log10(x.min()), np.log10(x.max()),
                                 num=200, dtype=int))
    binning = [(bins[i], bins[i + 1] - 1) for i in range(len(bins) - 1)]
    # Bug fix: map() returns a lazy iterator on Python 3, which broke the
    # meangammas[-1] indexing and np.append below (TypeError). Materialize
    # the per-bin averages as lists instead.
    meangammas = [
        np.nanmean(np.concatenate((gammas_mat[lo - start:hi - start + 1],
                                   gammas_pat[lo - start:hi - start + 1])))
        for lo, hi in binning
    ]
    meanbinning = [np.nanmean(np.arange(lo, hi + 1)) for lo, hi in binning]
    # Anchor the curve at the largest offset with the last binned value.
    meanbinning = np.append(meanbinning, x.max())
    meangammas = np.append(meangammas, meangammas[-1])
    uspl = UnivariateSpline(x=meanbinning, y=meangammas,
                            s=min(np.nanmin(gammas_mat), np.nanmin(gammas_pat)) ** 2)
    ir = IsotonicRegression(increasing=False)
    newgammas = ir.fit_transform(x, uspl(x))
    if plot:
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.plot(x, gammas_mat, 'r.', alpha=0.5, label='maternal gamma')
        ax.plot(x, gammas_pat, 'c.', alpha=0.3, label='paternal gamma')
        ax.plot(meanbinning, meangammas, 'yo', label='binning gamma')
        ax.plot(x, uspl(x), 'g-', lw=2, label='spline fitting')
        ax.plot(x, newgammas, 'b-', lw=2, label='isotonic regression')
        ax.legend()
        plt.savefig(os.path.join(outdir, 'simulated_gamma.png'))
    gammas = np.full(n - 1, np.nan)
    gammas[start - 1:] = newgammas
    return gammas
def sampling(beta, d, alpha):
    """Draw a symmetric Poisson contact matrix from c = beta * d**alpha.

    Args:
        beta: scale factor.
        d: distance matrix.
        alpha: exponent.

    Returns:
        Symmetric float matrix with a zero diagonal.
    """
    rate = beta * (d ** alpha)
    # Infinite rates (e.g. zero distance with a negative exponent) become 0.
    rate[np.isinf(rate)] = 0
    draws = np.random.poisson(rate)
    # Keep only the strict upper triangle, then mirror it.
    np.fill_diagonal(draws, 0)
    draws[np.tri(draws.shape[0], k=-1).astype(bool)] = 0
    return (draws + draws.T).astype(float)
def plot_simulated(simmat, simpat, conmat, conpat, start, outdir):
    """Plot simulated vs. population contact decay for both haplotypes.

    Saves a two-panel log-log figure (maternal left, paternal right) of
    diagonal-mean contact frequency to outdir/simulated_population.png.

    Args:
        simmat, simpat: simulated maternal/paternal contact matrices.
        conmat, conpat: population maternal/paternal contact matrices.
        start: first diagonal offset to include.
        outdir: directory in which to save the figure.
    """
    fig, axes = plt.subplots(1, 2, figsize=(24, 8))
    con_sim, seq_sim = logspace_diagmean(simmat, start=start, num=500)
    con_pop, seq_pop = logspace_diagmean(conmat, start=start, num=500)
    axes[0].loglog(seq_sim, con_sim, label='maternal simulated')
    axes[0].loglog(seq_pop, con_pop, label='maternal population')
    axes[0].legend()
    con_sim, seq_sim = logspace_diagmean(simpat, start=start, num=500)
    con_pop, seq_pop = logspace_diagmean(conpat, start=start, num=500)
    axes[1].loglog(seq_sim, con_sim, label='paternal simulated')
    axes[1].loglog(seq_pop, con_pop, label='paternal population')
    axes[1].legend()
    plt.savefig(os.path.join(outdir, 'simulated_population.png'))
"""Packaging script for the latex2wolfram console tool."""
import setuptools

if __name__ == '__main__':
    # NOTE(review): distribution names with spaces are normalized by
    # packaging tools; confirm 'Latex to Wolfram' is the intended name.
    setuptools.setup(
        name='Latex to Wolfram',
        packages=setuptools.find_packages(),
        entry_points={
            'console_scripts': [
                'latex2wolfram = latex2wolfram.main:main',
            ],
        },
        setup_requires=['pytest-runner', 'ply'],
        tests_require=['pytest'],
    )
"""Smoke-test a memcached server: set/get/delete/add/append/prepend/replace."""
import sys

from pymemcache.client.base import Client

TIMEOUT_SEC = 15
ITERATION_NUM = 100

# Target server from argv, defaulting to a local memcached.
ip, port = (sys.argv[1], sys.argv[2]) if len(sys.argv) == 3 else ("localhost", "11211")

## Connect
print(f"connecting to {ip}:{port}")
# Bug fix: construct the client *before* the try block. Previously a failing
# constructor made the finally clause raise NameError on client.close().
client = Client((ip, port), connect_timeout=TIMEOUT_SEC, timeout=TIMEOUT_SEC)
try:
    for i in range(ITERATION_NUM):
        key, value = f"some_key-{i}", f"some_value-{i}"
        # cmd "set", create/update key:value
        assert client.set(key, value)
        # cmd "get", get value with key
        assert client.get(key).decode("utf-8") == value
        # cmd "delete", delete key
        assert client.delete(key)
        # cmd "add", create key:value
        # NOTE(review): "add" on an existing key is expected to fail server
        # side; this assertion only holds if the client's default noreply
        # behavior makes add() return True regardless. Confirm intent.
        assert client.set(key, value)
        assert client.add(key, value)
        # cmd "append", append value
        client.append(key, "_suffix")
        value += "_suffix"
        assert client.get(key).decode("utf-8") == value
        # cmd "prepend", prepend value
        client.prepend(key, "prefix_")
        value = "prefix_" + value
        assert client.get(key).decode("utf-8") == value
        # cmd "replace", replace value
        client.replace(key, "empty")
        assert client.get(key).decode("utf-8") == "empty"
    print("All tests passed!")
finally:
    client.close()
import sys
import numpy as np
# True when at least K combinations reach a total deliciousness of
# `border` or more (a, b and c must be sorted in descending order).
def is_ok(a, b, c, X, Y, Z, K, border):
    hits = 0
    for i in range(X):
        for j in range(Y):
            base = a[i] + b[j]
            for k in range(Z):
                # c is descending: once a sum drops below the threshold,
                # no later k can reach it either.
                if base + c[k] < border:
                    break
                hits += 1
                if hits >= K:
                    return True
    return False
def main():
    """Print the K largest sums a[i]+b[j]+c[k], one per line, descending.

    Reads x, y, z, K and the three concatenated arrays from stdin.
    Binary-searches the K-th largest sum, then enumerates all sums that
    reach it.
    """
    x, y, z, K = map(int, sys.stdin.readline().split())
    abc = np.array(sys.stdin.read().split(), dtype=np.int64)
    a = abc[:x]
    b = abc[x:x + y]
    c = abc[x + y:x + y+ z]
    # Sort descending so is_ok can prune with early breaks.
    a = np.sort(a)[::-1]
    b = np.sort(b)[::-1]
    c = np.sort(c)[::-1]
    # Find the largest `border` for which at least K sums are >= border;
    # after the loop r holds that threshold (the K-th largest sum).
    l, r = 3, 3 * 10 ** 10
    while l <= r:
        border = (l + r) // 2
        if is_ok(a, b, c, x, y, z, K, border):
            l = border + 1
        else:
            r = border - 1
    res = []
    cnt = 0
    for i in range(x):
        for j in range(y):
            for k in range(z):
                sum_deli = a[i] + b[j] + c[k]
                if sum_deli < r:
                    break
                res.append(sum_deli)
                cnt += 1
                if cnt >= K:
                    break
    # Ties at the threshold may push res past K entries; keep the top K.
    res = sorted(res, reverse=True)[:K]
    print('\n'.join(map(str, res)))
if __name__ == '__main__':
main()
|
#!/usr/bin/python3
##########################################################################
# Copyright 2022 Xu Ruijun
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
"""
CPU temperature PID controller
The program *must* run in root user.
Tested it on this environment:
Computer: Lenovo-Y50-70 Laptop
CPU: Intel(R) Core(TM) i5-4200H CPU @ 2.80GHz
OS: Ubuntu 20.04 LTS
Load: BOINC Client 7.16.6
"""
import os
import time
import psutil
import math
# CPU params
N = psutil.cpu_count(logical=True)  # logical core count
# Idle threshold: the share of total capacity the physical cores represent.
pmin = psutil.cpu_count(logical=False)/N*100
# please edit Freq(MHz) for your computer
m = 800 # min
M = 2800 # max
# PID params
Target = 72  # target core temperature (deg C, compared against get_temp())
Kp = 2  # proportional gain
Ki = 0.75  # integral gain
Kd = 0.5  # derivative gain
def get_temp():
    """Return the current reading (deg C) of the first coretemp sensor."""
    return psutil.sensors_temperatures()['coretemp'][0].current
def set_clock(MHz):
    """Write the given max frequency (MHz) to every CPU's cpufreq limit.

    Requires root: writes /sys/devices/system/cpu/*/cpufreq/scaling_max_freq.
    """
    kHz = str(int(MHz * 1000))
    for cpu in range(N):
        path = f'/sys/devices/system/cpu/cpu{cpu}/cpufreq/scaling_max_freq'
        with open(path, 'w') as f:
            f.write(kHz)
def get_clock():
    """Return the current CPU frequency (MHz), clamped just inside [m, M]."""
    freq = int(psutil.cpu_freq().current)
    if freq > M:
        return M - 100
    if freq < m:
        return m + 100
    return freq
# I didn't find a way to read the CPU voltage, so power is estimated from
# frequency alone. These parameters are not measured; they are estimated
# from the TDP.
# P = f*(1 + f)*5
# f = (sqrt(1 + 4*P/5) - 1)/2
# units f:GHz, P:W
def set_power(x):
    """Cap the CPU frequency to approximate a power draw of ``x`` watts.

    Inverts the P = f*(1+f)*5 estimate (f in GHz), clamps the resulting
    frequency to [m, M] and applies it via set_clock().

    Args:
        x: requested power in watts; negative values are treated as 0.

    Returns:
        Tuple (saturation, clock): saturation is -1/0/+1 when the request
        was clamped low / applied as-is / clamped high; clock is the MHz set.
    """
    if x < 0:
        x = 0
    clock = (math.sqrt(1 + 4 * x / 5) - 1) / 2  # GHz
    clock *= 1000  # MHz
    ret = 0
    # x was already clamped to >= 0 above, so only the frequency bounds
    # matter here (the original re-tested `x < 0`, which was dead code).
    if clock < m:
        clock = m
        ret = -1
    elif clock > M:
        clock = M
        ret = 1
    set_clock(clock)
    return ret, clock
def get_power():
    """Estimate the current power draw (W) from the current frequency."""
    ghz = get_clock() / 1000
    return ghz * (1 + ghz) * 5
def main():
    """PID control loop: adjust the CPU frequency cap to hold Target deg C."""
    delta = Target - get_temp()
    # Back-solve the integrator so the first output matches current power.
    inte = (get_power() - delta*Kp)/Ki
    d_ = delta
    c = 10000
    while True:
        # While the CPU is mostly idle and not frequency-capped, just wait.
        while psutil.cpu_percent() < pmin and get_clock() >= c-200:
            time.sleep(0.5)
        now = get_temp()
        delta = Target - now
        diff = delta - d_
        d_ = delta
        # Dead-band: ignore small fluctuations in the derivative term.
        if abs(diff) <= 1 or abs(delta) <= 2:
            diff = 0
        if abs(delta) <= 1:
            delta *= 0.75
        power = diff*Kd + delta*Kp + inte*Ki
        r, c = set_power(power)
        # Anti-windup: integrate only when unsaturated (r == 0) or when the
        # error pushes back toward the unsaturated region (r, delta differ
        # in sign).
        if r == 0 or r*delta<0:
            inte += delta
        print(f'{-delta:+2.0f}, {power:4.1f}, {c:4.0f}')
        time.sleep(1)
def Is_ready_running():
    """Return the PID of another running 'cpupid.py' process, or None.

    NOTE(review): this matches on the process *name*; a script launched as
    `python cpupid.py` typically reports 'python' — confirm the check
    behaves as intended on the target system.
    """
    my_pid = os.getpid()
    for pid in psutil.pids():
        proc = psutil.Process(pid)
        try:
            proc_name = proc.name()
        except Exception:
            # Process vanished or is inaccessible; skip it.
            continue
        if proc_name == 'cpupid.py' and pid != my_pid:
            return pid
    return None
if __name__ == '__main__':
    # Refuse to start a second controller instance.
    r = Is_ready_running()
    if r is not None:
        print(f'same name program is running, PID={r}')
    else:
        main()
|
"""
Copyright (c) 2021 Dell Inc, or its subsidiaries.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from src.configurator.configurator_parent import *
class JSONConfigurator(ConfiguratorInterface):
    """Configurator that reads fog deployment settings from a JSON file.

    The first entry of the file's "fogs" list is used as the configuration.
    Machine entries are located lazily by role, and every accessor validates
    its value before returning it. Refactor: the ~17 copies of the
    lazy-load/validate boilerplate are collapsed into private helpers, and
    the `lsit_of_os` typo is fixed; the public interface is unchanged.
    """

    def __init__(self, path_to_json_file: str) -> None:
        self.path_to_json_file = path_to_json_file
        self.conf_dict = {}
        self.ovirt_machine = {}
        self.terraform_machine = {}
        self.load_conf_data()

    def load_conf_data(self):
        """Load the JSON file and keep the first fog's configuration."""
        with open(self.path_to_json_file) as file:
            self.conf_dict = json.loads(file.read())["fogs"][0]

    def _find_machine(self, role: str) -> dict:
        """Return the last infrastructure machine with the given role ({} if none)."""
        found = {}
        for machine in self.conf_dict['infrastructure_config']:
            if machine['machine_role']['name'] == role:
                found = machine
        return found

    def load_ovirt_machine(self):
        """Locate the machine entry acting as the virtualization server."""
        self.ovirt_machine = self._find_machine(consts.ovirt_machine_role)

    def load_terraform_machine(self):
        """Locate the machine entry acting as the terraform host."""
        self.terraform_machine = self._find_machine(consts.terraform_machine_role)

    def _ovirt(self) -> dict:
        """Return the oVirt machine entry, loading it on first use."""
        if self.ovirt_machine == {}:
            self.load_ovirt_machine()
        return self.ovirt_machine

    def _terraform(self) -> dict:
        """Return the terraform machine entry, loading it on first use."""
        if self.terraform_machine == {}:
            self.load_terraform_machine()
        return self.terraform_machine

    @staticmethod
    def _validated(value, checker, label: str):
        """Return ``value`` if ``checker`` accepts it, else raise with ``label``."""
        if checker(value):
            return value
        raise ValidationValueError('Incorrect ' + label + ' : ' + value)

    def get_cloud_provider_ip(self) -> str:
        return self._validated(self._ovirt()["network"]["ip"],
                               ip_checker, 'engine_BM_IP')

    def get_cloud_provider_engine_bm_login(self) -> str:
        return self._validated(self._ovirt()["credential"]["login"],
                               name_checker, 'engine_BM_login')

    def get_cloud_provider_engine_bm_password(self) -> str:
        return self._validated(self._ovirt()["credential"]["password"],
                               password_checker, 'engine_BM_password')

    def get_cloud_provider_domain(self) -> str:
        return self._validated(self.conf_dict["cloud_provider"]["network"]["dns_name"],
                               cloud_provider_domain_check, 'oVirt_domain_name')

    def get_cloud_provider_login(self) -> str:
        return self._validated(self.conf_dict["cloud_provider"]["credential"]["login"],
                               cloud_provider_login_check, 'oVirt_Login')

    def get_cloud_provider_password(self) -> str:
        return self._validated(self.conf_dict["cloud_provider"]["credential"]["password"],
                               password_checker, 'oVirt_Password')

    def get_cloud_provider_templates(self) -> list:
        """Return the de-duplicated, normalized OS names of all machines."""
        list_of_os = []  # fixed typo: was 'lsit_of_os'
        for machine in self.conf_dict['infrastructure_config']:
            # Strip punctuation/spaces from the OS name and lower-case it.
            os_name = re.sub('[ ?.!/;:]', '', machine['operation_system']['name']).lower()
            if os_name not in list_of_os:
                if not name_checker(os_name):
                    raise ValidationValueError('Incorrect oVirt template name: ' + os_name)
                list_of_os.append(os_name)
        return list_of_os

    def get_terraform_ip(self) -> str:
        return self._validated(self._terraform()["network"]["ip"],
                               ip_checker, 'terraform_IP')

    def get_terraform_login(self) -> str:
        return self._validated(self._terraform()["credential"]["login"],
                               name_checker, 'terraform_login')

    def get_terraform_password(self) -> str:
        return self._validated(self._terraform()["credential"]["password"],
                               password_checker, 'terraform_password')

    def get_terraform_vm_name(self) -> str:
        return self._validated(self._terraform()["name"],
                               name_checker, 'terraform_name')

    def get_terraform_domain_name(self) -> str:
        return self._validated(self._terraform()["network"]["dns_name"],
                               cloud_provider_domain_check, 'terraform_domain_name')

    def get_terraform_gateway(self) -> str:
        return self._validated(self._terraform()["network"]["gateway"],
                               ip_checker, 'terraform_gateway')

    def get_terraform_netmask(self) -> str:
        return self._validated(self._terraform()["network"]["netmask"],
                               ip_checker, 'terraform_netmask')

    def get_terraform_dns_search(self) -> str:
        return self._validated(self._terraform()["network"]["dns_search"],
                               cloud_provider_domain_check, 'terraform_dns_search')

    def get_terraform_dns_servers(self) -> str:
        return self._validated(self._terraform()["network"]["dns_servers"],
                               ip_checker, 'terraform_dns_servers')

    def get_terraform_cpu(self) -> str:
        return self._validated(str(self._terraform()["resource"]["cpu"]),
                               hardware_requirements_check, 'terraform_cpu')

    def get_terraform_ram(self) -> str:
        return self._validated(str(self._terraform()["resource"]["ram"]),
                               hardware_requirements_check, 'terraform_ram')

    def get_terraform_os(self) -> str:
        terraform_os = re.sub('[ ?.!/;:]', '',
                              self._terraform()["operation_system"]["name"]).lower()
        return self._validated(terraform_os, name_checker, 'terraform_os')
|
# -*- coding: utf-8 -*-
# author: @RShirohara
import re
from . import markdown
_RUBY = re.compile(r"{(?P<text>.*?)\|(?P<ruby>.*?)}")
_TCY = re.compile(r"\^(?P<text>.*?)\^")
_NEWPAGE = re.compile(r"^={3,}$")
def build_inlineparser():
    """Create an InlineParser with all inline rules registered by priority."""
    parser = InlineParser()
    rules = [
        (parser.code_inline, "code_inline", 60),
        (parser.ruby, "ruby", 50),
        (parser.tcy, "tcy", 40),
        (parser.bold, "bold", 30),
        (parser.image, "image", 20),
        (parser.link, "link", 10),
    ]
    for handler, name, priority in rules:
        parser.reg.add(handler, name, priority)
    return parser
def build_blockparser():
    """Create a BlockParser with all block rules registered by priority."""
    parser = BlockParser()
    rules = [
        (parser.code_block, "code_block", 60),
        (parser.newpage, "newpage", 50),
        (parser.header, "header", 40),
        (parser.item_list, "item_list", 30),
        (parser.quote, "quote", 20),
        (parser.para, "para", 10),
    ]
    for handler, name, priority in rules:
        parser.reg.add(handler, name, priority)
    return parser
def build_renderer():
    """Create a Renderer with every render rule registered."""
    renderer = Renderer()
    # (rule name, priority) — higher priority runs first.
    rules = (
        ("bold", 120),
        ("code_inline", 110),
        ("image", 100),
        ("link", 90),
        ("ruby", 80),
        ("tcy", 70),
        ("newpage", 60),
        ("header", 50),
        ("code_block", 40),
        ("item_list", 30),
        ("quote", 20),
        ("para", 10),
    )
    for rule_name, priority in rules:
        renderer.reg.add(getattr(renderer, rule_name), rule_name, priority)
    return renderer
class InlineParser(markdown.InlineParser):
    """Inline parser adding ruby and tate-chu-yoko rules to markdown's."""

    def ruby(self, source):
        """Replace "{text|ruby}" spans with JSON token strings.

        Rewritten as a single ``re.sub`` pass.  The previous
        search-then-``str.replace`` loop tracked match offsets against the
        pre-replacement string, so positions drifted after every
        substitution and ``str.replace`` rewrote *all* occurrences of a
        duplicated span at once.
        """
        def _repl(match):
            text = match.group("text")
            ruby = match.group("ruby")
            return ('{"type": "ruby", '
                    + f'"content": ["{text}", "{ruby}"]'
                    + "}")
        return _RUBY.sub(_repl, source)

    def tcy(self, source):
        """Replace "^text^" spans with JSON token strings (see ruby)."""
        def _repl(match):
            return ('{"type": "tcy", '
                    + f'"content": ["{match.group("text")}"]'
                    + "}")
        return _TCY.sub(_repl, source)
class BlockParser(markdown.BlockParser):
    """Block parser adding the page-break rule to markdown's."""

    def newpage(self, source):
        """Return a newpage token when the line is '===' (3+ equals)."""
        if _NEWPAGE.match(source):
            return {"type": "newpage"}
        return None
class Renderer(markdown.Renderer):
    """Renderer emitting source text for the extension tokens."""

    def newpage(self, source):
        """Render a page break as a line of eight equals signs."""
        return "========"

    def ruby(self, source):
        """Render a ruby token back to "{text|ruby}" form."""
        text, ruby = source["content"][0], source["content"][1]
        return "{" + text + "|" + ruby + "}"

    def tcy(self, source):
        """Render a tate-chu-yoko token back to "^text^" form."""
        return "^" + source["content"][0] + "^"
|
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseNotFound
from django.utils.encoding import smart_str
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.db import connections
from django.core.paginator import InvalidPage, EmptyPage, Paginator
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.contrib import messages
from servers.models import Server, SshKey, ServerUser
from servers.forms import ServerForm, ServerSshKey, SshKeyForm
from groups.models import Group
from django.contrib.auth.models import User
@login_required
def servers_list(request):
    """Show the server list: staff see everything, users their own."""
    qs = Server.objects.order_by('-is_vm', 'vm_host__name', 'name')
    if not request.user.is_staff:
        qs = qs.filter(users_owning_the_server=request.user)
    liste = qs.all()
    return render_to_response('servers/servers/list.html', {'liste': liste}, context_instance=RequestContext(request))
@login_required
def servers_show(request, pk):
    """Show a server's details; owners and staff only (404 otherwise)."""
    object = get_object_or_404(Server, pk=pk)
    is_owner = request.user in object.users_owning_the_server.all()
    if not (request.user.is_staff or is_owner):
        raise Http404
    groups = Group.objects.order_by('name').all()
    return render_to_response('servers/servers/show.html', {'object': object, 'groups': groups}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def servers_edit(request, pk):
    """Create or edit a server and sync its ServerUser rows.

    A pk that does not resolve to a Server creates a new one.
    """
    try:
        object = Server.objects.get(pk=pk)
    except (Server.DoesNotExist, ValueError):
        # Narrowed from a bare ``except:`` that silently swallowed every
        # error (including programming bugs); only "no such server" /
        # malformed pk should fall back to creating a new instance.
        object = Server()
    if request.method == 'POST':  # If the form has been submitted...
        form = ServerForm(request.POST, instance=object)
        users = request.POST.get('users')
        if form.is_valid():  # If the form is valid
            object = form.save()
            # Diff posted user names against stored rows: keep matches,
            # delete the rest, then create rows for the new names.
            toadd_users = users.split(',')
            for u in object.serveruser_set.all():
                if u.name in toadd_users:
                    toadd_users.remove(u.name)
                else:
                    u.delete()
            for u in toadd_users:
                ServerUser(server=object, name=u).save()
            messages.success(request, 'The server has been saved.')
            return redirect(reverse('servers.views.servers_list'))
    else:
        form = ServerForm(instance=object)
        # Pre-fill the users widget with a comma-separated name list.
        users = ''
        if object.pk:
            users = ','.join(u.name for u in object.serveruser_set.all())
    all_users = ['root']
    for u in User.objects.order_by('username').all():
        all_users.append(u.username)
    return render_to_response('servers/servers/edit.html', {'form': form, 'users': users, 'all_users': all_users}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def servers_delete(request, pk):
    """Delete a server and return to the server list."""
    server = get_object_or_404(Server, pk=pk)
    server.delete()
    messages.success(request, 'Server has been deleted.')
    return redirect(reverse('servers.views.servers_list', args=()))
@login_required
@staff_member_required
def servers_keys_add(request, pk):
    """Attach a new SSH key to a server."""
    server = get_object_or_404(Server, pk=pk)
    baseKey = SshKey(server=server)
    if request.method == 'POST':  # If the form has been submitted...
        form = ServerSshKey(instance=baseKey, data=request.POST)
        if form.is_valid():  # If the form is valid
            form.save()
            messages.success(request, 'The ssh key has been added.')
            if pk:
                return redirect(reverse('servers.views.servers_show', args=(pk, )))
            else:
                # NOTE(review): pk comes from the URL and was already
                # required by get_object_or_404 above, so this branch
                # looks unreachable here — presumably shared boilerplate
                # with a "my own keys" flow; confirm before removing.
                return redirect(reverse('servers.views.me',))
    else:
        form = ServerSshKey(instance=baseKey)
    return render_to_response('servers/add_ssh.html', {'form': form, 'ownMode': not pk}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def servers_keys_delete(request, pk, keyPk):
    """Delete one of a server's SSH keys, then show the server again."""
    server = get_object_or_404(Server, pk=pk)
    key = get_object_or_404(SshKey, server=server, pk=keyPk)
    key.delete()
    messages.success(request, 'The ssh key has been deleted.')
    return redirect(reverse('servers.views.servers_show', args=(pk, )))
@login_required
@staff_member_required
def keys_list(request):
    """Show every SSH key, ordered by server name."""
    keys = SshKey.objects.order_by('server__name').all()
    return render_to_response('servers/keys/list.html', {'liste': keys}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def keys_show(request, pk):
    """Show details of one SSH key."""
    key = get_object_or_404(SshKey, pk=pk)
    return render_to_response('servers/keys/show.html', {'object': key}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def keys_edit(request, pk):
    """Create or edit an SSH key; an unresolvable pk creates a new one."""
    try:
        object = SshKey.objects.get(pk=pk)
    except (SshKey.DoesNotExist, ValueError):
        # Narrowed from a bare ``except:`` that hid real errors
        # (consistent with servers_edit).
        object = SshKey()
    if request.method == 'POST':  # If the form has been submitted...
        form = SshKeyForm(request.POST, instance=object)
        if form.is_valid():  # If the form is valid
            object = form.save()
            messages.success(request, 'The ssh key has been saved.')
            return redirect(reverse('servers.views.keys_list'))
    else:
        form = SshKeyForm(instance=object)
    return render_to_response('servers/keys/edit.html', {'form': form}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def keys_delete(request, pk):
    """Delete an SSH key and return to the key list."""
    key = get_object_or_404(SshKey, pk=pk)
    key.delete()
    messages.success(request, 'The ssh key has been deleted.')
    return redirect(reverse('servers.views.keys_list', args=()))
@login_required
@staff_member_required
def servers_groups_add(request, pk):
    """Add a server to a group, either with all its keys or one key."""
    srv = get_object_or_404(Server, pk=pk)
    grp = get_object_or_404(Group, pk=request.GET.get('groupPk'))
    chosen_key = request.GET.get('keyPk')
    if chosen_key == '0':
        # keyPk '0' is the sentinel for "the whole server, every key".
        grp.servers.add(srv)
        messages.success(request, 'The server has been added to the group (all keys).')
    else:
        key = get_object_or_404(SshKey, pk=chosen_key, server=srv)
        grp.servers_keys.add(key)
        messages.success(request, 'The server has been added to the group (' + key.__unicode__() + ').')
    return redirect(reverse('servers.views.servers_show', args=(srv.pk, )))
@login_required
@staff_member_required
def servers_groups_delete(request, pk, groupPk):
    """Remove a server from a group."""
    srv = get_object_or_404(Server, pk=pk)
    grp = get_object_or_404(Group, pk=groupPk)
    grp.servers.remove(srv)
    messages.success(request, 'The server has been removed from the group.')
    return redirect(reverse('servers.views.servers_show', args=(srv.pk, )))
@login_required
@staff_member_required
def servers_groups_key_delete(request, pk, groupPk, keyPk):
    """Remove one server key from a group."""
    srv = get_object_or_404(Server, pk=pk)
    key = get_object_or_404(SshKey, pk=keyPk, server=srv)
    grp = get_object_or_404(Group, pk=groupPk)
    grp.servers_keys.remove(key)
    messages.success(request, 'The key has been removed from the group.')
    return redirect(reverse('servers.views.servers_show', args=(srv.pk, )))
@login_required
@staff_member_required
def servers_groupsaccess_add(request, pk):
    """Allow a group access to a server, for all users or a single one."""
    srv = get_object_or_404(Server, pk=pk)
    grp = get_object_or_404(Group, pk=request.GET.get('groupPk'))
    chosen_user = request.GET.get('userPk')
    if chosen_user == '0':
        # userPk '0' is the sentinel for "every user on this server".
        grp.allowed_servers.add(srv)
        messages.success(request, 'The server has been added to the group (all users).')
    else:
        user = get_object_or_404(ServerUser, pk=chosen_user, server=srv)
        grp.allowed_servers_users.add(user)
        messages.success(request, 'The server has been added to the group (' + user.name + ').')
    return redirect(reverse('servers.views.servers_show', args=(srv.pk, )))
@login_required
@staff_member_required
def servers_groupsaccess_delete(request, pk, groupPk):
    """Remove a server from a group's allowed servers."""
    srv = get_object_or_404(Server, pk=pk)
    grp = get_object_or_404(Group, pk=groupPk)
    grp.allowed_servers.remove(srv)
    messages.success(request, 'The server has been removed from the group.')
    return redirect(reverse('servers.views.servers_show', args=(srv.pk, )))
@login_required
@staff_member_required
def servers_groupsaccess_user_delete(request, pk, groupPk, userPk):
    """Remove a server user from a group's allowed users."""
    srv = get_object_or_404(Server, pk=pk)
    usr = get_object_or_404(ServerUser, pk=userPk)
    grp = get_object_or_404(Group, pk=groupPk)
    grp.allowed_servers_users.remove(usr)
    messages.success(request, 'The user has been removed from the group.')
    return redirect(reverse('servers.views.servers_show', args=(srv.pk, )))
@login_required
@staff_member_required
def servers_map(request):
    """Show a map of Proxmox hosts and standalone (non-VM) servers."""
    proxmox = Server.objects.order_by('name').filter(is_proxmox=True).all()
    outside = Server.objects.order_by('name').filter(is_proxmox=False, is_vm=False).all()
    return render_to_response('servers/map.html', {'proxmox_servers': proxmox, 'outside_servers': outside}, context_instance=RequestContext(request))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 14:49:57 2019
Title: MP4-Medical Image Processing
@author: MP4 Team
"""
# Main Program to Start
# Importing required PyQt5 class
import sys
import os
from PyQt5.QtGui import QIcon, QColor
from PyQt5.QtWidgets import (QApplication, QMainWindow, QAction, QWidget, QTableWidget, QTableWidgetItem,
QVBoxLayout, QFileDialog)
# Importing required Matplotlib and Numpy class
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
#from matplotlib.pyplot import plt
import numpy as np
import re
import h5py
# Importing nibabel for Nifti files
import nibabel as nib
# Importing backend class for Image display and Image event
from cap_0519_display import ImageDisplay
from cap_0519_ccontroller import ClickController
from cap_0519_scontroller import ScrollController
from cap_0519_roi import SingleWindowCtr
from cap_0519_segmen import ValidateWindowCtr
from cap_0519_patient import *
# Window to display image in single axes
# Window to display image in single axes
class ValidateWindow(QMainWindow):
    """Window comparing voxel, ground-truth and segmented slices side by side."""

    def __init__(self, vol_trans, vol_truth, vol_segmen, index_trans, index_truth, index_segmen):
        super().__init__()
        # The vol_* arguments are 3-D slice stacks (indexed by slice number);
        # the index_* arguments are shared lists whose last entry is the
        # currently displayed slice.
        self.vol_trans = vol_trans
        self.vol_truth = vol_truth
        self.vol_segmen = vol_segmen
        self.index_trans = index_trans
        self.index_truth = index_truth
        self.index_segmen = index_segmen
        # Set validation window dimension and title; and plot
        self.setWindowIcon(QIcon('menu_icon/uomLog.png'))
        self.setWindowTitle('Validation of Medical Image Segmentation')
        self.setGeometry(450, 50, 650, 650)
        self.plot_validate_image()

    # Plot the three images side by side on the window
    def plot_validate_image(self):
        """Draw the current slice of each volume and wire the controller."""
        fig = Figure()
        self.canvas = FigureCanvas(fig)
        ax_trans = fig.add_subplot(223, adjustable='box', aspect='auto')
        im_trans = ax_trans.imshow(self.vol_trans[self.index_trans[-1]], cmap="gray")
        ax_trans.set_title('Voxel Image', color = 'b')
        ax_truth = fig.add_subplot(221, adjustable='box', aspect='auto')
        im_truth = ax_truth.imshow(self.vol_truth[self.index_truth[-1]], cmap="gray")
        ax_truth.set_title('Ground Truth Image', color = 'b')
        ax_segmen = fig.add_subplot(222, adjustable='box', aspect='auto')
        im_segmen = ax_segmen.imshow(self.vol_segmen[self.index_segmen[-1]], cmap="gray")
        ax_segmen.set_title('Segmented Image', color = 'b')
        # Keep a reference to the controller on self (presumably so its
        # event hooks are not garbage-collected — TODO confirm).
        self.segmen_ctrl = ValidateWindowCtr(fig, im_trans, im_truth, im_segmen, self.vol_trans, self.vol_truth,
                                             self.vol_segmen, ax_trans, ax_truth, ax_segmen, self.index_trans,
                                             self.index_truth, self.index_segmen)
        # Display canvas of Matplotlib as central Widget
        self.widget = QWidget()
        self.setCentralWidget(self.widget)
        self.widget.setLayout(QVBoxLayout())
        self.widget.layout().addWidget(self.canvas)
# Window to display ROI data
# Window to display ROI data
class RoiWindow(QMainWindow):
    """Window showing the collected region-of-interest rows in a table."""

    def __init__(self, roi_data):
        super().__init__()
        # roi_data rows look like [patient_id, plane, slice, x1, y1, x2, y2]
        # (see the table headings below); the list is shared with MainWindow.
        self.roi_data = roi_data
        self.roi_window()

    # ROI toolbar and window
    def roi_window(self):
        """Build the toolbar (save/close) and the ROI table."""
        # Set roi window dimension and title
        self.setWindowIcon(QIcon('menu_icon/uomLog.png'))
        self.setWindowTitle('Current Region of Interest data')
        self.setGeometry(175, 75, 850, 350)
        # ROI save toolbar
        toolbar = self.addToolBar('ROI save and close')
        save_action = QAction(QIcon('menu_icon/file_save.png'), 'Save ROI data', self)
        save_action.triggered.connect(self.save_roi_data)
        toolbar.addAction(save_action)
        # ROI close toolbar
        close_action = QAction(QIcon('menu_icon/file_close.png'), 'Close ROI window', self)
        close_action.triggered.connect(self.close)
        toolbar.addAction(close_action)
        # Display current roi data
        self.create_table()
        self.main_widget = QWidget(self)
        layout = QVBoxLayout(self.main_widget)
        layout.addWidget(self.tableWidget)
        self.setCentralWidget(self.main_widget)

    # Function to save roi data
    def save_roi_data(self):
        """Append the ROI rows to roi_data_file.txt and open the file.

        Fixes: the file is now closed (and therefore flushed) *before*
        os.startfile opens it — previously startfile ran while the data
        could still sit unflushed in the write buffer.  A leftover debug
        print was removed.
        """
        with open('roi_data_file.txt', 'a') as text_file:
            for items in self.roi_data:
                for item in items:
                    text_file.write(str(item) + ' ')
                text_file.write('\n')
        # Open saved text file (Windows-only API)
        os.startfile('roi_data_file.txt')

    # Function to create table and show roi data
    def create_table(self):
        """Fill the table: a heading row, then one row per ROI entry."""
        table_headings = ['Patient_ID', 'Plane', 'Slice No', 'x_1', 'y_1', 'x_2', 'y_2', 'Remove']
        self.tableWidget = QTableWidget()
        if len(self.roi_data) > 0:
            self.tableWidget.setRowCount(len(self.roi_data)+1)
            self.tableWidget.setColumnCount(len(table_headings))
            for i in range(len(self.roi_data)+1):
                for j in range(len(table_headings)):
                    if i == 0 or j == len(table_headings)-1:
                        if i == 0:
                            # Row 0 holds the column headings.
                            self.tableWidget.setItem(i, j, QTableWidgetItem(table_headings[j]))
                        else:
                            # Last column is a clickable "Delete" cell.
                            self.tableWidget.setItem(i,j, QTableWidgetItem("Delete"))
                            self.tableWidget.item(i,j).setBackground(QColor(66,116,245))
                    else:
                        self.tableWidget.setItem(i, j, QTableWidgetItem(str(self.roi_data[i-1][j])))
        else:
            self.statusBar().showMessage('First update the region of interest!', 2500)
        # Double click to delete
        self.tableWidget.doubleClicked.connect(self.delete_row)

    # To delete the rows
    def delete_row(self):
        """Delete the ROI row whose "Delete" cell was double-clicked."""
        for tblItem in self.tableWidget.selectedItems():
            if tblItem.text() == 'Delete':
                # Row 0 is the heading row, hence the -1 offset into roi_data.
                self.roi_data.pop(tblItem.row()-1)
                self.tableWidget.removeRow(tblItem.row())
# Window to display image in single axes
class SingleWindow(QMainWindow):
    """Window showing a single anatomical plane with ROI-selection support."""

    def __init__(self, ax_name, vol_data, ax_index, roi_data, pat_id):
        super().__init__()
        # ax_name:  plane title string; vol_data: the slice stack;
        # ax_index: shared list whose last entry is the current slice;
        # roi_data: shared list the confirmed ROI rows are appended to.
        self.ax_name = ax_name
        self.vol_data = vol_data
        self.ax_index = ax_index
        self.roi_data = roi_data
        self.pat_id = pat_id
        # Per-window staging buffer for ROI rows; only the last entry is
        # promoted into roi_data when the user clicks "Update".
        self.roi_data_tem = []
        self.menu_single_window()

    # Menu and toolbar for the window
    def menu_single_window(self):
        """Build the toolbar (update/clear/close) and plot the image."""
        # Set single window dimension and title
        self.setWindowIcon(QIcon('menu_icon/uomLog.png'))
        self.setWindowTitle('Single Axes Image ')
        self.setGeometry(750, 50, 600, 600)
        # Update roi data toolbar
        toolbar = self.addToolBar('Update roi data and clear window')
        update_action = QAction(QIcon('menu_icon/file_open.png'), 'Update ROI data', self)
        # NOTE(review): method name "updata_roi_data" is a typo kept as-is
        # because it is the connected slot name.
        update_action.triggered.connect(self.updata_roi_data)
        toolbar.addAction(update_action)
        clear_action = QAction(QIcon('menu_icon/file_file_1.png'), 'Clear window', self)
        clear_action.triggered.connect(self.plot_single_image)
        toolbar.addAction(clear_action)
        close_action = QAction(QIcon('menu_icon/file_close.png'), 'Close single window', self)
        close_action.triggered.connect(self.close)
        toolbar.addAction(close_action)
        self.plot_single_image()

    # Update ROI data
    def updata_roi_data(self):
        """Promote the most recent staged ROI row into the shared list.

        Raises IndexError if nothing has been staged yet — presumably the
        user is expected to draw an ROI first; confirm intended behavior.
        """
        self.roi_data.append(self.roi_data_tem[-1])
        self.statusBar().showMessage('Roi updated!', 1500)

    # Plot single image on window
    def plot_single_image(self):
        """Draw the current slice and wire up the ROI controller."""
        fig = Figure()
        self.canvas = FigureCanvas(fig)
        ax_single = fig.add_subplot(111, adjustable='box', aspect='auto')
        im_single = ax_single.imshow(self.vol_data[self.ax_index[-1]], cmap="gray")
        ax_single.set_title(self.ax_name, color = 'b')
        # Keep a reference to the controller so it stays alive with the window.
        self.roi_ctrl = SingleWindowCtr(fig, im_single, self.vol_data, ax_single, self.ax_index, self.roi_data_tem,
                                        self.pat_id, self.ax_name)
        # Display canvas of Matplotlib as central Widget
        self.widget = QWidget()
        self.setCentralWidget(self.widget)
        self.widget.setLayout(QVBoxLayout())
        self.widget.layout().addWidget(self.canvas)
# Main window class
# Main window class
class MainWindow(QMainWindow):
    """Top-level window: menus, toolbar and the three-plane image display."""

    def __init__(self):
        super().__init__()
        # The one-element index lists are shared with the controller and
        # child windows, which mutate them in place so every view follows
        # the currently selected slice.
        self.roi_data, self.tran_index, self.fron_index, self.sagi_index = [], [0], [0], [0]
        self.index_truth, self.index_segmen = [0], [0]
        self.menu_window()

    # Menubar function
    def menu_window(self):
        """Build the menu bar, the toolbar and the initial status hint."""
        # Set main window dimension and title
        self.setWindowIcon(QIcon('menu_icon/uomLog.png'))
        self.setWindowTitle('MP4 Medical Image Processor')
        self.setGeometry(75, 50, 700, 650)
        # Create main menu
        main_menu = self.menuBar()
        basic_menu = main_menu.addMenu('Basic')
        analysis_menu = main_menu.addMenu('Image Analysis')
        evalution_menu = main_menu.addMenu('Evaluation')
        # Create submenu action and add to main menu
        # Basic submenu
        nifti_action = QAction(QIcon('menu_icon/file_file.png'), 'Load Nifti', self)
        nifti_action.setShortcut('Ctrl+N')
        nifti_action.triggered.connect(self.load_nifti_file)
        basic_menu.addAction(nifti_action)
        # Image Analysis submenu
        segmen_action = QAction('Run Segmentation', self)
        segmen_action.triggered.connect(self.load_segmentation_file)
        analysis_menu.addAction(segmen_action)
        # Evaluation submenu (label typo fixed: "Search round Truth")
        gtruth_action = QAction('Search Ground Truth', self)
        gtruth_action.triggered.connect(self.load_nifti_file)
        evalution_menu.addAction(gtruth_action)
        # Create toolbar for most common functions
        toolbar = self.addToolBar('Change Click or scroll View')
        # Make image clickable
        cursor_action = QAction(QIcon('menu_icon/mouse_click.png'), 'Image click', self)
        cursor_action.triggered.connect(self.set_click_enabled)
        toolbar.addAction(cursor_action)
        # Make image scrollable
        scroll_action = QAction(QIcon('menu_icon/mouse_scroll.png'), 'Image scroll', self)
        scroll_action.triggered.connect(self.set_scroll_enabled)
        toolbar.addAction(scroll_action)
        # Zoom Transverse plane image
        tran_action = QAction(QIcon('menu_icon/tran.png'), 'Show transverse plane', self)
        tran_action.triggered.connect(self.show_tran_plane)
        toolbar.addAction(tran_action)
        # Zoom Frontal plane image
        fron_action = QAction(QIcon('menu_icon/fron.png'), 'Show frontal plane', self)
        fron_action.triggered.connect(self.show_fron_plane)
        toolbar.addAction(fron_action)
        # Zoom Sagittal plane image
        sagi_action = QAction(QIcon('menu_icon/sagi.png'), 'Show sagittal plane', self)
        sagi_action.triggered.connect(self.show_sagi_plane)
        toolbar.addAction(sagi_action)
        # Display roi table
        roi_action = QAction(QIcon('menu_icon/table_layout.png'), 'Show roi data', self)
        roi_action.triggered.connect(self.show_roi_data)
        toolbar.addAction(roi_action)
        # Direct User to use Menu to display images
        self.statusBar().showMessage('Use Basic Menu to display medical images', 5000)

    # Extract pixel data from medical image file of NIFTI format
    def load_nifti_file(self):
        """Load a NIfTI volume, derive the three viewing planes, plot them."""
        try:
            # To extract pixel data
            vol_file_path, _ = QFileDialog.getOpenFileName(self, 'Select Medical image', '', 'Image Files (*.nii *.nii.gz)')
            folder_path, vol_name = os.path.split(vol_file_path)
            wexten_name = os.path.splitext(vol_name)[0]
            file_num = wexten_name.split('-')[-1]
            # Companion ground-truth / prediction files are located by
            # naming convention. NOTE(review): '\\' makes this Windows-only.
            self.gtruth_file_path = folder_path+'\\'+'segmentation-'+file_num+'.nii'
            self.segmen_file_path = folder_path+'\\'+'prediction-'+file_num+'.h5'
            nifti_file = nib.load(vol_file_path)
            pixel_arr = nifti_file.get_fdata()*255
            tran_arr = pixel_arr.transpose(2, 1, 0)
            fron_arr = pixel_arr.transpose(1, 2, 0)
            sagi_arr = pixel_arr.transpose(0, 2, 1)
            self.vol_tran = np.flip(tran_arr)
            self.vol_fron = np.flip(fron_arr)
            self.vol_sagi = np.flip(sagi_arr)
            # Initial slices: one third in (transverse), mid-volume otherwise.
            self.tran_index[0] = self.vol_tran.shape[0] // 3
            self.fron_index[0] = self.vol_fron.shape[0] // 2
            self.sagi_index[0] = self.vol_sagi.shape[0] // 2
            # To extract header data (last .mat file in the folder wins)
            self.new_matdata_path = None
            for file in os.listdir(folder_path):
                if file.endswith('.mat'):
                    matdata_path = os.path.join(folder_path, file)
                    self.new_matdata_path = matdata_path.replace(os.sep, '/')
            # Plot the image using pixel data
            self.plot_images()
        except Exception:
            # Narrowed from a bare ``except:``; any load failure is
            # deliberately reduced to a status-bar message.
            self.statusBar().showMessage('File type not supported or empty !', 2500)

    # Extract pixel data from h5 and ground truth file
    def load_segmentation_file(self):
        """Load ground truth (.nii) and prediction (.h5), open validation."""
        try:
            # To extract pixel data
            nifti_file = nib.load(self.gtruth_file_path)
            voxel_nifti = nifti_file.get_fdata()*255
            # NOTE(review): Dataset.value was removed in h5py >= 3.0 —
            # switch to ["predictions"][()] if h5py is upgraded.
            voxel_segmen = h5py.File(self.segmen_file_path,"r")["predictions"].value
            nifti_pixel_arr = np.flip(voxel_nifti.transpose(2, 1, 0))
            segmen_pixel_arr = np.flip(voxel_segmen.transpose(2, 1, 0))
            self.index_truth[0] = nifti_pixel_arr.shape[0] // 3
            self.index_segmen[0] = segmen_pixel_arr.shape[0] // 3
            # Plot the image using pixel data
            self.valid_window = ValidateWindow(self.vol_tran, nifti_pixel_arr, segmen_pixel_arr, self.tran_index,
                                               self.index_truth, self.index_segmen)
            self.valid_window.show()
        except FileNotFoundError as err:
            # Show the actual error text; the exception *class* object was
            # previously passed to showMessage, which expects a string.
            self.statusBar().showMessage(str(err), 2500)

    # Make image clicking enabled
    def set_click_enabled(self):
        """Connect click navigation and disconnect scroll navigation."""
        try:
            self.click_image = self.canvas.mpl_connect('button_press_event', self.click_ctrl.button_press_events)
            if self.scroll_image: self.canvas.mpl_disconnect(self.scroll_image)
            self.statusBar().showMessage('View mode change to clickable mode enabled!', 1500)
        except Exception:
            # Broad on purpose: canvas/controllers may not exist yet.
            self.statusBar().showMessage('Try loading image first or try again!', 2500)

    # Make image scrolling enabled
    def set_scroll_enabled(self):
        """Connect scroll navigation and disconnect click navigation."""
        try:
            self.scroll_image = self.canvas.mpl_connect('axes_enter_event', self.scroll_ctrl.fig_enter_event)
            if self.click_image: self.canvas.mpl_disconnect(self.click_image)
            self.statusBar().showMessage('View mode change to scroll mode enabled!', 1500)
        except Exception:
            self.statusBar().showMessage('Try loading image first or try again!', 2500)

    # Show Transverse plane on single window
    def show_tran_plane(self):
        """Open the transverse plane in its own window."""
        try:
            self.single_window = SingleWindow('Transverse Plane', self.vol_tran, self.tran_index, self.roi_data, self.pat_id)
            self.single_window.show()
        except Exception:
            self.statusBar().showMessage('Try loading image first or try again!', 1500)

    # Show Frontal plane on single window
    def show_fron_plane(self):
        """Open the frontal plane in its own window."""
        try:
            self.single_window = SingleWindow('Frontal Plane', self.vol_fron, self.fron_index, self.roi_data, self.pat_id)
            self.single_window.show()
        except Exception:
            self.statusBar().showMessage('Try loading image first or try again!', 1500)

    # Show Sagittal plane on single window
    def show_sagi_plane(self):
        """Open the sagittal plane in its own window."""
        try:
            self.single_window = SingleWindow('Sagittal Plane', self.vol_sagi, self.sagi_index, self.roi_data, self.pat_id)
            self.single_window.show()
        except Exception:
            self.statusBar().showMessage('Try loading image first or try again!', 1500)

    # Open ROI table
    def show_roi_data(self):
        """Open the ROI table window."""
        self.roi_window = RoiWindow(self.roi_data)
        self.roi_window.show()

    # Plot the image using pixel data
    def plot_images(self):
        """Draw the three planes plus patient info, wire the controllers."""
        # Define fig, canvas, subplots and initialize roi data set
        fig = Figure()
        self.canvas = FigureCanvas(fig)
        ax_tran = fig.add_subplot(221, adjustable='box', aspect='auto')
        ax_fron = fig.add_subplot(223, adjustable='box', aspect='auto')
        ax_sagi = fig.add_subplot(224, adjustable='box', aspect='auto')
        # To display Patient information
        ax_info = fig.add_subplot(222, adjustable='box', aspect='auto')
        ax_info.axis('off')
        if self.new_matdata_path:
            # Renamed from ``id`` (shadowed the builtin).
            id_rec = Patient(self.new_matdata_path, 'PatientID')
            name_rec = Patient(self.new_matdata_path, 'PatientName')
            age_rec = Patient(self.new_matdata_path, 'PatientAge')
            id_str = id_rec.patient_dict()
            name_str = name_rec.patient_dict()
            age_str = age_rec.patient_dict()
            patient_id = re.findall('\d+', id_str)
            patient_name = re.findall('\w+', name_str)
            patient_age = re.findall('\w+', age_str)
            ax_info.text(0.1, 0.8, 'patient ID: ' + patient_id[0], color='k')
            ax_info.text(0.1, 0.6, 'patient name: ' + patient_name[0], color='k')
            ax_info.text(0.1, 0.4, 'patient age: ' + patient_age[0], color='k')
            self.pat_id = patient_id[0]
        else:
            ax_info.text(0.1, 0.8, 'patient ID: ' + 'No dcmHeader.mat', color='k')
            self.pat_id = 'No dcmHeader.mat'
        # To display image, click image and scroll image
        im_dis = ImageDisplay(self.vol_tran, self.vol_fron, self.vol_sagi, ax_tran, ax_fron, ax_sagi, self.tran_index,
                              self.fron_index, self.sagi_index)
        self.click_ctrl = ClickController(fig, im_dis, self.vol_tran, self.vol_fron, self.vol_sagi, ax_tran,
                                          ax_fron, ax_sagi, self.tran_index, self.fron_index, self.sagi_index)
        self.scroll_ctrl = ScrollController(fig, im_dis, self.vol_tran, self.vol_fron, self.vol_sagi, ax_tran,
                                            ax_fron, ax_sagi, self.tran_index, self.fron_index, self.sagi_index)
        # Display canvas of Matplotlib as central Widget
        self.widget = QWidget()
        self.setCentralWidget(self.widget)
        self.widget.setLayout(QVBoxLayout())
        self.widget.layout().addWidget(self.canvas)
# Start of main program to run
if __name__ == '__main__':
    # Launch the Qt application and block in the event loop until exit.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
|
# GPL # "author": Paulo_Gomes
import bpy
from mathutils import Quaternion, Vector
from math import cos, sin, pi
from bpy.props import (
FloatProperty,
IntProperty,
BoolProperty,
)
# Create a new mesh (object) from verts/edges/faces
# verts/edges/faces ... List of vertices/edges/faces for the
# new mesh (as used in from_pydata)
# name ... Name of the new mesh (& object)
def create_mesh_object(context, verts, edges, faces, name):
    """Create a mesh named *name* from pydata and link it as an object.

    verts/edges/faces are the lists accepted by Mesh.from_pydata.
    """
    new_mesh = bpy.data.meshes.new(name)
    new_mesh.from_pydata(verts, edges, faces)
    # Recalculate derived geometry data after filling the mesh.
    new_mesh.update()
    from bpy_extras import object_utils
    return object_utils.object_data_add(context, new_mesh, operator=None)
# A very simple "bridge" tool
# A very simple "bridge" tool
def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
    """Bridge two vertex-index loops into quad faces (triangles when
    vertIdx1 is a single pole vertex, i.e. a fan).

    closed  -- also bridge the last segment back to the first
    flipped -- reverse the face winding
    Returns a list of face index lists, or None for unusable input
    (empty loops, both loops shorter than 2, or incompatible lengths).
    """
    if not vertIdx1 or not vertIdx2:
        return None
    if len(vertIdx1) < 2 and len(vertIdx2) < 2:
        return None
    fan = False
    if len(vertIdx1) != len(vertIdx2):
        if len(vertIdx1) == 1 and len(vertIdx2) > 1:
            fan = True
        else:
            return None
    total = len(vertIdx2)
    faces = []
    if closed:
        # Bridge the end of the loop back to its start.
        if flipped:
            face = [vertIdx1[0], vertIdx2[0], vertIdx2[total - 1]]
            if not fan:
                face.append(vertIdx1[total - 1])
        else:
            face = [vertIdx2[0], vertIdx1[0]]
            if not fan:
                face.append(vertIdx1[total - 1])
            face.append(vertIdx2[total - 1])
        faces.append(face)
    # Bridge the remaining consecutive segment pairs.
    for num in range(total - 1):
        if flipped:
            if fan:
                face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]
            else:
                face = [vertIdx2[num], vertIdx1[num],
                        vertIdx1[num + 1], vertIdx2[num + 1]]
        else:
            if fan:
                face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]
            else:
                face = [vertIdx1[num], vertIdx2[num],
                        vertIdx2[num + 1], vertIdx1[num + 1]]
        faces.append(face)
    return faces
def add_twisted_torus(major_rad, minor_rad, major_seg, minor_seg, twists):
    """Generate verts/faces for a torus whose tube twists while sweeping.

    Returns (verts, faces) as accepted by Mesh.from_pydata.
    """
    TWO_PI = pi * 2.0
    z_axis = (0.0, 0.0, 1.0)
    verts, faces = [], []
    ring_prev = []
    for major_index in range(major_seg):
        # Rotation placing this cross-section ring around the Z axis.
        quat = Quaternion(z_axis, (major_index / major_seg) * TWO_PI)
        # Twist angle accumulated at this ring.
        rot_twists = TWO_PI * major_index / major_seg * twists
        ring = []
        # Create section ring
        for minor_index in range(minor_seg):
            angle = (TWO_PI * minor_index / minor_seg) + rot_twists
            vec = Vector((
                major_rad + (cos(angle) * minor_rad),
                0.0,
                sin(angle) * minor_rad))
            vec = quat * vec
            ring.append(len(verts))
            verts.append(vec)
        # Remember the very first edge loop for closing the torus.
        if major_index == 0:
            ring_first = ring
        # Bridge the previous ring with the current one.
        if ring_prev:
            faces.extend(createFaces(ring_prev, ring, closed=True))
        ring_prev = ring
    # Bridge the final ring back to the first one.
    faces.extend(createFaces(ring_prev, ring_first, closed=True))
    return verts, faces
class AddTwistedTorus(bpy.types.Operator):
    """Add-mesh operator that builds a twisted torus primitive."""
    bl_idname = "mesh.primitive_twisted_torus_add"
    bl_label = "Add Twisted Torus"
    bl_description = "Construct a twisted torus mesh"
    bl_options = {'REGISTER', 'UNDO', 'PRESET'}

    # Distance from the origin to the centre of the tube cross section.
    major_radius = FloatProperty(
        name="Major Radius",
        description="Radius from the origin to the"
        " center of the cross section",
        min=0.01,
        max=100.0,
        default=1.0
    )
    # Radius of the tube itself.
    minor_radius = FloatProperty(
        name="Minor Radius",
        description="Radius of the torus' cross section",
        min=0.01,
        max=100.0,
        default=0.25
    )
    major_segments = IntProperty(
        name="Major Segments",
        description="Number of segments for the main ring of the torus",
        min=3,
        max=256,
        default=48
    )
    minor_segments = IntProperty(
        name="Minor Segments",
        description="Number of segments for the minor ring of the torus",
        min=3,
        max=256,
        default=12
    )
    twists = IntProperty(
        name="Twists",
        description="Number of twists of the torus",
        min=0,
        max=256,
        default=1
    )
    # When enabled, major/minor radii are derived from the absolute
    # exterior/interior radii below in execute().
    use_abso = BoolProperty(
        name="Use Int/Ext Controls",
        description="Use the Int/Ext controls for torus dimensions",
        default=False
    )
    abso_major_rad = FloatProperty(
        name="Exterior Radius",
        description="Total Exterior Radius of the torus",
        min=0.01,
        max=100.0,
        default=1.0
    )
    abso_minor_rad = FloatProperty(
        name="Inside Radius",
        description="Total Interior Radius of the torus",
        min=0.01,
        max=100.0,
        default=0.5
    )

    def execute(self, context):
        """Build the torus geometry and add it to the scene."""
        if self.use_abso is True:
            # Tube radius is half the ring width; the major radius sits
            # midway between the interior and exterior radii.
            extra_helper = (self.abso_major_rad - self.abso_minor_rad) * 0.5
            self.major_radius = self.abso_minor_rad + extra_helper
            self.minor_radius = extra_helper
        verts, faces = add_twisted_torus(
            self.major_radius,
            self.minor_radius,
            self.major_segments,
            self.minor_segments,
            self.twists
        )
        # Create the mesh object from this geometry data.
        obj = create_mesh_object(context, verts, [], faces, "TwistedTorus")
        return {'FINISHED'}
|
from myspiders.ruia import JsonField, Item, Spider, Bs4HtmlField, Bs4TextField
from urllib.parse import urlencode, urlparse, urljoin, quote
from config import Job
'''
companyName: null
deptOrgName: "广发总行"
education: "本科"
endDate: "2020-06-10"
hiddenSiteApply: 1
importPost: 0
lastEditDate: null
orgId: 102501
orgName: "广发总行"
outerRecruit: null
postId: 142202
postIdToken: "b3096eb16decddbf"
postName: "总行战略规划部主管(战略管理)"
postSyncToXiaojianren: 1
postType: "战略管理类"
publishDate: "2020-06-02"
recommendStarOfIr: null
recruitNum: 2
recruitType: 2
serviceCondition: "1、大学本科及以上学历;
<br>2、本科学历,5年以上金融工作经验;研究生或以上学历、特别优秀者可适当放宽。有中国人寿工作经历、同业综合金融工作经验者优先;
<br>3、具备中级职称及以上或等同专业资格证书的优先考虑;
<br>4、熟悉综合金融或具有某项业务、项目整体推进、协调经验;
<br>5、具备较好的研究和信息处理能力;
<br>6、具备较强的文字表达和汇总分析能力;
<br>7、思路清晰,并善于总结和分析,能够发现问题,找出解决问题的思路和方向;
<br>8、具备独立处理事务的管理能力;
<br>9、具备较强的团队组织和合作能力;
<br>10、较强的人际沟通能力和协调能力。"
workContent: "1、承接集团公司重振国寿综合化战略,负责制定全行综合金融总体规划及年度要点、计划;
<br>2、开展综合金融策略研究;
<br>3、协调各成员单位开展协同业务对接,制定项目推进方案;
<br>4、研究制定综合金融利益分配及激励机制;
<br>5、推进和督导分行协同试点工作,总结和推广试点经验;
<br>6、承接、组织集团公司及总行各类综合金融会议、调研;
<br>7、完成各级领导交办的其他事项。"
workPlace: "广州市"
workPlaceCode: "0/4/396/397"
workType: "全职"
'''
class CgbchinaItem(Item):
    """Item mapping one CGB (Guangfa Bank) job posting from the hotjob.cn
    JSON API onto the flat fields consumed by `Job.do_load`.
    """

    # Detail-page URL template; filled in with the post id in `clean_job_id`.
    # NOTE(review): the path segment says 'chinaciticbank' although this item
    # is for Guangfa Bank -- confirm the tenant slug against the live site.
    url_detail = 'https://www.hotjob.cn/wt/chinaciticbank/web/index/webPositionN310!getOnePosition?postId=%s&recruitType=2&brandCode=1&importPost=0&columnId=2'
    # One item instance is produced per element of the `postList` array.
    target_item = JsonField(json_select='postList')
    bank_name = JsonField(default='广发银行')
    type_main = JsonField(default='社会招聘')
    name = JsonField(json_select='postName')
    job_id = JsonField(json_select='postId')
    branch_name = JsonField(json_select='orgName')
    department = JsonField(json_select='deptOrgName')
    education = JsonField(json_select='education')
    requirement = JsonField(json_select='serviceCondition')
    content = JsonField(json_select='workContent')
    place = JsonField(json_select='workPlace')
    date_publish = JsonField(json_select='publishDate')
    date_close = JsonField(json_select='endDate')

    async def clean_job_id(self, value):
        # Side effect: also derives the detail-page URL from the post id.
        self.results['url'] = self.url_detail % value
        return str(value)

    async def clean_date_publish(self, value):
        # The API supplies a bare date; normalize to a datetime string.
        return value + ' 00:00:00'

    async def clean_date_close(self, value):
        return value + ' 00:00:00'

    async def clean_recruit_num(self, value):
        # NOTE(review): no `recruit_num` JsonField is declared above, so it is
        # unclear when this cleaner runs -- verify against the Item framework.
        if not value:
            return ''
        return str(value)
class CgbchinaWorker(Spider):
    """Spider that pages through the hotjob.cn position-list API (first 5
    pages) and persists each posting via `save_job`.
    """
    name = 'CgbchinaWorker'
    bank_name = '广发银行'
    # NOTE(review): the list endpoint path says 'chinalife' while the bank is
    # Guangfa and the detail URL says 'chinaciticbank' -- confirm the slug.
    start_urls = [("https://www.hotjob.cn/wt/chinalife/web/json/position/list?positionType=&comPart=101703&brandCode=1&trademark=0&useForm=0&recruitType=2&lanType=&positionName=&workPlace=&keyWord=&page=" + str(index + 1)) for index in range(5)]

    async def parse(self, response):
        # The API responds with JSON served under a text/plain content type.
        jsondata = await response.json(content_type='text/plain')
        async for item in CgbchinaItem.get_json(jsondata=jsondata):
            data = item.results
            job = Job.do_load(data)
            await self.save_job(job)
def start():
    """Entry point stub for the CGB spider; the crawl is currently disabled."""
    # CgbchinaWorker.start()
    pass
|
from qgis.PyQt.QtWidgets import *
from .diverseity_results_dialog_ui import Ui_dlgResults
class DlgResults(QDialog, Ui_dlgResults):
    """Results dialog: wires up the generated UI and sizes result columns."""

    # Widths for the results tree: wide name column, narrow metric columns.
    _COLUMN_WIDTHS = (250, 80, 80, 80, 80, 80)

    def __init__(self):
        super(DlgResults, self).__init__()
        self.setupUi(self)
        self.setLayout(self.lytMain)
        # Apply the fixed width to each of the six result columns.
        for column, width in enumerate(self._COLUMN_WIDTHS):
            self.trwResults.setColumnWidth(column, width)
|
import itertools
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import FieldError
from autoslug import AutoSlugField
from otcore.lex.lex_utils import lex_slugify
from otcore.topic.models import Tokengroup
from otcore.relation.models import RelatedBasket
from otcore.management.common import substring_before
from otcore.settings import otcore_settings
def pop_from(instance):
    """
    Legacy function required for migrations
    """
    # Kept only so historical migrations that reference it keep importing;
    # it simply mirrors the instance's name attribute.
    return getattr(instance, 'name')
class Scope(models.Model):
    """
    Scope is the way to disambiguate between names using the same string.
    For example: New York (scope:City) and New York (scope:State).
    """
    # Unique, indexed scope label; 'Generic' is the catch-all default scope.
    scope = models.CharField(max_length=100, null=False, unique=True,
                             default='Generic', db_index=True)
    description = models.TextField(blank=True, null=True)

    def __str__(self):
        return self.scope

    def get_absolute_url(self):
        # NOTE(review): Scope declares no `slug` field (and no AutoSlugField)
        # in this file, so `self.slug` looks like it would raise
        # AttributeError -- confirm a slug attribute is added elsewhere or
        # whether this should use `self.id`/`self.scope`.
        return reverse('scope-detail', args=[self.slug])
class Basket(models.Model):
    """
    A basket is a container that groups the various hits being used as a topic name.
    The label of a basket is identical to the topic identifier.
    """
    label = models.CharField(max_length=512, db_index=True)
    types = models.ManyToManyField('topic.Ttype', blank=True, related_name="baskets")
    tokengroups = models.ManyToManyField('topic.Tokengroup', blank=True)
    display_name = models.CharField(max_length=512, blank=True)
    description = models.TextField(blank=True)
    # Remove categories because a topic can have mulltiple types, and then it's a matter of querying on intersection of types.

    @property
    def number_of_names(self):
        """Number of names (hits) attached to this topic."""
        # Let the database count the rows instead of fetching every hit
        # just to call len() on the result.
        return self.topic_hits.count()

    def local_tokengroup(self):
        """
        Creates token groups for the current basket.
        """
        slugs = '-'.join([hit.slug for hit in self.topic_hits.all()])
        alltokens = '-'.join(list(set(slugs.split('-'))))
        tokens = alltokens.split('-')
        # Only make token groups if there are more then a certain number of words in the name.
        if len(tokens) >= otcore_settings.MULTIPLE_RELATIONS_COUNT:
            tokengroups = ['-'.join(sorted(list(x))) for x in itertools.combinations(tokens, otcore_settings.MULTIPLE_RELATIONS_COUNT)]
            tokengroup_objects = []
            for tokengroup in tokengroups:
                tokengroup_objects.append(Tokengroup.objects.get_or_create(group=tokengroup)[0])
            self.tokengroups.add(*tokengroup_objects)

    @property
    def related_baskets(self):
        """
        This works best for generic, symmetrical relationships (no role_from, role_to)
        """
        related_topics = set()
        for rbasket in RelatedBasket.objects.filter(source=self):
            related_topics.add(rbasket.destination)
        for rbasket in RelatedBasket.objects.filter(destination=self):
            related_topics.add(rbasket.source)
        return list(related_topics)

    def update_display_name(self, save_on_change=True):
        """Recompute `display_name` from the attached hits.

        Preference order: explicit preferred hit, then the longest visible
        (non-hidden) name; placeholder labels when no (visible) name exists.
        """
        current_display_name = self.display_name
        topic_hits = self.topic_hits.all()
        if len(topic_hits) == 0:
            # If there is only one name, consider it as the preferred name.
            # But don't save it as a preferred_name in Hit.
            self.display_name = '*NO AVAILABLE NAME* - {}'.format(self.label)
        elif len([hit for hit in topic_hits if hit.hidden == False]) == 0:
            # If all names are hidden, report a dummy name "*NO VISIBLE NAME*"
            self.display_name = '*NO VISIBLE NAME* - {}'.format(self.label)
        else:
            try:
                # If one of the names has preferred set to True, take it.
                preferred = [hit for hit in topic_hits if hit.preferred == True][0]
                display_name = preferred.name
            except IndexError:
                # Otherwise. No preferred name on the basket, no hit is preferred.
                # Take the longest name available
                names = [hit.name for hit in topic_hits if hit.hidden is False]
                display_name = max(names, key=len)
            self.display_name = display_name
        if save_on_change and self.display_name != current_display_name:
            self.save()

    # If there is no preferred name, populates it based on the current set of names
    # to change the preferred name, always use the hit.set_preferred() function, which will update the basket
    def save(self, *args, **kwargs):
        self.update_display_name(save_on_change=False)
        return super(Basket, self).save(*args, **kwargs)

    @property
    def longest_name(self):
        """Longest of all attached hit names, regardless of hidden state."""
        names = [hit.name for hit in self.topic_hits.all()]
        return max(names, key=len)

    def __str__(self):
        return self.display_name

    class Meta:
        ordering = ['display_name', ]

    @staticmethod
    def create_from_string(name_string):
        """
        Creates a hit from a name string. Does not handle disambiguation, so simply raises an
        exception if a slug equivalent string exists
        """
        slug = lex_slugify(name_string)
        # BUG FIX: the assertion message previously contained bare `{}`
        # placeholders that were never substituted (and a "Pleace" typo);
        # format in the actual slug and source string so the error is
        # diagnosable.
        assert not Hit.objects.filter(slug=slug).exists(), (
            "A Hit with slug {}, created from {}, already exists. The `create_from_string` function does "
            "not handle disambiguation. Please create basket by different means.".format(slug, name_string)
        )
        hit = Hit.objects.create(name=name_string)
        hit.create_basket_if_needed()
        return hit.basket
def get_default_scope():
    """Return the id of the 'Generic' scope, creating it on first use."""
    default_scope = Scope.objects.get_or_create(scope="Generic")[0]
    return default_scope.id
class Hit(models.Model):
    """
    Hit represents the information as it is found in the sources. It's the characters strings as extracted from source documents, index entries, or external knowledge base files such as hubtop.
    'name' is the character string itself.
    'bypass' indicates whether a given string, if found, should be ignored and not be part of the topic map. For example, if the topics are acquired from extracting from headers, and a header is "Table 1", bypass can be used to exclude it from appearing in the topic map. 'bypass' differs from a deletion, because bypass is a permanent marker for exclusion: each time the same string will pop up in a new context, it will be excluded.
    'slug' is an automatically created field that aims at "normalizing" the spelling, so that alternative spellings would end up producing the same topic. Rules for transforming a name into a slug can be customized using stop words, expressions (words to keep together as a unit), custom (irregular) transformations, and acronyms.
    'scope' is an attribute that is used to differentiate domains in which a topic name is valid. The same name could be used to describe different topics (homonyms). For example, "New York" can be used for the city or for the state. Adding scopes to the name is used to create distinct topics.
    'error' refers to a processing error indicating that the hit didn't make it through and explaining why.
    """
    name = models.CharField(max_length=512, db_index=True)
    # Extra property for names.
    # Note. Will be used for acronyms.
    kind = models.CharField(max_length=15, null=True, blank=True)
    # Slug is recomputed from `name` on every save (always_update=True) and
    # is deliberately non-unique: equivalents share a slug across scopes.
    slug = AutoSlugField(max_length=512, populate_from='name', slugify=lex_slugify, unique=False, always_update=True)
    scope = models.ForeignKey('Scope', default=get_default_scope, on_delete=models.CASCADE, related_name='hits')  # Default = Generic
    hidden = models.BooleanField(default=False)
    preferred = models.BooleanField(default=False)
    bypass = models.BooleanField(default=False)
    # Null basket means the hit is bypassed or not yet attached to a topic.
    basket = models.ForeignKey(Basket, related_name='topic_hits', null=True, blank=True, on_delete=models.CASCADE)

    @property
    def tokens(self):
        # Individual slug words, e.g. 'new-york' -> {'new', 'york'}.
        return set(self.slug.split('-'))

    def __str__(self):
        name = self.name
        # NOTE(review): scope_id is a FK id; this comparison against 0 looks
        # like it is always true for saved rows -- confirm the intent (it may
        # have been meant to skip the default/Generic scope).
        if self.scope_id != 0:
            name += " [as " + self.scope.scope + "]"
        return name

    def equivalents(self):
        """Other hits with the same slug and scope (alternate spellings)."""
        return [hit for hit in Hit.objects.filter(slug=self.slug, scope=self.scope).exclude(name=self.name)]

    def create_basket_if_needed(self, force=False):
        """Attach this hit to an equivalent hit's basket, or create one.

        With force=True, skips reuse of an equivalent's basket.
        """
        # If there is another name with the same slug, use that basket.
        if not force:
            for equivalent in self.equivalents():
                if equivalent.basket is not None:
                    self.basket = equivalent.basket
                    self.save()
                    # Re-save the basket so its display name is refreshed.
                    self.basket.save()
                    break
        # If the basket already exists, don't do anything
        # Otherwise, create a new basket.
        if not self.basket:
            # Label encodes slug + scope id, e.g. 'new-york__1'.
            label = '%s%s%s' % (self.slug, otcore_settings.SCOPE_SEPARATOR, self.scope.id)
            basket = Basket.objects.create(label=label)
            self.basket = basket
            self.save()

    def set_bypass(self, bypass_val):
        """
        Sets the bypass attribute for the hit.
        Expects a boolean true/false value
        If setting to true, it will take the hit off its current basket.
        If setting to false, it will attach the hit to the relevant basket (and create one if needed)
        """
        if (bypass_val and self.bypass) or (not bypass_val and not self.bypass):
            # Submitted value matches current attribute. Do nothing
            pass
        else:
            if bypass_val:
                self.bypass = True
                basket = self.basket
                self.basket = None
                self.save()
                # Garbage-collect the basket if this was its last hit.
                if basket and basket.topic_hits.count() == 0:
                    basket.delete()
            else:
                self.bypass = False
                self.save()
                self.create_basket_if_needed()

    # Use to set a hit as preferred
    # Will ensure that no other hits on a topic are marked as preferred as well
    def make_preferred(self, force=False, save=True):
        # ignore for bypassed and basket-less hits
        # or if the current hit is already preferred
        # (precedence: bypass OR no-basket OR (preferred AND not force))
        if self.bypass or not self.basket or self.preferred and not force:
            pass
        else:
            # Mark any other preferred names as not preferred
            for hit in Hit.objects.filter(basket=self.basket, preferred=True).exclude(id=self.id):
                hit.preferred = False
                hit.save()
            self.preferred = True
            # A preferred name must be visible.
            self.hidden = False
            self.basket.display_name = self.name
            self.basket.save()
            if save:
                self.save()

    # only use this to set hit as hidden
    def set_hidden(self, hidden_val):
        # ignore if bypassed and/or basketless or hidden value isn't changing
        # or this is the only name on the topic
        if self.bypass or not self.basket or self.hidden == hidden_val:
            pass
        else:
            other_names = self.basket.topic_hits.exclude(id=self.id)
            visible_names = [x for x in other_names if not x.hidden]
            # if trying to set hidden to True and there are no other visible names, raise Exception
            if hidden_val and len(visible_names) == 0:
                raise FieldError('You cannot set all names on a topic to hidden')
            self.hidden = hidden_val
            self.save()

    def save(self, *args, **kwargs):
        # Persist first so the basket recomputes from up-to-date hit rows.
        super(Hit, self).save(*args, **kwargs)
        if self.basket:
            self.basket.update_display_name()

    class Meta:
        ordering = ['name', ]
        # The same string may exist once per scope.
        unique_together = (('name', 'scope'))
def update_display_name_on_hit_delete(sender, instance, **kwargs):
    """
    post_delete hook: recompute the owning basket's display name after a
    hit is removed, so the basket never keeps a stale name.
    """
    try:
        basket = instance.basket
        if basket:
            basket.update_display_name()
    except Basket.DoesNotExist:
        # The basket went away in the same delete cascade; nothing to do.
        pass
# Recompute the owning basket's display name whenever a Hit row is deleted.
# weak=False keeps the receiver alive; dispatch_uid prevents double-connects.
models.signals.post_delete.connect(
    update_display_name_on_hit_delete,
    sender=Hit,
    weak=False,
    dispatch_uid='models.update_display_name_on_hit_delete'
)
|
from tools.bot import AdminBot

# Launch script: instantiate the admin bot and run it (blocks while running).
bot = AdminBot()
bot.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017-2019, Erin Morelli.
Title : EM Downpour Downloader
Author : Erin Morelli
Email : erin@erinmorelli.com
License : MIT
Version : 0.2
"""
# Future
from __future__ import print_function
# Built-ins
import os
import re
import sys
import json
import codecs
import pickle
import argparse
from datetime import datetime, timedelta
# Third-party
import yaml
import requests
from lxml import html
from bs4 import BeautifulSoup
from tabulate import tabulate
from clint.textui import progress
from requests.packages import urllib3
from requests_cache import CachedSession
# Script credits
__title__ = 'EM Downpour Downloader'
__copyright__ = 'Copyright (c) 2017-2019, Erin Morelli'
__author__ = 'Erin Morelli'
__email__ = 'erin@erinmorelli.com'
__license__ = 'MIT'
__version__ = '0.2'
# Disable SSL warnings
urllib3.disable_warnings()
# Set up UTF-8 encoding for Python 2
if sys.version_info[0] < 3:
__writer__ = codecs.getwriter('utf8')
sys.stdout = __writer__(sys.stdout)
class EMDownpourDownloader(object):
    """Class to interact with the Downpour API.

    Attributes:
        RAW (int): Constant value for outputting raw (python) responses
        JSON (int): Constant value for outputting JSON responses
        CLI (int): Constant value for outputting command-line responses
    """

    # Set output types
    RAW = 1   # return native Python objects
    JSON = 2  # return JSON-encoded strings
    CLI = 3   # return human-readable, tabulated text
def __init__(self, output=RAW):
"""Initialize class and connect to the Downpour API.
Args:
output (int): Output type identifier
"""
self._root_dir = os.path.dirname(os.path.realpath(__file__))
# Get local user directory
self.local_dir = os.path.join(
os.path.expanduser('~'), '.config', 'downpour-downloader')
# Get config file
self.config_file = os.path.join(self.local_dir, 'config.yml')
# Set Downpour connection info
self.downpour = dict()
self.downpour['root'] = 'https://www.downpour.com/{0}'
self.downpour['filetypes'] = ['m4b', 'mp3']
self.downpour['headers'] = {
'User-Agent': '%s/%s'.format(__title__, __version__)
}
# Get user config settings
self.config = None
self._load_config()
# Check download folder read/write access
self._check_folder_permissions(self.config['folder_abs'])
# Set requests caching data
self._cache = {
'pickle_protocol': 2,
'cookie_expire_default': {
'hours': 1
},
'cookies_file': os.path.join(self.local_dir, '.downpour_cookies'),
'expire_default': 3600, # 1 hour in seconds
'file': os.path.join(
self.local_dir,
'.downpour_cache_v{major}{minor}'.format(
major=sys.version_info[0],
minor=sys.version_info[1]
)
)
}
# Update defaults from config
for update_key in ['cookie_expire_default', 'expire_default']:
if (update_key in self.config.keys() and
self.config[update_key] is not None):
self._cache[update_key] = self.config[update_key]
# Set up script
self._script_actions = {
'library': 'lists all books available to download',
'book': 'get information about a given book by ID',
'download': 'downloads book(s) by ID'
}
# Set output type
self.output = output
# Handle command-line arguments
self._args = None
self._load_args()
# Load cache and initialize requests session
self.session = None
self._load_requests_cache(refresh=self._args['refresh'])
# Login to Downpour and load session cookie jar
self._load_cookie_jar(refresh=self._args['refresh'])
    def _get_argparser(self):
        """Configure and return argument parser.

        Returns:
            DownpourArgumentParser: Command-line argument parser object
        """
        action_help = 'commands:\n'
        # Set up command choices help from the _script_actions registry
        for choice in self._script_actions.keys():
            action_help += '  {key:21} {desc}\n'.format(
                key=choice,
                desc=self._script_actions[choice]
            )
        # Set up script usage and help output
        help_output = '{usage}\n\n{action_help}'.format(
            usage='%(prog)s <command> [book ID(s)] [options]',
            action_help=action_help
        )
        # Set up argument parser
        argparser = DownpourArgumentParser(
            formatter_class=argparse.RawTextHelpFormatter,
            usage=help_output
        )
        # Positional arguments (help suppressed: usage text covers them)
        argparser.add_argument(
            'action',
            metavar='command',
            help=argparse.SUPPRESS,
            choices=self._script_actions.keys()
        )
        argparser.add_argument(
            'book_ids',
            nargs='*',
            help=argparse.SUPPRESS,
            action=ScriptAction
        )
        # Optional flags
        argparser.add_argument(
            '-v', '--version',
            action='version',
            version=get_version(False)
        )
        argparser.add_argument(
            '-j', '--json',
            default=False,
            help='prints responses as JSON',
            action='store_true'
        )
        argparser.add_argument(
            '-r', '--refresh',
            default=False,
            help='force a refresh of cached library data',
            action='store_true'
        )
        # Defaults for folder/filetype come from the user's config file
        argparser.add_argument(
            '-f', '--folder',
            default=self.config['folder'],
            help='specify a folder for files to be downloaded to',
            action=FileAction
        )
        argparser.add_argument(
            '-t', '--filetype',
            metavar='FILETYPE',
            default=self.config['filetype'],
            help='{0} [{1}].'.format(
                'specify which audiobook filetype to download',
                ', '.join(self.downpour['filetypes'])
            ),
            choices=self.downpour['filetypes']
        )
        argparser.add_argument(
            '-d', '--desc',
            help='sort library in descending order by purchase date',
            action='store_true'
        )
        argparser.add_argument(
            '-c', '--count',
            help='specify number of books to return from library',
            type=int
        )
        # Return argument parser
        return argparser
    def _load_args(self):
        """Parse and load command-line arguments.

        No-op (leaves self._args as None) unless the module is being run
        as a script.
        """
        if __name__ != '__main__':
            return
        # Get argument parser
        argparser = self._get_argparser()
        # Check for no args and print help
        if len(sys.argv) == 1:
            argparser.print_help()
            sys.exit(1)
        # Parse arguments
        args = argparser.parse_args()
        # Store arguments as dict
        self._args = args.__dict__
        # Update instance with parsed argument data
        if self._args is not None:
            # Merge arguments with config (CLI flags win over config file)
            for config_key in self.config.keys():
                if config_key in self._args.keys():
                    self.config[config_key] = self._args[config_key]
            # Always refresh for download requests
            if self._args['action'] == 'download':
                self._args['refresh'] = True
            # Check for JSON output flag
            if self._args['json']:
                self.output = self.__class__.JSON
def _load_config(self):
"""Load the user config file data into instance variable."""
self.config = yaml.load(open(self.config_file).read())
# Set required fields
required_fields = ['username', 'password', 'folder', 'filetype']
# Check for required fields
for required in required_fields:
if (required not in self.config.keys() or
self.config[required] is None):
error = "Error: configuration field '{0}' is not defined"
sys.exit(error.format(required))
# Check that file type is valid
if self.config['filetype'] not in self.downpour['filetypes']:
error = "Error: configuration field 'filetype' must be one of: {0}"
sys.exit(error.format(', '.join(self.downpour['filetypes'])))
# Parse config folder path
self.config['folder_abs'] = os.path.abspath(
os.path.expanduser(self.config['folder']))
def _load_requests_cache(self, refresh=False):
"""Load the requests cache module with user settings.
Args:
refresh (bool, optional): Force a refresh of cached requests data
"""
if ('expire_default' in self.config.keys() and
self.config['expire_default'] is not None):
expire_after = self.config['expire_default']
else:
# Fallback to default expiration
expire_after = self._cache['expire_default']
# Set up requests session cache
self.session = CachedSession(
self._cache['file'],
expire_after=expire_after,
allowable_methods=('GET', 'POST')
)
# Set requests session headers
self.session.headers = self.downpour['headers']
# Clear session cache
if refresh:
self.session.cache.clear()
    def _get_cookies(self):
        """Login to Downpour and retrieve user session cookies.

        Scrapes the login URL, form post URL, and CSRF form key from the
        site HTML, posts the credentials, then validates the session by
        checking the library page for a logout link.

        Returns:
            RequestsCookieJar: Requests session cookie jar object from Downpour
        """
        login_error = 'Error: unable to login to Downpour'
        # Visit Downpour home page
        home = self.session.get(self.downpour['root'].format(''))
        # Set up login URL regex
        home_regex = r'<a href=\"({0}/.+/)\" >{1}</a>'.format(
            r'https://www\.downpour\.com/customer/account/login/referer',
            r'<span>Sign In</span>'
        )
        # Look for login URL
        # NOTE(review): group(1) is dereferenced before the None check below;
        # if the markup changes this raises AttributeError instead of the
        # friendly exit -- confirm intended.
        home_match = re.search(home_regex, home.text, re.I)
        login_url = home_match.group(1)
        # Make sure we got a URL
        if login_url is None:
            sys.exit(login_error)
        # Navigate to login page
        post = self.session.get(login_url)
        # Set up post URL regex
        post_regex = r'<form action="(.+)"\s+{0} {1} {2}>'.format(
            'method="post"',
            'id="login-form"',
            'class="scaffold-form"'
        )
        # Look for post URL
        post_match = re.search(post_regex, post.text)
        post_url = post_match.group(1)
        # Make sure we got a URL
        if post_url is None:
            sys.exit(login_error)
        # Set up form key regex (hidden CSRF token in the login form)
        key_regex = r'<input name="form_key" type="hidden" value="(\w+)" />'
        # Look for form key
        key_match = re.search(key_regex, post.text)
        form_key = key_match.group(1)
        # Make sure we got a form key
        if form_key is None:
            sys.exit(login_error)
        # Login to Downpour (plain requests.post: avoid caching the login)
        login = requests.post(
            post_url,
            data={
                'form_key': form_key,
                'login[username]': self.config['username'],
                'login[password]': self.config['password'],
                'send': ''
            },
            cookies=self.session.cookies,
            headers=self.downpour['headers']
        )
        # Attempt to retrieve user library
        library = self.session.get(
            self.downpour['root'].format('my-library'),
            cookies=login.cookies
        )
        # Set up validation regex
        valid_regex = r'<a href="{0}" title="Log Out" >'.format(
            self.downpour['root'].format('customer/account/logout/'),
        )
        # Validate the session: the library page must show a logout link
        if not re.search(valid_regex, library.text, re.I):
            sys.exit(login_error)
        # Return user cookies
        return library.cookies
def _store_cookies(self, cookies):
"""Store user cookies to local cache file.
Args:
RequestsCookieJar: Requests session cookie jar object from Downpour
"""
pickle.dump(
cookies,
open(self._cache['cookies_file'], 'wb+'),
protocol=self._cache['pickle_protocol']
)
def _load_cookies(self):
"""Load user cookies from local cache file."""
return pickle.load(open(self._cache['cookies_file'], 'rb'))
    def _fill_cookie_jar(self, cookies):
        """Load cached cookies into instance Requests cookie jar.

        Args:
            cookies (RequestsCookieJar): Requests session cookie jar object
        """
        # Merge (not replace) into the session's existing jar.
        self.session.cookies.update(cookies)
    def _cookies_expired(self, cookies):
        """Check if user cookies have expired.

        Args:
            cookies (RequestsCookieJar): Requests session cookie jar object

        Returns:
            bool: True if any required cookies have expired, else False
        """
        now = datetime.now()
        # Get modification time of cookie file
        mod_time = datetime.fromtimestamp(
            os.path.getmtime(self._cache['cookies_file'])
        )
        # Get refresh time from config
        # NOTE(review): a config entry `cookie_cache: null` would make
        # .keys() raise AttributeError here -- confirm config validation.
        if ('cookie_cache' in self.config.keys() and
                self.config['cookie_cache'].keys()):
            refresh = timedelta(**self.config['cookie_cache'])
        else:
            # Set default refresh time
            refresh = timedelta(**self._cache['cookie_expire_default'])
        # Check cookie file modification time (file age exceeds refresh TTL)
        if now - mod_time > refresh:
            return True
        # Check on Downpour cookie expiration times
        for cookie in cookies:
            # Get cookie expiration
            expires = datetime.fromtimestamp(cookie.expires)
            # Exit if cookie has expired
            if now > expires:
                return True
        # Return not expired
        return False
    def _load_cookie_jar(self, refresh=False):
        """Retrieve cookies from local cache or new from Downpour.

        Args:
            refresh (bool, optional): Force a refresh of cached cookie data
        """
        if not refresh:
            try:
                cookies = self._load_cookies()
            except IOError:
                # No cached cookie file yet; force a fresh login below.
                refresh = True
            else:
                # Check for missing or expired cookies
                if not cookies or self._cookies_expired(cookies):
                    refresh = True
        # Get new cookies
        if refresh:
            # Retrieve cookies from Downpour (performs a full login)
            cookies = self._get_cookies()
            # Store new cookies
            self._store_cookies(cookies)
        # Fill the cookie jar
        self._fill_cookie_jar(cookies)
def _check_folder_permissions(self, folder):
"""Check that folder exists and is writable."""
if not os.path.exists(folder):
error = 'Error: folder does not exist: {0}'
sys.exit(error.format(folder))
# Check that directory is readable and writable
if not os.access(folder, os.W_OK or os.R_OK):
error = 'Error: folder does not have read/write permissions: {0}'
sys.exit(error.format(folder))
    def _do_action_library(self, output=None):
        """Retrieve a list of the user's Downpour library books.

        Args:
            output (int, optional): Override the class-level output type

        Returns:
            dict, str: Library book data, depending on output type
                Can be dict, JSON, or formatted ascii table
        """
        if output is None:
            output = self.output
        # Get books from Downpour
        books = self.get_library()
        # Handle CLI args (order and count filters)
        if self._args['desc']:
            books = list(reversed(books))
        if self._args['count']:
            books = books[:self._args['count']]
        # If we want a non-CLI response, stop here
        if output is self.__class__.RAW:
            return books
        elif output is self.__class__.JSON:
            return json.dumps(books)
        # Set up table headers
        table_headers = [
            'ID',
            'Title',
            'Author',
            'Runtime',
            'Purchased'
        ]
        # Set up table display
        table_data = []
        # Format book data
        for book in books:
            # Parse purchase date as datetime object
            purchase_date = datetime.strptime(
                book['purchase_date_string'], '%Y-%m-%d')
            # Set up table row (authors are pipe-delimited in the raw data)
            table_data.append([
                book['book_id'],
                truncate(book['title']),
                truncate(', '.join(book['author'].split('|'))),
                '{0} hr'.format(book['runtime']),
                purchase_date.strftime('%d %b %y')
            ])
        # Return formatted and UTF-8 encoded table
        return tabulate(
            table_data,
            headers=table_headers,
            tablefmt="psql"
        )
    def _do_action_book(self, book_id=None, output=None):
        """Retrieve and display information about a specific Downpour book.

        Args:
            book_id (str, optional): Downpour book ID
                Defaults to first value parsed from command-line `book_ids`
            output (int, optional): Override the class-level output type

        Returns:
            dict, str: Single library book data, depending on output type
                Can be dict, JSON, or formatted ascii text
        """
        if book_id is None:
            book_id = self._args['book_ids'][0]
        if output is None:
            output = self.output
        # Get book
        book = self.get_book_by_id(book_id)
        # If we want a non-CLI response, stop here
        if output is self.__class__.RAW:
            return book
        elif output is self.__class__.JSON:
            return json.dumps(book)
        # Get purchase date as datetime object
        purchase_date = datetime.strptime(
            book['purchase_date_string'], '%Y-%m-%d')
        # Set output formatting (right-aligned 15-char labels)
        form = u'{0:>15}: {1}'
        # Format book data
        book_data = [
            form.format(
                'ID', book['book_id']),
            form.format(
                'Title', book['title']),
            form.format(
                'Author(s)', ', '.join(book['author'].split('|'))),
            form.format(
                'Runtime', '{0} hours'.format(book['runtime'])),
            form.format(
                'Purchase Date', purchase_date.strftime('%d %B %Y')),
            form.format(
                'Released', 'Yes' if book['is_released'] else 'No'),
            form.format(
                'Rental', 'Yes' if book['is_rental'] else 'No'),
            form.format(
                'DRM', 'Yes' if book['drm'] else 'No'),
            form.format(
                'Link', book['link']
            )
        ]
        # Return formatted book data
        return '\n'.join(book_data)
    def _do_action_download(self, book_ids=None, output=None):
        """Download book(s) from Downpour.

        Args:
            book_ids (list, optional): List of Downpour book IDs
                Defaults to values parsed from command-line `book_ids`
            output (int, optional): Override the class-level output type

        Returns:
            dict, str, None: Per-book download results keyed by book ID
                (RAW or JSON output); CLI output returns None.
        """
        if book_ids is None:
            book_ids = self._args['book_ids']
        if output is None:
            output = self.output
        # Track downloaded books
        downloaded_books = {}
        # Iterate over book IDs to download
        for idx, book_id in enumerate(book_ids):
            # Print new line between books
            if idx and output is self.__class__.CLI:
                print('\n', file=sys.stdout)
            # Download selected book
            downloaded_books[book_id] = self.download_book(book_id)
        # Output formatted response
        if output is self.__class__.RAW:
            return downloaded_books
        elif output is self.__class__.JSON:
            return json.dumps(downloaded_books)
def do_action(self, action=None):
"""Wrapper function to perform a specific action.
Args:
action (str, optional): Name of the action to perform
Defaults to parsed command-line value `command`
"""
if action is None:
action = self._args['action']
# Check for valid action
if action not in self._script_actions.keys():
sys.exit("Error: invalid action: '{0}' (choose from {1})".format(
action,
', '.join(self._script_actions.keys())
))
# Get function to perform action
action_func = '_do_action_{action}'.format(action=action)
# Do action
if hasattr(self, action_func):
return getattr(self, action_func)()
def get_library(self):
"""Retrieve list of user library books from Downpour.
Returns:
dict: Parsed JSON data from API response
"""
library = self.session.get(
self.downpour['root'].format('my-library')
)
# Parse HTML
soup = BeautifulSoup(library.text, 'html.parser')
# Find book data
books_html = soup.find_all(
'span',
attrs={'class': 'product-library-item-link'}
)
from pprint import pprint
# Populate book list
books = []
for book in books_html:
attrs = book.attrs
runtime = attrs['data-runtime']
books.append({
'author': attrs['data-author-display-string'],
'book_id': attrs['data-book_id'],
'drm': attrs['data-drm'] == '1',
'expiration': attrs['data-expiration'],
'is_released': attrs['data-is-released'] == '1',
'is_rental': attrs['data-is-rental'] == '1',
'itemid': attrs['data-itemid'],
'purchase_date': attrs['data-purchase-date'],
'purchase_date_string': attrs['data-purchase-date-string'],
'release_date': attrs['data-release-date'],
'remaining': attrs['data-remaining-string'],
'runtime': 0 if runtime == '' else float(runtime),
'sku': attrs['data-sku'],
'link': attrs['data-href'],
'title': attrs['title'],
'cover': book.find('img').attrs['src']
})
# Return complete book list
return books
def get_book_by_id(self, book_id):
"""Retrieve book from user Downpour library by book ID.
Args:
book_id (str): Downpour book ID
Returns:
dict: Parsed JSON data from API response
"""
books = self.get_library()
# Find book in library
return next(book for book in books if book['book_id'] == book_id)
def get_book_file_data(self, book):
"""Retrieve additional file information from Downpour.
Args:
book (dict): Downpour book data
Returns:
list: List of file part data for making download requests
"""
dp_root = self.downpour['root']
# Make request to get book files download information
dl_data = self.session.post(
dp_root.format('my-library/ajax/ajaxGetBookActionOptions'),
data={
'bookId': book['book_id']
},
cookies=self.session.cookies
)
# Get JSON
dl_json = dl_data.json()
# Check for status
if not dl_json['status']:
sys.exit('Error: could not retrieve book download manifest')
# Get manifest
manifest = dl_json['manifest']
# Set up file regexes
file_regex = r'\.{0}$'.format(self.config['filetype'])
file_part_regex = r'^File (\d+) of \d+$'
# Return only correct file type
files = []
for file_name in manifest.keys():
if re.search(file_regex, file_name, re.I):
file = manifest[file_name]
# Parse file part number
part = re.match(file_part_regex, file['countOf'], re.I)
# Check for match
if not part:
sys.exit('Error: could not parse book download part')
# Set file part number
file['part'] = int(part.group(1))
# Add to files list
files.append(file)
# Sort files by part number
sorted_files = sorted(files, key=lambda k: k['part'], reverse=False)
# Return sorted file list
return sorted_files
def get_download_url(self, file_info):
"""Retrieve Downpour book file download URL.
Args:
file_info (dict): File part information
Retrieved from API call in `get_book_file_data`
Returns:
str: Download URL for book part file
"""
dl_url = self.session.post( # Not a cached request as the URL expires
self.downpour['root'].format('my-library/ajax/ajaxDLBookBD'),
cookies=self.session.cookies,
data={
'bdfile': file_info['filename'],
'niceName': file_info['prettyName']
}
)
# Get JSON response
dl_json = dl_url.json()
# Check for success
if not dl_json['status']:
sys.exit('Error: could not retrieve the book download URL(s)')
# Return download URL
return dl_json['link']
    def get_book_path(self, book):
        """Get and create the download file path for a book.

        Args:
            book (dict): Downpour book information

        Returns:
            str: Absolute path to book download target folder
        """
        # Default layout: <folder>/<authors>/<title>
        template = u'{author}/{title}'
        # Check for user-specified template
        if ('template' in self.config.keys() and
                self.config['template'] is not None):
            # Convert str to unicode if this is Python 2
            # (the `unicode` builtin only exists there; branch is guarded)
            if sys.version_info[0] < 3:
                template = unicode(self.config['template'], 'utf-8')
            else:
                template = self.config['template']
        # Format folder path from template (authors are pipe-delimited)
        book_folder = template.format(
            title=book['title'],
            author=', '.join(book['author'].split('|')),
            book_id=book['book_id']
        )
        # Join book folder to user folder
        book_path = os.path.join(self.config['folder_abs'], book_folder)
        # Create folders if they don't exist
        if not os.path.exists(book_path):
            os.makedirs(book_path)
        # Return
        return book_path
    def download_book_file(self, file_data, file_path, output=None):
        """Download book part file from Downpour and rename it.

        Skips the download when the target file already exists. Otherwise
        streams the remote file to disk in 1 KiB chunks (with a progress
        bar in CLI mode) and exits the whole program if the file is still
        missing afterwards.

        Args:
            file_data (dict): File part information
                Retrieved from API call in `get_book_file_data`
            file_path (str): Absolute path to download target file
            output (int, optional): Override the class-level output type
        """
        if output is None:
            output = self.output
        # Exit if this file already exists (warn only in CLI mode)
        if os.path.isfile(file_path):
            if output is self.__class__.CLI:
                print(
                    "Warning: file '%s' already exists, skipping" % file_path,
                    file=sys.stderr
                )
            return
        # Get download URL (fresh each time -- the URL expires)
        file_url = self.get_download_url(file_data)
        # Get target folder
        out_folder = os.path.dirname(file_path)
        # Check folder permissions before opening the network stream
        self._check_folder_permissions(out_folder)
        # Open file download stream
        stream = requests.get(file_url, stream=True)
        # Read and download from file stream
        with open(file_path, 'wb') as handle:
            chunk_size = 1024
            # Determine if we need a progress bar
            if output is self.__class__.CLI:
                # Set up progress bar data
                # NOTE(review): assumes the server sends a Content-Length
                # header; int(None) would raise here otherwise -- confirm.
                total_length = int(stream.headers.get('content-length'))
                expected_size = (total_length / chunk_size) + 1
                # Set progress bar chunks
                chunks = progress.bar(
                    stream.iter_content(chunk_size=chunk_size),
                    expected_size=expected_size
                )
            else:
                # Use standard, silent stream
                chunks = stream.iter_content(chunk_size=chunk_size)
            # Download file chunk by chunk; skip keep-alive (empty) chunks
            for chunk in chunks:
                if chunk:
                    handle.write(chunk)
                    handle.flush()
        # Set error message:
        error = 'Error: there was a problem downloading the file: {0}'
        # Check that the file was downloaded
        if not os.path.isfile(file_path):
            sys.exit(error.format(file_path))
def download_book(self, book_id, output=None):
"""Download all available book part files from Downpour.
Args:
book_id (str): Downpour book ID
output (int, optional): Override the class-level output type
Returns:
list, str: Downloaded book data, depending on output type
Can be list of new files, JSON array, or formatted ascii text
"""
if output is None:
output = self.output
# Get book from library
book = self.get_book_by_id(book_id)
# Retrieve book file information
book_file_data = self.get_book_file_data(book)
# Count how many book parts
parts = len(book_file_data)
# Track downloaded files
downloaded_files = []
# Get path to download folder
book_path = self.get_book_path(book)
# Print CLI update
if output is self.__class__.CLI:
print(
u'== "{title}" by {author} ==\n+ Path: {path}'.format(
title=book['title'],
author=u', '.join(book['author'].split('|')),
path=book_path
),
file=sys.stdout
)
# Download each book part
for file_data in book_file_data:
# Get file part number
part = file_data['part']
# Get file part
file_part = ', Part {0}'.format(part) if parts > 1 else ''
# Get file name
file_name = '{book_title}{file_part}.{file_type}'.format(
book_title=file_data['title'],
file_part=file_part,
file_type=file_data['ext']
)
# Set file path
file_path = os.path.join(book_path, file_name)
# Print status update
if output is self.__class__.CLI:
print(
'+ {count}: "{name}"'.format(
count=file_data['countOf'],
name=file_name
),
file=sys.stdout
)
# Download the file
self.download_book_file(file_data, file_path)
# Add book to list
downloaded_files.append(file_path)
# Return downloaded files
if output is self.CLI:
print('+ Done.', file=sys.stdout)
else:
return downloaded_files
class ScriptAction(argparse.Action):
    """Custom script validation action for argparse."""

    def __call__(self, argparser, namespace, values, option_string=None):
        """Check that action has book IDs, if-needed."""
        needs_ids = namespace.action in ('download', 'book')
        if needs_ids and not values:
            # Pluralize only for the multi-book 'download' action
            plural = '(s)' if namespace.action == 'download' else ''
            argparser.error(
                'Missing book ID{s} for {action}'.format(
                    action=namespace.action,
                    s=plural
                )
            )
        # Set value in namespace object
        setattr(namespace, self.dest, values)
class FileAction(argparse.Action):
    """Custom files validation action for argparse."""

    def __call__(self, argparser, namespace, values, option_string=None):
        """Check that file provided exists."""
        resolved = os.path.abspath(values)
        if not os.path.exists(resolved):
            # Report the original (unresolved) value back to the user
            error = 'Path provided for {0} {1} {2}'.format(
                self.dest, 'does not exist:', values)
            argparser.error(error)
        # Set value in namespace object (the raw value, not the abspath)
        setattr(namespace, self.dest, values)
class DownpourArgumentParser(argparse.ArgumentParser):
    """Custom command-line argument parser for argparse."""

    def error(self, message):
        """Display a simple help message via stdout before error messages."""
        hint = 'Use `%s --help` to view more options\n' % self.prog
        sys.stdout.write(hint)
        # Exit with a plain error message instead of argparse's default
        sys.exit('Error: {0}'.format(message))

    def print_help(self, files=None):
        """Make the printed help message look nicer.

        Adds the program's title and version information as a header
        before delegating to the standard argparse help output.
        """
        sys.stdout.write(get_version())
        # Call super
        super(DownpourArgumentParser, self).print_help(files)
def get_version(extra=True):
    """Format program version information.

    Args:
        extra (bool): When True adds author/email information to output

    Returns:
        str: Formatted program version information
    """
    suffix = ''
    if extra:
        suffix = ' / by {author} <{email}>\n'.format(
            author=__author__,
            email=__email__
        )
    return '{title} v{version}{extra}\n'.format(
        title=__title__,
        version=__version__,
        extra=suffix
    )
def truncate(string, length=20):
    """Truncate a string and add ellipsis, if-needed.

    Args:
        string (str): String to be truncated
        length (int, optional): Length of string to truncate to

    Returns:
        str: Truncated string
    """
    suffix = '..'
    # Short enough: just trim to length and strip edge whitespace
    if len(string) <= length:
        return string[0:length].strip()
    # Too long: leave room for the ellipsis, then append it
    return string[0:length - len(suffix)].strip() + suffix
# Run the script from the command-line
if __name__ == '__main__':
    # Connect to Downpour object in CLI mode so progress is printed
    EDD = EMDownpourDownloader(output=EMDownpourDownloader.CLI)
    # Get output from main function
    __output__ = EDD.do_action()
    # Check for returned output; actions that print their own progress
    # return None and produce no extra output here
    if __output__ is not None:
        print(__output__, file=sys.stdout)
|
import numpy as np
import networkx as nx
from sklearn import metrics
import hypercomparison.utils
logger = hypercomparison.utils.get_logger(__name__)
class LinkPredictionTask(object):
    """
    Link prediction object.

    Given test edges, negative edges and an embedding, scores each edge by
    a beta-weighted community (supernode) distance and evaluates the scores
    with ROC-AUC, AUPR, average precision and precision.
    """
    def __init__(
        self,
        test_edges,
        negative_edges,
        emb,
        name,
        superG,
        beta
    ):
        """
        :param test_edges: list of test (positive) edges
        :param negative_edges: list of negative edges
        :param emb: embedding results; indexed as emb[node][0] (a probability
            fed to np.log) and emb[node][1] (the node's supernode) below
        :param name: name of model for record
        :param superG: weighted supernetwork graph; pairwise weighted
            shortest path lengths are precomputed from it here
        :param beta::float: distance tunable parameter
        """
        self.test_edges = test_edges
        self.negative_edges = negative_edges
        self.emb = emb
        self.name = name
        self.superG = superG
        self.beta = beta
        logger.info("shortest path length of supernetwork start:")
        # Precompute all-pairs weighted shortest path lengths. Small graphs
        # use networkx's dict-of-dicts result directly; large graphs fill a
        # dense matrix node by node with progress logging.
        if len(superG.nodes) < 30000:
            self.superG_shortest_path_length = dict(nx.shortest_path_length(superG, weight='weight'))
        else:
            # NOTE(review): indexing np.zeros with `[node][i]` assumes node
            # labels are integers in range(len(superG.nodes)) -- confirm.
            # temp_distance[i] will also raise KeyError for disconnected
            # node pairs (no path).
            self.superG_shortest_path_length = np.zeros((len(superG.nodes), len(superG.nodes)))
            superG_nodes_list = list(superG.nodes())
            temp = 0
            for node in superG_nodes_list:
                temp_distance = dict(nx.shortest_path_length(superG, node, weight='weight'))
                for i in superG_nodes_list:
                    self.superG_shortest_path_length[node][i] = temp_distance[i]
                temp += 1
                if temp % 10000 == 0:
                    logger.info("shortest path length of supernetwork progress: {}/{}".format(temp, len(superG_nodes_list)))
        logger.info("shortest path length of supernetwork complete!")
        # Similarity scores, filled in by calculate_link_prediction_score()
        self.link_prediction_score_positive = []
        self.link_prediction_score_negative = []
    def do_link_prediction(self):
        """
        Execute link prediction: score all edges, then evaluate.

        :return: tuple (roc_auc_value, aupr_value, average_precision,
            precision_score)
        """
        self.calculate_link_prediction_score()
        roc_auc_value, aupr_value, average_precision, precision_score = self.calculate_different_metrics()
        logger.info(self.name)
        logger.info(roc_auc_value)
        return roc_auc_value, aupr_value, average_precision, precision_score
    def _calculate_community_distance(self, node_x, node_y):
        """Directed distance from node_x to node_y: the weighted shortest
        path length between their supernodes, blended (via beta) with the
        negative log of node_x's embedding probability."""
        # NOTE(review): this fallback branch is effectively dead --
        # __init__ always sets superG_shortest_path_length.
        if not hasattr(self, 'superG_shortest_path_length'):
            shortest_path_length = nx.shortest_path_length(self.superG, self.emb[node_x][1], self.emb[node_y][1], weight='weight')
        else:
            shortest_path_length = self.superG_shortest_path_length[
                self.emb[node_x][1]][self.emb[node_y][1]
            ]
        result = self.beta*shortest_path_length - (1 - self.beta)*np.log(self.emb[node_x][0])
        return result
    def calculate_link_prediction_score(self):
        """
        Calculate similarity score for test and negative edges
        """
        logger.info("Calculate link prediction score positive")
        self.link_prediction_score_positive = np.array(
            self.calculate_score(self.test_edges)
        )
        logger.info("Calculate link prediction score negative")
        self.link_prediction_score_negative = np.array(
            self.calculate_score(self.negative_edges)
        )
    def calculate_score(self, edge_list):
        """
        Calculate similarity score for edge_list
        :param edge_list: list of target edges.
        :return: score_list: score list of given edge_lists
        """
        # Score is the negated, symmetrized community distance: larger
        # (less negative) means the pair is more likely to be linked.
        score_list = [-1*(self._calculate_community_distance(source, target) + self._calculate_community_distance(target, source))/2 for source, target in edge_list]
        return score_list
    def calculate_different_metrics(self):
        """
        Calculate ROC_AUC values
        Calculate AUPR--area under precision-recall curve
        Calculate Average precision score
        Calculate precision score
        """
        logger.info("Calculate ROC_AUC values")
        # Positive (test) edges are labeled 1, negative edges 0
        y_true = np.concatenate(
            [
                np.ones_like(self.link_prediction_score_positive),
                np.zeros_like(self.link_prediction_score_negative),
            ]
        )
        y_score = np.concatenate(
            [self.link_prediction_score_positive, self.link_prediction_score_negative],
            axis=0,
        )
        # Hard 0/1 predictions (needed for precision_score) are obtained by
        # thresholding at the median score; the other metrics are
        # threshold-free and use y_score directly.
        y_median = np.median(y_score)
        y_predict = np.where(y_score > y_median, 1, 0)
        roc_auc_value = metrics.roc_auc_score(y_true, y_score)
        precision, recall, _ = metrics.precision_recall_curve(y_true, y_score)
        aupr_value = metrics.auc(recall, precision)
        average_precision = metrics.average_precision_score(y_true, y_score)
        precision_score = metrics.precision_score(y_true, y_predict)
        return roc_auc_value, aupr_value, average_precision, precision_score
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Plotting of variables, adjectives, ... using gnuplot"""
__revision__ = "$Id: doc.py,v 1.15 2010-10-29 19:24:41 rliebscher Exp $"
import sys
import Gnuplot
import Gnuplot.funcutils
def getMinMax(set):
    """get tuple with minimum and maximum x-values used by the set.

    Returns (None, None) for an empty set. Like the original bookkeeping,
    this takes the first yielded value as the minimum and the last as the
    maximum (getValuesX is assumed to yield values in ascending order).
    """
    xs = list(set.getValuesX())
    if not xs:
        return (None, None)
    return (xs[0], xs[-1])
def getGlobalMinMax(sets):
    """get tuple with minimum and maximum x-values used by the sets of this dicts of sets."""
    x_min = None
    x_max = None
    for candidate in sets.values():
        (lo, hi) = getMinMax(candidate)
        # Track the running extremes across all sets
        if x_min is None or lo < x_min:
            x_min = lo
        if x_max is None or hi > x_max:
            x_max = hi
    return (x_min, x_max)
def getPoints(sets):
    """Collect all important points of all adjectives in this dict of sets."""
    from fuzzy.set.Set import merge
    # Merge all sets into one (result of merge is always a Polygon object)
    merged = None
    for current in sets.values():
        merged = current if merged is None else merge(max, current, merged)
    # Collect the x-coordinates of the merged polygon's points
    xs = [point[0] for point in merged.points]
    # Drop consecutive duplicates so no x-value appears twice in a row
    result = xs[:1]
    for previous, current in zip(xs, xs[1:]):
        if current != previous:
            result.append(current)
    return result
def getSets(variable):
    """Get all sets of adjectives in this variable."""
    # Map each adjective name to the fuzzy set it carries
    return dict(
        (adjective_name, adjective.set)
        for (adjective_name, adjective) in variable.adjectives.items()
    )
class Doc(object):
    """Main object. Get an instance of this to do your work.

    Renders fuzzy variables, sets and system transfer functions as PNG
    files (via gnuplot) into the given target directory.
    """
    def __init__(self,directory="doc"):
        # target directory for the generated PNG files
        self.directory = directory
        self.overscan = 0.1 #: the plotted range is M{[min-o,max+o]} with M{o=(max-min)*overscan}
    def setTerminal(self,g,filename):
        """Configure gnuplot instance g to write a PNG into self.directory."""
        g("set terminal png small transparent truecolor nocrop")
        g("set output '%s/%s.png'" % (self.directory,filename))
    def initGnuplot2D(self,filename="plot",xlabel=None,ylabel=None,title=None,xrange_=None,yrange=None,x_logscale=0,y_logscale=0):
        """Create a gnuplot instance preconfigured for a 2D PNG plot.

        Labels/title/ranges are applied only when given; axes without an
        explicit range are autoscaled, and a log scale forces autoscale.
        """
        g = Gnuplot.Gnuplot(debug=0)
        self.setTerminal(g,filename)
        # pylint: disable=C0321
        if xlabel is not None: g.xlabel(xlabel)
        if ylabel is not None: g.ylabel(ylabel)
        if title is not None: g.title(title)
        if xrange_ is not None: g('set xrange [%f:%f]' % xrange_)
        else: g('set autoscale x')
        if yrange is not None: g('set yrange [%f:%f]' % yrange)
        else: g('set autoscale y')
        if x_logscale: g('set logscale x'); g('set autoscale x')
        if y_logscale: g('set logscale y'); g('set autoscale y')
        return g
    def initGnuplot3D(self,filename="plot3D",xlabel=None,ylabel=None,zlabel=None,title=None,xrange_=None,yrange=None,zrange=None,x_logscale=0,y_logscale=0,z_logscale=0):
        """Create a gnuplot instance preconfigured for a 3D surface plot.

        Same conventions as initGnuplot2D, plus a z axis and surface
        styling (hidden-line removal, pm3d coloring, contours).
        """
        g = Gnuplot.Gnuplot(debug=0)
        self.setTerminal(g,filename)
        # pylint: disable=C0321
        if xlabel is not None: g.xlabel(xlabel)
        if ylabel is not None: g.ylabel(ylabel)
        if zlabel is not None: g("set zlabel '%s'" % zlabel)
        if title is not None: g.title(title)
        if xrange_ is not None: g('set xrange [%f:%f]' % xrange_)
        else: g('set autoscale x')
        if yrange is not None: g('set yrange [%f:%f]' % yrange)
        else: g('set autoscale y')
        if zrange is not None: g('set zrange [%f:%f]' % zrange)
        else: g('set autoscale z')
        if x_logscale: g('set logscale x');g('set autoscale x')
        if y_logscale: g('set logscale y');g('set autoscale y')
        if z_logscale: g('set logscale z');g('set autoscale z')
        g('set style data lines')
        g('set hidden')
        g('set pm3d at s')
        g('set pm3d ftriangles interpolate 50,50')
        g('set contour surface')
        return g
    def getValues(self,v):
        """Return (x_min, x_max, x-values) for plotting the variable's sets."""
        return self.getValuesSets(getSets(v))
    def getValuesSets(self,sets):
        """Return (x_min, x_max, x-values) covering all sets, widened by the
        overscan margin on both sides."""
        (x_min,x_max) = getGlobalMinMax(sets)
        width = x_max - x_min
        x_min = x_min - self.overscan * width
        x_max = x_max + self.overscan * width
        # (this recomputed width is unused below)
        width = x_max - x_min
        # sample at every important point, plus the widened endpoints
        values = [x_min]+getPoints(sets)+[x_max]
        return (x_min,x_max,values)
    def createDoc(self,system):
        """create plots of all variables defined in the given system."""
        from fuzzy.OutputVariable import OutputVariable
        from fuzzy.InputVariable import InputVariable
        import fuzzy.defuzzify.Dict
        import fuzzy.fuzzify.Dict
        for name,var in system.variables.items():
            # Dict-based variables carry no plottable membership functions
            if isinstance(var,OutputVariable) and isinstance(var.defuzzify,fuzzy.defuzzify.Dict.Dict):
                sys.stderr.write("ignore variable %s because it is of type OutputVariable => Dict\n" % name)
            elif isinstance(var,InputVariable) and isinstance(var.fuzzify,fuzzy.fuzzify.Dict.Dict):
                sys.stderr.write("ignore variable %s because it is of type InputVariable => Dict\n" % name)
            else:
                self.createDocVariable(var,name)
    def createDocVariable(self,v,name,x_logscale=0,y_logscale=0):
        """Creates a 2D plot of a variable"""
        self.createDocSets(getSets(v),name,x_logscale,y_logscale,description=v.description,units=v.unit)
    def createDocSets(self,sets,name,x_logscale=0,y_logscale=0,description=None,units=None):
        """Creates a 2D plot of dict of sets"""
        import fuzzy.set.Polygon
        # sort sets by lowest x values and higher membership values next
        def sort_key(a):
            s = sets[a]
            # NOTE: iterator .next() is Python 2 only (next(...) in Python 3)
            x = s.getValuesX().next()
            return (x,-s(x))
        (x_min,x_max,x) = self.getValuesSets(sets)
        # calculate values
        plot_items = []
        for s_name in sorted(sets,key=sort_key):
            s = sets[s_name]
            if isinstance(s,fuzzy.set.Polygon.Polygon):
                # polygons are plotted from their own points, padded to the
                # full x-range so the filled curve spans the whole plot
                p = [(x_min,s(x_min))] + s.points + [(x_max,s(x_max))]
                plot_item = Gnuplot.PlotItems.Data(p,title=s_name)
            else:
                # other set types are sampled at the collected x-values
                plot_item = Gnuplot.funcutils.compute_Data(x,s,title=s_name)
            plot_items.append(plot_item)
        xlabel = description or ""
        if units is not None:
            xlabel += " [%s]" % units
        g = self.initGnuplot2D(filename=name,xlabel=xlabel,ylabel="membership",title=name,xrange_=(x_min,x_max),yrange=(-0.2,1.2),x_logscale=x_logscale,y_logscale=y_logscale)
        g('set style fill transparent solid 0.5 border')
        g('set style data filledcurves y1=0')
        g.plot(*plot_items)
        g.close()
    def create2DPlot(self,system,x_name,y_name,input_dict=None,output_dict=None,x_logscale=0,y_logscale=0):
        """Creates a 2D plot of an input variable and an output variable.
        Other (const) variables have to be set beforehand in the dictionary input_dict.
        @param system: the fuzzy system to use
        @type system: L{fuzzy.System.System}
        @param x_name: name of input variable used for x coordinate values
        @type x_name: string
        @param y_name: name of output variable used for y coordinate values
        @type y_name: string
        @param input_dict: dictionary used for input values, can be used to predefine other input values
        @type input_dict: dict
        @param output_dict: dictionary used for output values
        @type output_dict: dict
        @param x_logscale: use logarithmic scale for x values
        @type x_logscale: bool
        @param y_logscale: use logarithmic scale for y values
        @type y_logscale: bool
        """
        input_dict = input_dict or {}
        output_dict = output_dict or {}
        (x_min,x_max,x) = self.getValues(system.variables[x_name])
        # transfer function: run the fuzzy system at each sampled x
        def f(x):
            input_dict[x_name] = x
            output_dict[y_name] = 0.0
            system.calculate(input_dict,output_dict)
            return output_dict[y_name]
        g = self.initGnuplot2D(filename=x_name+"_"+y_name,xlabel=x_name,ylabel=y_name,title=y_name+"=f("+x_name+")",xrange_=(x_min,x_max),x_logscale=x_logscale,y_logscale=y_logscale)
        g('set style data lines')
        g.plot(Gnuplot.funcutils.compute_Data(x, f))
        g.close()
    def create3DPlot(self,system,x_name,y_name,z_name,input_dict=None,output_dict=None,x_logscale=0,y_logscale=0,z_logscale=0):
        """Creates a 3D plot of 2 input variables and an output variable.
        Other (const) variables have to be set beforehand in the dictionary input_dict.
        @param system: the fuzzy system to use
        @type system: L{fuzzy.System.System}
        @param x_name: name of input variable used for x coordinate values
        @type x_name: string
        @param y_name: name of input variable used for y coordinate values
        @type y_name: string
        @param z_name: name of output variable used for z coordinate values
        @type z_name: string
        @param input_dict: dictionary used for input values, can be used to predefine other input values
        @type input_dict: dict
        @param output_dict: dictionary used for output values
        @type output_dict: dict
        @param x_logscale: use logarithmic scale for x values
        @type x_logscale: bool
        @param y_logscale: use logarithmic scale for y values
        @type y_logscale: bool
        @param z_logscale: use logarithmic scale for z values
        @type z_logscale: bool
        """
        input_dict = input_dict or {}
        output_dict = output_dict or {}
        (x_min,x_max,x) = self.getValues(system.variables[x_name])
        (y_min,y_max,y) = self.getValues(system.variables[y_name])
        # transfer function: run the fuzzy system at each (x, y) grid point
        def f(x,y):
            input_dict[x_name] = x
            input_dict[y_name] = y
            output_dict[z_name] = 0.0
            system.calculate(input_dict,output_dict)
            return output_dict[z_name]
        g = self.initGnuplot3D(filename=x_name+"_"+y_name+"_"+z_name,xlabel=x_name,ylabel=y_name,zlabel=z_name,title="%s=f(%s,%s)" % (z_name,x_name,y_name),xrange_=(x_min,x_max),yrange=(y_min,y_max),x_logscale=x_logscale,y_logscale=y_logscale,z_logscale=z_logscale)
        g.splot(Gnuplot.funcutils.compute_GridData(x,y, f,binary=0))
        g.close()
    def create3DPlot_adjective(self,system,x_name,y_name,z_name,adjective,input_dict=None,output_dict=None,x_logscale=0,y_logscale=0,z_logscale=0):
        """Creates a 3D plot of 2 input variables and an adjective of the output variable.
        Other (const) variables have to be set beforehand in the dictionary input_dict.
        @param system: the fuzzy system to use
        @type system: L{fuzzy.System.System}
        @param x_name: name of input variable used for x coordinate values
        @type x_name: string
        @param y_name: name of input variable used for y coordinate values
        @type y_name: string
        @param z_name: name of output variable used for z coordinate values
        @type z_name: string
        @param adjective: name of adjective of output variable used for z coordinate values
        @type adjective: string
        @param input_dict: dictionary used for input values, can be used to predefine other input values
        @type input_dict: dict
        @param output_dict: dictionary used for output values
        @type output_dict: dict
        @param x_logscale: use logarithmic scale for x values
        @type x_logscale: bool
        @param y_logscale: use logarithmic scale for y values
        @type y_logscale: bool
        @param z_logscale: use logarithmic scale for z values
        @type z_logscale: bool
        """
        input_dict = input_dict or {}
        output_dict = output_dict or {}
        (x_min,x_max,x) = self.getValues(system.variables[x_name])
        (y_min,y_max,y) = self.getValues(system.variables[y_name])
        # transfer function: membership of the given output adjective
        def f(x,y):
            input_dict[x_name] = x
            input_dict[y_name] = y
            output_dict[z_name] = 0.0
            system.calculate(input_dict,output_dict)
            return output_dict[z_name][adjective]
        g = self.initGnuplot3D(filename=x_name+"_"+y_name+"_"+z_name+"_"+adjective,xlabel=x_name,ylabel=y_name,zlabel=z_name,title="%s.%s=f(%s,%s)" % (z_name,adjective,x_name,y_name),xrange_=(x_min,x_max),yrange=(y_min,y_max),zrange=(0,1),x_logscale=x_logscale,y_logscale=y_logscale,z_logscale=z_logscale)
        g("set xyplane at 0")
        g("set cntrparam levels incremental 0.1,0.2,1.0")
        g.splot(Gnuplot.funcutils.compute_GridData(x,y, f,binary=0))
        g.close()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-06-25 17:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: joins the two divergent 0002 branches of the
    # 'estoque' app back into a single migration history. It applies
    # no schema or data changes of its own.
    dependencies = [
        ('estoque', '0002_initial_data'),
        ('estoque', '0002_auto_20170625_1450'),
    ]
    operations = [
    ]
|
import csv
import random
import time
import argparse
import os
import cv2
import flow_vis
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from network import *
from utils import *
from metrics import PSNR, SSIM, MSE
# NOTE(review): deterministic=True requests reproducible cuDNN kernels, but
# benchmark=True lets cuDNN autotune (and potentially switch) algorithms per
# input size, which can undermine run-to-run reproducibility. The combination
# looks contradictory -- confirm which behavior is intended.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
# Configure command-line options for training.
# Help texts are kept in sync with the actual defaults (several previously
# claimed wrong values, e.g. in-features "default: 2" vs actual 3 and
# flow-warmup-step "default: 3000" vs actual 2000).
parser = argparse.ArgumentParser(description='Training Config', add_help=False)
# Model
parser.add_argument('--in-features', type=int, default=3, metavar='N',
                    help='the number of input features (default: 3)')
parser.add_argument('--out-features', type=int, default=5, metavar='N',
                    help='the number of output features (default: 5)')
parser.add_argument('--hidden-features', type=int, default=256, metavar='N',
                    help='the number of hidden units (default: 256)')
parser.add_argument('--hidden-layers', type=int, default=3, metavar='N',
                    help='the number of layers (default: 3)')
# Training
parser.add_argument('--seed', type=int, default=50236, metavar='S',
                    help='random seed (default: 50236)')
parser.add_argument('--lr', type=float, default=0.001, metavar='N',
                    help='learning rate (default: 0.001)')
parser.add_argument('--training-step', type=int, default=30000, metavar='N',
                    help='the number of training iterations (default: 30000)')
parser.add_argument('--flow-warmup-step', type=int, default=2000, metavar='N',
                    help='flow-only training warmup steps (default: 2000)')
parser.add_argument('--image-warmup-step', type=int, default=5000, metavar='N',
                    help='image reconstruction warmup steps (default: 5000)')
# Data
parser.add_argument('--data-dir', type=str, default='./data', help='data directory')
parser.add_argument('--video-name', type=str, default='alley_1', help='video name')
parser.add_argument('--start-frame', type=int, default=0, metavar='N',
                    help='the starting frame (default: 0)')
parser.add_argument('--num-frames', type=int, default=7, metavar='N',
                    help='the number of frames (default: 7)')
parser.add_argument('--jpeg-quality', type=int, default=90, metavar='N',
                    help='jpeg quality (default: 90)')
parser.add_argument('--use-estimator', action='store_true', default=False,
                    help='use flow estimator (default: False)')
parser.add_argument('--tag', type=str, default='temp', help='tag')
parser.add_argument('--checkpoint-iter', type=int, default=0, metavar='N', help='checkpoint iteration')
if __name__=='__main__':
args = parser.parse_args()
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.manual_seed(args.seed)
torch.cuda.set_device(0)
target_frames, target_flow, key_frame, key_frame_size = load_video(
args.data_dir, args.video_name, args.num_frames, args.start_frame,
args.jpeg_quality, tag=args.tag, use_estimator=args.use_estimator)
target_flow = target_flow.permute(0,2,3,1).cuda()
target_frames = target_frames.cuda()
key_frame = key_frame.cuda()
T = args.num_frames
H,W = target_frames.shape[2:]
input_grid = make_input_grid(T, H, W)
input_grid = input_grid.reshape(T, -1, args.in_features)
flow_grid = make_flow_grid(H,W)
flow_grid = flow_grid.unsqueeze(0)
target_residual = []
key_index = int((T/2))
for t in reversed(range(key_index)):
target_flow_grid_shift = apply_flow(flow_grid, target_flow[t].unsqueeze(0), H, W, direction='rl')
warped_im_target = F.grid_sample(target_frames[t+1].unsqueeze(0),
target_flow_grid_shift, padding_mode='border', align_corners=True)
target_residual.insert(0,warped_im_target - target_frames[t])
for t in range(key_index+1, T):
target_flow_grid_shift = apply_flow(flow_grid, target_flow[t-1].unsqueeze(0), H, W, direction='lr')
warped_im_target = F.grid_sample(target_frames[t-1].unsqueeze(0),
target_flow_grid_shift, padding_mode='border', align_corners=True)
target_residual.append(warped_im_target - target_frames[t])
target_residual = torch.concat(target_residual, 0)
net = Siren(hidden_features = args.hidden_features,
hidden_layers = args.hidden_layers,
in_features = args.in_features,
out_features = args.out_features,
outermost_linear = True)
net.cuda()
optimizer = optim.Adam(net.parameters(), lr = args.lr)
total_num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
model_byte_size = total_num_params*4+key_frame_size
metrics_list = []
if args.checkpoint_iter > 0:
dirname = "./results/{}/{}/".format(args.video_name, args.tag)
model_path = dirname + "model_{:05d}.pt".format(args.checkpoint_iter)
checkpoint = torch.load(model_path)
net.load_state_dict(checkpoint['model_state_dict'])
metrics_list = np.loadtxt(dirname+"metrics.csv", delimiter=',').tolist()
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start = time.time()
metrics = [MSE(), PSNR(), SSIM()]
max_iter = args.training_step + args.checkpoint_iter
for epoch in range(args.checkpoint_iter, max_iter):
net.train()
optimizer.zero_grad()
metric_epoch = [0.0]*(len(metrics)+2)
for t in reversed(range(key_index)):
flow, residual = net(input_grid[t])
flow = flow.reshape(1,H,W,2)
residual = residual.permute(1,0).reshape(1,3,H,W)
# making flow grid for grid_sample
# we are sampling pixels from the previous frame (right -> left)
flow_grid_shift = apply_flow(flow_grid, flow, H, W, direction='rl')
# warping image based on reconstred image or ground truth image
if epoch <= args.image_warmup_step:
if t==key_index-1:
warped_im = F.grid_sample(key_frame.unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
warped_im = F.grid_sample(target_frames[t+1].unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
if t==key_index-1:
warped_im = F.grid_sample(key_frame.unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
warped_im = F.grid_sample(reconstructed_im.detach(), flow_grid_shift, padding_mode='border', align_corners=True)
reconstructed_im = warped_im + residual
# metrics
for i in range(len(metrics)):
metric_epoch[i]+=metrics[i](reconstructed_im, target_frames[t].unsqueeze(0)).item()
# reconstructed error
im_error = reconstructed_im - target_frames[t].unsqueeze(0)
im_loss = torch.norm(im_error)
# flow error
flow_error = flow - target_flow[t].unsqueeze(0)
metric_epoch[-2] += float(torch.mean(flow_error.detach()**2))
flow_loss = torch.norm(flow_error)
# residual error
residual_error = residual - target_residual[t].unsqueeze(0)
metric_epoch[-1] += float(torch.mean(residual_error.detach()**2))
residual_loss = torch.norm(residual_error)
if epoch <= args.flow_warmup_step:
loss = flow_loss+residual_loss
else:
loss = im_loss
loss.backward()
for t in range(key_index+1, T):
flow, residual = net(input_grid[t-1])
flow = flow.reshape(1,H,W,2)
residual = residual.permute(1,0).reshape(1,3,H,W)
# making flow grid for grid_sample
# we are sampling pixels from the previous frame (left -> right)
flow_grid_shift = apply_flow(flow_grid, flow, H, W, direction='lr')
# warping image based on reconstred image or ground truth image
if epoch <= args.image_warmup_step:
if t==key_index+1:
warped_im = F.grid_sample(key_frame.unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
warped_im = F.grid_sample(target_frames[t-1].unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
if t==key_index+1:
warped_im = F.grid_sample(key_frame.unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
warped_im = F.grid_sample(reconstructed_im.detach(), flow_grid_shift, padding_mode='border', align_corners=True)
reconstructed_im = warped_im + residual
# metrics
for i in range(len(metrics)):
metric_epoch[i]+=metrics[i](reconstructed_im, target_frames[t].unsqueeze(0)).item()
# reconstructed error
im_error = reconstructed_im - target_frames[t].unsqueeze(0)
im_loss = torch.norm(im_error)
# flow error
flow_error = flow - target_flow[t-1].unsqueeze(0)
metric_epoch[-2] += float(torch.mean(flow_error.detach()**2))
flow_loss = torch.norm(flow_error)
# residual error
residual_error = residual - target_residual[t-1].unsqueeze(0)
metric_epoch[-1] += float(torch.mean(residual_error.detach()**2))
residual_loss = torch.norm(residual_error)
if epoch <= args.flow_warmup_step:
loss = flow_loss+residual_loss
else:
loss = im_loss
loss.backward()
# updating after entire frames
optimizer.step()
# logging
net.eval()
if epoch % 50 == 0:
for i in range(len(metrics)):
metric_epoch[i]+=metrics[i](key_frame.unsqueeze(0), target_frames[key_index].unsqueeze(0)).item()
metric_epoch = [m/T for m in metric_epoch]
metrics_list.append([model_byte_size]+metric_epoch)
print("[epoch {}] Size(bytes): {}, [MSE, PSNR, SSIM, MSE(flow), MSE(residual)]: [{:.06f}, {:.06f}, {:.06f}, {:.06f}, {:.06f}]".format(
epoch, model_byte_size, metric_epoch[0], metric_epoch[1], metric_epoch[2], metric_epoch[3], metric_epoch[4]))
if epoch % 5000 == 0:
dirname = "./results/{}/{}/".format(args.video_name, args.tag)
epoch_dirname = dirname + "{}_{}/".format(args.tag, epoch)
if not os.path.exists(epoch_dirname):
os.makedirs(epoch_dirname)
# from left to key frame
for t in reversed(range(key_index)):
pred_flow, residual = net(input_grid[t])
pred_flow = pred_flow.reshape(1,H,W,2)
residual = residual.permute(1,0).reshape(1,3,H,W)
flow_grid_shift = apply_flow(flow_grid, pred_flow, H, W, direction='rl')
target_flow_grid_shift = apply_flow(flow_grid, target_flow[t].unsqueeze(0), H, W, direction='rl')
# using key frames
if t==key_index-1:
warped_im = F.grid_sample(target_frames[t+1].unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
warped_im = F.grid_sample(reconstructed_im, flow_grid_shift, padding_mode='border', align_corners=True)
warped_im_target = F.grid_sample(target_frames[t+1].unsqueeze(0), target_flow_grid_shift, padding_mode='border', align_corners=True)
reconstructed_im = (warped_im + residual).detach()
pred_flow_im = flow2img(pred_flow.detach().squeeze().cpu().numpy())
target_flow_im = flow2img(target_flow[t].detach().squeeze().cpu().numpy())
residual_im = torch.abs(residual)
residual_im_target = torch.abs((warped_im_target - target_frames[t]))
show_tensor_to_image(warped_im.reshape(3, H, W), epoch_dirname+"warped_im_{:05d}".format(t))
show_tensor_to_image(residual_im.reshape(3, H, W), epoch_dirname+"residual_im_{:05d}".format(t))
show_tensor_to_image(reconstructed_im.reshape(3, H, W), epoch_dirname+"reconstructed_im_{:05d}".format(t))
show_tensor_to_image(torch.Tensor(pred_flow_im/255).permute(2,0,1), epoch_dirname+"/pred_flow_{:05d}".format(t))
show_tensor_to_image(warped_im_target.reshape(3, H, W), epoch_dirname+"warped_im_target_{:05d}".format(t))
show_tensor_to_image(residual_im_target.reshape(3, H, W), epoch_dirname+"residual_im_target{:05d}".format(t))
show_tensor_to_image(torch.Tensor(target_flow_im/255).permute(2,0,1), epoch_dirname+"/target_flow_{:05d}".format(t))
show_tensor_to_image(target_frames[t], epoch_dirname+"target_frame_{:05d}".format(t))
# to store key frame
target_flow_im = flow2img(target_flow[key_index].detach().squeeze().cpu().numpy())
show_tensor_to_image(torch.Tensor(target_flow_im/255).permute(2,0,1), epoch_dirname+"/target_flow_{:05d}".format(key_index))
show_tensor_to_image(target_frames[key_index], epoch_dirname+"target_frame_{:05d}".format(key_index))
# from key frame to right
for t in range(key_index+1, T):
pred_flow, residual = net(input_grid[t-1])
pred_flow = pred_flow.reshape(1,H,W,2)
residual = residual.permute(1,0).reshape(1,3,H,W)
flow_grid_shift = apply_flow(flow_grid, pred_flow, H, W, direction='lr')
target_flow_grid_shift = apply_flow(flow_grid, target_flow[t-1].unsqueeze(0), H, W, direction='lr')
# using key frames
if t==key_index+1:
warped_im = F.grid_sample(target_frames[t-1].unsqueeze(0), flow_grid_shift, padding_mode='border', align_corners=True)
else:
warped_im = F.grid_sample(reconstructed_im, flow_grid_shift, padding_mode='border', align_corners=True)
warped_im_target = F.grid_sample(target_frames[t-1].unsqueeze(0), target_flow_grid_shift, padding_mode='border', align_corners=True)
reconstructed_im = (warped_im + residual).detach()
pred_flow_im = flow2img(pred_flow.detach().squeeze().cpu().numpy())
target_flow_im = flow2img(target_flow[t-1].detach().squeeze().cpu().numpy())
residual_im = torch.abs(residual)
residual_im_target = torch.abs((warped_im_target - target_frames[t]))
show_tensor_to_image(warped_im.reshape(3, H, W), epoch_dirname+"warped_im_{:05d}".format(t))
show_tensor_to_image(residual_im.reshape(3, H, W), epoch_dirname+"residual_im_{:05d}".format(t))
show_tensor_to_image(reconstructed_im.reshape(3, H, W), epoch_dirname+"reconstructed_im_{:05d}".format(t))
show_tensor_to_image(torch.Tensor(pred_flow_im/255).permute(2,0,1), epoch_dirname+"/pred_flow_{:05d}".format(t))
show_tensor_to_image(warped_im_target.reshape(3, H, W), epoch_dirname+"warped_im_target_{:05d}".format(t))
show_tensor_to_image(residual_im_target.reshape(3, H, W), epoch_dirname+"residual_im_target{:05d}".format(t))
show_tensor_to_image(torch.Tensor(target_flow_im/255).permute(2,0,1), epoch_dirname+"/target_flow_{:05d}".format(t))
show_tensor_to_image(target_frames[t], epoch_dirname+"target_frame_{:05d}".format(t))
if (epoch+1) % 10000 == 0:
model_path = dirname + "model_{:05d}.pt".format(epoch+1)
torch.save({'epoch': epoch,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, model_path)
np.savetxt(dirname+"metrics.csv", metrics_list, delimiter=",")
end = time.time()
print("training_time: {}".format(end - start))
|
# Copyright (c) 2017 Blemundsbury AI Limited
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os.path
import json
from requests import Session
from requests_toolbelt.multipart import encoder
from .exceptions import CapeException
from .utils import check_list
import string
API_VERSION = 0.1  # embedded in every request URL: "<api_base>/<API_VERSION>/<method>"
class CapeClient:
    """
    The CapeClient provides access to all methods of the Cape API.
    """

    def __init__(self, api_base, admin_token=None):
        """
        :param api_base: The URL to send API requests to.
        :param admin_token: An admin token to authenticate with.
        """
        self.api_base = "%s/%s" % (api_base, API_VERSION)
        self.session = Session()
        # False until login() stores the 'session' cookie from the API response.
        self.session_cookie = False
        self.admin_token = admin_token
        self.user_token = None

    def _raw_api_call(self, method, parameters=None, monitor_callback=None):
        # Internal helper used by every public method: POSTs `parameters` as a
        # multipart form (or GETs when there are none), attaches authentication
        # as a query string, and raises CapeException on any failed response.
        if parameters is None:
            parameters={}
        url = "%s/%s" % (self.api_base, method)
        # A per-call 'token' entry wins over the client-wide admin token.
        if 'token' in parameters:
            token = parameters.pop('token')
            url += "?token=%s" % token
        elif self.admin_token:
            url += "?adminToken=%s" % self.admin_token
        # documentIds must be JSON-encoded for the multipart form unless the
        # caller already passed a serialized string.
        if 'documentIds' in parameters and not isinstance(parameters['documentIds'], str):
            parameters['documentIds'] = json.dumps(parameters['documentIds'])
        if parameters:
            m = encoder.MultipartEncoderMonitor.from_fields(fields=parameters, encoding='utf-8',
                                                            callback=monitor_callback)
            if self.session_cookie:
                r = self.session.post(url, data=m, cookies={'session': self.session_cookie},
                                      headers={'Content-Type': m.content_type})
            else:
                r = self.session.post(url, data=m, headers={'Content-Type': m.content_type})
        else:
            if self.session_cookie:
                r = self.session.get(url, cookies={'session': self.session_cookie})
            else:
                r = self.session.get(url)
        if r.status_code == 200 and r.json()['success']:
            return r
        else:
            raise CapeException(r.json()['result']['message'])

    def login(self, login, password):
        """
        Log in to the Cape API as an AI builder.

        :param login: The username to log in with.
        :param password: The password to log in with.
        :return:
        """
        r = self._raw_api_call('user/login', {'login': login, 'password': password})
        self.session_cookie = r.cookies['session']

    def logged_in(self):
        """
        Reports whether we're currently logged in.

        :return: Whether we're logged in or not.
        """
        # NOTE(review): `!= False` / `!= None` would more idiomatically be
        # truthiness checks, but the comparison semantics here are equivalent.
        return self.session_cookie != False or self.admin_token != None

    def logout(self):
        """
        Log out and clear the current session cookie.

        :return:
        """
        self._raw_api_call('user/logout')
        self.session_cookie = False
        self.user_token = None

    def get_admin_token(self):
        """
        Retrieve the admin token for the currently logged in user.

        :return: An admin token.
        """
        r = self._raw_api_call('user/get-admin-token')
        return r.json()['result']['adminToken']

    def get_user_token(self):
        """
        Retrieve a user token suitable for making 'answer' requests.

        :return: A user token.
        """
        r = self._raw_api_call('user/get-user-token')
        return r.json()['result']['userToken']

    def get_profile(self):
        """
        Retrieve the current user's profile.

        :return: A dictionary containing the user's profile.
        """
        r = self._raw_api_call('user/get-profile')
        return r.json()['result']

    def get_default_threshold(self):
        """
        Retrieve the default threshold used if one isn't explicitly specified when calling answer().

        :return: The current default threshold (either 'verylow', 'low', 'medium', 'high' or 'veryhigh').
        """
        r = self._raw_api_call('user/get-default-threshold')
        return r.json()['result']['threshold']

    def set_default_threshold(self, threshold):
        """
        Set the default threshold used if one isn't explicitly specified when calling answer().

        :param threshold: The new default threshold to set, must be either 'verylow', 'low', 'medium', 'high' or 'veryhigh'.
        :return: The new default threshold that's just been set.
        """
        r = self._raw_api_call('user/set-default-threshold', {'threshold': threshold})
        return r.json()['result']['threshold']

    def set_forward_email(self, email):
        """
        Set the email address that emails which couldn't be answered automatically are forwarded to.

        :param email: The new forward email address to set.
        :return: The new forward email address that's just been set.
        """
        r = self._raw_api_call('user/set-forward-email', {'email': email})
        return r.json()['result']['forwardEmail']

    def answer(self, question, user_token=None, threshold=None, document_ids=None,
               source_type='all', speed_or_accuracy='balanced', number_of_items=1, offset=0,
               text=None):
        """
        Provide a list of answers to a given question.

        :param question: The question to ask.
        :param user_token: A token retrieved from get_user_token (Default: the token for the currently authenticated user).
        :param threshold: The minimum confidence of answers to return ('verylow'/'low'/'medium'/'high'/'veryhigh').
        :param document_ids: A list of documents to search for answers (Default: all documents).
        :param source_type: Whether to search documents, saved replies or all ('document'/'saved_reply'/'all').
        :param speed_or_accuracy: Prioritise speed or accuracy in answers ('speed'/'accuracy'/'balanced').
        :param number_of_items: The number of answers to return.
        :param offset: The starting point in the list of answers, used in conjunction with number_of_items to retrieve multiple batches of answers.
        :param text: An inline text to be treated as a document with id "Inline Text".
        :return: A list of answers.
        """
        document_ids = check_list(document_ids, 'document IDs')
        if not question.strip():
            raise CapeException('Expecting question parameter to not be empty string')
        # Reject questions made entirely of punctuation (underscores excluded,
        # since they can appear in meaningful identifiers).
        invalidChars = set(string.punctuation.replace("_", ""))
        if all(ch in invalidChars for ch in question.strip().replace(" ", "")):
            raise CapeException(
                'All characters in question parameter are punctuation. At least one alpha-numeric character required.')
        params = {'token': user_token,
                  'question': question,
                  'threshold': threshold,
                  'documentIds': json.dumps(document_ids),
                  'sourceType': str(source_type),
                  'speedOrAccuracy': speed_or_accuracy,
                  'numberOfItems': str(number_of_items),
                  'offset': str(offset),
                  'text': text}
        # Optional parameters are removed rather than sent as "None".
        if user_token is None:
            params.pop('token')
            if not self.logged_in():
                raise CapeException("A user token must be supplied if the client isn't logged in.")
        if len(document_ids) == 0:
            params.pop('documentIds')
        if threshold is None:
            params.pop('threshold')
        if text is None:
            params.pop('text')
        r = self._raw_api_call('answer', params)
        return r.json()['result']['items']

    def get_inbox(self, read='both', answered='both', search_term='', number_of_items=30, offset=0):
        """
        Retrieve the items in the current user's inbox.

        :param read: Filter messages based on whether they have been read.
        :param answered: Filter messages based on whether they have been answered.
        :param search_term: Filter messages based on whether they contain the search term.
        :param number_of_items: The number of inbox items to return.
        :param offset: The starting point in the list of inbox items, used in conjunction with number_of_items to retrieve multiple batches of inbox items.
        :return: A list of inbox items in reverse chronological order (newest first).
        """
        r = self._raw_api_call('inbox/get-inbox', {'read': str(read),
                                                   'answered': str(answered),
                                                   'searchTerm': search_term,
                                                   'numberOfItems': str(number_of_items),
                                                   'offset': str(offset)})
        return r.json()['result']

    def mark_inbox_read(self, inbox_id):
        """
        Mark an inbox item as having been read.

        :param inbox_id: The inbox item to mark as being read.
        :return: The ID of the inbox item that was marked as read.
        """
        r = self._raw_api_call('inbox/mark-inbox-read', {'inboxId': str(inbox_id)})
        return r.json()['result']['inboxId']

    def archive_inbox(self, inbox_id):
        """
        Archive an inbox item.

        :param inbox_id: The inbox item to archive.
        :return: The ID of the inbox item that was archived.
        """
        r = self._raw_api_call('inbox/archive-inbox', {'inboxId': str(inbox_id)})
        return r.json()['result']['inboxId']

    def get_saved_replies(self, search_term='', saved_reply_ids=None, number_of_items=30, offset=0):
        """
        Retrieve a list of saved replies.

        :param search_term: Filter saved replies based on whether they contain the search term.
        :param saved_reply_ids: List of saved reply IDs to return.
        :param number_of_items: The number of saved replies to return.
        :param offset: The starting point in the list of saved replies, used in conjunction with number_of_items to retrieve multiple batches of saved replies.
        :return: A list of saved replies in reverse chronological order (newest first).
        """
        saved_reply_ids = check_list(saved_reply_ids, 'saved reply IDs')
        params = {'searchTerm': search_term,
                  'savedReplyIds': json.dumps(saved_reply_ids),
                  'numberOfItems': str(number_of_items),
                  'offset': str(offset)}
        if len(saved_reply_ids) == 0:
            params.pop('savedReplyIds')
        r = self._raw_api_call('saved-replies/get-saved-replies', params)
        return r.json()['result']

    def create_saved_reply(self, question, answer):
        # Backwards-compatible alias for add_saved_reply.
        return self.add_saved_reply(question, answer)

    def add_saved_reply(self, question, answer, replace=False):
        """
        Create a new saved reply.

        Saved replies are made up of a pair consisting of a canonical question and the response it should produce.
        In addition to the canonical question a saved reply may have many paraphrased questions associated with it
        which should produce the same answer (e.g. "How old are you?" vs "What is your age?").

        :param question: The question this saved reply relates to.
        :param answer: The answer to reply with when the question is asked.
        :param replace: If true and a saved reply already exists with the same question its answers will be overwritten with the new answer. If false an error is returned when a question already exists.
        :return: The IDs of the new saved reply and answer.
        """
        r = self._raw_api_call('saved-replies/add-saved-reply', {'question': question,
                                                                 'answer': answer,
                                                                 'replace': str(replace)})
        return r.json()['result']

    def delete_saved_reply(self, reply_id):
        """
        Delete a saved reply.

        :param reply_id: The ID of the saved reply to delete.
        :return: The ID of the saved reply that was deleted.
        """
        r = self._raw_api_call('saved-replies/delete-saved-reply', {'replyId': str(reply_id)})
        return r.json()['result']['replyId']

    def add_paraphrase_question(self, reply_id, question):
        """
        Add a new paraphrase question to an existing saved reply.

        :param reply_id: The ID of the saved reply to add this question to.
        :param question: The new paraphrase of this saved reply's canonical question.
        :return: The ID of the new question.
        """
        r = self._raw_api_call('saved-replies/add-paraphrase-question',
                               {'replyId': str(reply_id), 'question': question})
        return r.json()['result']['questionId']

    def edit_paraphrase_question(self, question_id, question):
        """
        Modify an existing paraphrase question.

        :param question_id: The ID of the question to modify.
        :param question: The modified question text.
        :return: The ID of the question that was modified.
        """
        r = self._raw_api_call('saved-replies/edit-paraphrase-question',
                               {'questionId': str(question_id), 'question': question})
        return r.json()['result']['questionId']

    def edit_canonical_question(self, reply_id, question):
        """
        Modify the canonical question belonging to a saved reply.

        :param reply_id: The ID of the saved reply to modify the canonical question of.
        :param question: The modified question text.
        :return: The ID of the saved reply that was modified.
        """
        r = self._raw_api_call('saved-replies/edit-canonical-question',
                               {'replyId': str(reply_id), 'question': question})
        return r.json()['result']['replyId']

    def delete_paraphrase_question(self, question_id):
        """
        Delete a paraphrase question.

        :param question_id: The ID of the paraphrase question to delete.
        :return: The ID of the paraphrase question that was deleted.
        """
        r = self._raw_api_call('saved-replies/delete-paraphrase-question', {'questionId': str(question_id)})
        return r.json()['result']['questionId']

    def add_answer(self, reply_id, answer):
        """
        Add a new answer to an existing saved reply.

        :param reply_id: The ID of the saved reply to add this answer to.
        :param answer: A new answer to add to the saved reply.
        :return: The ID of the newly created answer.
        """
        r = self._raw_api_call('saved-replies/add-answer', {'replyId': str(reply_id), 'answer': answer})
        return r.json()['result']['answerId']

    def edit_answer(self, answer_id, answer):
        """
        Modify an existing answer.

        :param answer_id: The ID of the answer to edit.
        :param answer: The modified answer text.
        :return: The ID of the answer that was modified.
        """
        r = self._raw_api_call('saved-replies/edit-answer', {'answerId': str(answer_id), 'answer': answer})
        return r.json()['result']['answerId']

    def delete_answer(self, answer_id):
        """
        Delete an existing an answer.

        :param answer_id: The ID of the answer to delete.
        :return: The ID of the answer that was deleted.
        """
        r = self._raw_api_call('saved-replies/delete-answer', {'answerId': str(answer_id)})
        return r.json()['result']['answerId']

    def get_documents(self, document_ids=None, number_of_items=30, offset=0):
        """
        Retrieve this user's documents.

        :param document_ids: A list of documents to return.
        :param number_of_items: The number of documents to return.
        :param offset: The starting point in the list of documents, used in conjunction with number_of_items to retrieve multiple batches of documents.
        :return: A list of documents in reverse chronological order (newest first).
        """
        document_ids = check_list(document_ids, 'document IDs')
        params = {'documentIds': json.dumps(document_ids),
                  'numberOfItems': str(number_of_items),
                  'offset': str(offset)}
        if len(document_ids) == 0:
            params.pop('documentIds')
        r = self._raw_api_call('documents/get-documents', params)
        return r.json()['result']

    def upload_document(self, title, text=None, file_path=None, document_id='', origin='', replace=False,
                        document_type=None, monitor_callback=None):
        # Backwards-compatible alias for add_document.
        return self.add_document(title, text, file_path, document_id, origin, replace, document_type, monitor_callback)

    def add_document(self, title, text=None, file_path=None, document_id='', origin='', replace=False,
                     document_type=None, monitor_callback=None):
        """
        Create a new document or replace an existing document.

        :param title: The title to give the new document.
        :param text: The plain text contents of the document (either text or file_path must be supplied).
        :param file_path: A file to upload (either text or file_path must be supplied).
        :param document_id: The ID to give the new document (Default: An SHA256 hash of the document contents).
        :param origin: Where the document came from.
        :param replace: If true and a document already exists with the same document ID it will be overwritten with the new upload. If false an error is returned when a document ID already exists.
        :param document_type: Whether this document was created by inputting text or uploading a file (if not set this will be automatically determined).
        :param monitor_callback: A method to call with updates on the file upload progress.
        :return: The ID of the uploaded document.
        """
        if text is not None:
            if document_type is None:
                document_type = 'text'
            r = self._raw_api_call('documents/add-document', {'title': title,
                                                              'text': text,
                                                              'documentId': document_id,
                                                              'origin': origin,
                                                              'replace': str(replace)},
                                   monitor_callback=monitor_callback)
        elif file_path is not None:
            if document_type is None:
                document_type = 'file'
            directory, file_name = os.path.split(file_path)
            # NOTE(review): fh is not closed if the API call raises; a
            # `with open(...)` block would guarantee cleanup — confirm and fix.
            fh = open(file_path, 'rb')
            r = self._raw_api_call('documents/add-document', {'title': title,
                                                              'text': fh,
                                                              'documentId': document_id,
                                                              'origin': origin,
                                                              'replace': str(replace)},
                                   monitor_callback=monitor_callback)
            fh.close()
        else:
            raise CapeException("Either the 'text' or the 'file_path' parameter are required for document uploads.")
        return r.json()['result']['documentId']

    def delete_document(self, document_id):
        """
        Delete a document.

        :param document_id: The ID of the document to delete.
        :return: The ID of the document that was deleted.
        """
        r = self._raw_api_call('documents/delete-document', {'documentId': document_id})
        return r.json()['result']['documentId']

    def add_annotation(self, question, answer, document_id, start_offset=None, end_offset=None, metadata=None):
        """
        Create a new annotation for a specified document.

        Annotations are made up of a pair consisting of a canonical question, the response it should produce and a
        location within a specific document that this answer corresponds to.
        In addition to the canonical question an annotation may have many paraphrased questions associated with it
        which should produce the same answer (e.g. "How old are you?" vs "What is your age?").

        :param question: The question this annotation relates to.
        :param answer: The answer to reply with when the question is asked.
        :param document_id: The document which this annotation corresponds to.
        :param start_offset: The starting location of the annotation within the specified document.
        :param end_offset: The ending location of the annotation within the specified document.
        :param metadata: A dictionary containing user definable metadata about this annotation.
        :return: The IDs of the new annotation and answer.
        """
        params = {
            'question': question,
            'answer': answer,
            'documentId': document_id,
            'startOffset': str(start_offset),
            'endOffset': str(end_offset),
            'metadata': json.dumps(metadata)
        }
        if start_offset is None:
            params.pop('startOffset')
        if end_offset is None:
            params.pop('endOffset')
        if metadata is None:
            params.pop('metadata')
        r = self._raw_api_call('annotations/add-annotation', params)
        return r.json()['result']

    def get_annotations(self, search_term='', annotation_ids=None, document_ids=None, pages=None, number_of_items=30,
                        offset=0):
        """
        Retrieve a list of annotations.

        :param search_term: Filter annotations based on whether they contain the search term.
        :param annotation_ids: A list of annotations to return/search within (Default: all annotations).
        :param document_ids: A list of documents to return annotations from (Default: all documents).
        :param pages: A list of pages to return annotations from (Default: all pages).
        :param number_of_items: The number of annotations to return.
        :param offset: The starting point in the list of annotations, used in conjunction with number_of_items to retrieve multiple batches of annotations.
        :return: A list of annotations.
        """
        annotation_ids = check_list(annotation_ids, 'annotation IDs')
        document_ids = check_list(document_ids, 'document IDs')
        pages = check_list(pages, 'pages')
        params = {'searchTerm': search_term,
                  'annotationIds': json.dumps(annotation_ids),
                  'documentIds': json.dumps(document_ids),
                  'pages': json.dumps(pages),
                  'numberOfItems': str(number_of_items),
                  'offset': str(offset)}
        if len(annotation_ids) == 0:
            params.pop('annotationIds')
        if len(document_ids) == 0:
            params.pop('documentIds')
        if len(pages) == 0:
            params.pop('pages')
        r = self._raw_api_call('annotations/get-annotations', params)
        return r.json()['result']

    def delete_annotation(self, annotation_id):
        """
        Delete an annotation.

        :param annotation_id: The ID of the annotation to delete.
        :return: The ID of the annotation that was deleted.
        """
        r = self._raw_api_call('annotations/delete-annotation', {
            'annotationId': annotation_id
        })
        return r.json()['result']['annotationId']

    def edit_annotation_canonical_question(self, annotation_id, question):
        """
        Edit the canonical question of an annotation.

        :param annotation_id: The ID of the annotation to edit.
        :param question: The new canonical question for this annotation.
        :return: The ID of the annotation that was edited.
        """
        r = self._raw_api_call('annotations/edit-canonical-question', {
            'annotationId': annotation_id,
            'question': question
        })
        return r.json()['result']['annotationId']

    def add_annotation_paraphrase_question(self, annotation_id, question):
        """
        Add a new paraphrase question to an existing annotation.

        :param annotation_id: The ID of the annotation to add this question to.
        :param question: The new paraphrase of this annotation's canonical question.
        :return: The ID of the new question.
        """
        r = self._raw_api_call('annotations/add-paraphrase-question', {
            'annotationId': annotation_id,
            'question': question
        })
        return r.json()['result']['questionId']

    def edit_annotation_paraphrase_question(self, question_id, question):
        """
        Modify an existing paraphrase question in an annotation.

        :param question_id: The ID of the question to modify.
        :param question: The modified question text.
        :return: The ID of the question that was modified.
        """
        r = self._raw_api_call('annotations/edit-paraphrase-question', {
            'questionId': question_id,
            'question': question
        })
        return r.json()['result']['questionId']

    def delete_annotation_paraphrase_question(self, question_id):
        """
        Delete an annotation's paraphrase question.

        :param question_id: The ID of the question to delete.
        :return: The ID of the question that was deleted.
        """
        r = self._raw_api_call('annotations/delete-paraphrase-question', {
            'questionId': question_id
        })
        return r.json()['result']['questionId']

    def add_annotation_answer(self, annotation_id, answer):
        """
        Add a new answer to an existing annotation.

        :param annotation_id: The ID of the annotation to add this answer to.
        :param answer: The answer to add to the annotation.
        :return: The ID of the answer that was created.
        """
        r = self._raw_api_call('annotations/add-answer', {
            'annotationId': annotation_id,
            'answer': answer
        })
        return r.json()['result']['answerId']

    def edit_annotation_answer(self, answer_id, answer):
        """
        Edit an annotation's answer.

        :param answer_id: The ID of the answer to edit.
        :param answer: The new text to be used for this answer.
        :return: The ID of the answer that was edited.
        """
        r = self._raw_api_call('annotations/edit-answer', {
            'answerId': answer_id,
            'answer': answer
        })
        return r.json()['result']['answerId']

    def delete_annotation_answer(self, answer_id):
        """
        Delete an answer from an annotation.

        At least one answer must remain associated with an annotation.

        :param answer_id: The answer to delete
        :return: The ID of the answer that was deleted
        """
        r = self._raw_api_call('annotations/delete-answer', {
            'answerId': answer_id
        })
        return r.json()['result']['answerId']
|
import os
import pytest
import shutil
from muri.muda import Scale, Noise, NoiseScale, Transform
from muri.kantan import Scaler, Ichi, Ni, San, Both
def default_settings_model():
    """Return the model name of a default-configured (cpu) Scale."""
    return Scale().config().model
def selected_settings_model():
    """Return the model name when UpConv7 is selected explicitly."""
    chosen = Scale(model='UpConv7')
    return chosen.config().model
def scale_image():
    """Scale a single image into 'test' and return the produced file name."""
    upscaler = Scale()
    Transform(upscaler.cpu(), upscaler.config()).scale('images/small.png', 'test')
    produced = os.listdir('test')[0]
    shutil.rmtree('test')
    return produced
def scale_multple_images():
    """Scale every image in 'images' and return how many outputs were written."""
    upscaler = Scale()
    Transform(upscaler.cpu(), upscaler.config()).scale('images', 'test')
    count = len(os.listdir('test'))
    shutil.rmtree('test')
    return count
def denoise_image():
    """Denoise a single image into 'test' and return the produced file name."""
    cleaner = Noise()
    Transform(cleaner.cpu(), cleaner.config()).noise('images/small.png', 'test')
    produced = os.listdir('test')[0]
    shutil.rmtree('test')
    return produced
def denoise_multiple_images():
    """Denoise every image in 'images' and return how many outputs were written."""
    cleaner = Noise()
    Transform(cleaner.cpu(), cleaner.config()).noise('images', 'test')
    count = len(os.listdir('test'))
    shutil.rmtree('test')
    return count
def noise_and_scale():
    """Denoise and scale a single image; return the produced file name."""
    combo = NoiseScale()
    Transform(combo.cpu(), combo.config()).noise_scale('images/small.png', 'test')
    produced = os.listdir('test')[0]
    shutil.rmtree('test')
    return produced
def noise_and_scale_mutiple_images():
    """Denoise and scale every image in 'images'; return the output count."""
    combo = NoiseScale()
    Transform(combo.cpu(), combo.config()).noise_scale('images', 'test')
    count = len(os.listdir('test'))
    shutil.rmtree('test')
    return count
def kantan_scale():
    """Run the Scaler preset on one image; return the produced file name."""
    Scaler.go('images/small.png', 'test')
    result = os.listdir('test')[0]
    shutil.rmtree('test')
    return result
def kantan_ichi():
    """Run the Ichi preset on one image; return the produced file name."""
    Ichi.go('images/small.png', 'test')
    result = os.listdir('test')[0]
    shutil.rmtree('test')
    return result
def kantan_ni():
    """Run the Ni preset on one image; return the produced file name."""
    Ni.go('images/small.png', 'test')
    result = os.listdir('test')[0]
    shutil.rmtree('test')
    return result
def kantan_san():
    """Run the San preset on one image; return the produced file name."""
    San.go('images/small.png', 'test')
    result = os.listdir('test')[0]
    shutil.rmtree('test')
    return result
def kantan_both():
    """Run the Both preset on one image; return the produced file name."""
    Both.go('images/small.png', 'test')
    result = os.listdir('test')[0]
    shutil.rmtree('test')
    return result
def kantan_scale_batch():
    """Run the Scaler preset on a directory; return the output count."""
    Scaler.go('images', 'test')
    count = len(os.listdir('test'))
    shutil.rmtree('test')
    return count
def test_default_model():
    """The default scaling model is VGG7."""
    model = default_settings_model()
    assert model == 'VGG7'
def test_selected_model():
    """An explicitly selected model is honoured."""
    model = selected_settings_model()
    assert model == 'UpConv7'
def test_scale_image():
    """Scaling keeps the source file name."""
    produced = scale_image()
    assert produced == 'small.png'
def test_scale_multiple_images():
    """All six input images are scaled."""
    produced = scale_multple_images()
    assert produced == 6
def test_denoiser():
    """Denoising keeps the source file name."""
    produced = denoise_image()
    assert produced == 'small.png'
def test_multiple_denoiser_images():
    """All six input images are denoised."""
    produced = denoise_multiple_images()
    assert produced == 6
def test_noise_scale():
    """Combined denoise+scale keeps the source file name."""
    produced = noise_and_scale()
    assert produced == 'small.png'
def test_multiple_noise_scale_images():
    """All six input images survive combined denoise+scale.

    Renamed: this was a second ``test_multiple_denoiser_images`` definition,
    which shadowed the denoiser test above so pytest never collected it.
    """
    assert noise_and_scale_mutiple_images() == 6
def test_kantan_scale():
    """Scaler preset keeps the source file name."""
    produced = kantan_scale()
    assert produced == 'small.png'
def test_kantan_ichi():
    """Ichi preset keeps the source file name."""
    produced = kantan_ichi()
    assert produced == 'small.png'
def test_kantan_ni():
    """Ni preset keeps the source file name."""
    produced = kantan_ni()
    assert produced == 'small.png'
def test_kantan_san():
    """San preset keeps the source file name."""
    produced = kantan_san()
    assert produced == 'small.png'
def test_kantan_both():
    """Both preset keeps the source file name."""
    produced = kantan_both()
    assert produced == 'small.png'
def test_kantan_scale_batch():
    """Scaler preset processes all six images in batch mode.

    Fixed the "katan" -> "kantan" typo in the test name for consistency with
    the other kantan tests; pytest discovery is unaffected.
    """
    assert kantan_scale_batch() == 6
|
# Exercício Python 082:
# Crie um programa que vai ler vários números e colocar em uma lista.
# Depois disso, crie duas listas extras que vão conter apenas os valores pares e os valores ímpares digitados, respectivamente.
# Ao final, mostre o conteúdo das três listas geradas.
# Read integers into a list, then split them into even and odd lists and
# display all three.
lista_valores = list()
lista_par = list()
lista_impar = list()

continuar = 'S'
while continuar != 'N':
    valor_digitado = int(input("Digite um valor: "))
    lista_valores.append(valor_digitado)
    # Keep asking until a valid answer is given.  The original code re-asked
    # only once (a second invalid answer was accepted) and crashed with
    # IndexError on empty input because of the unconditional [0].
    while True:
        resposta = str(input("Quer continuar? [S/N] ")).strip().upper()
        if resposta and resposta[0] in 'SN':
            continuar = resposta[0]
            break
        print("Opção inválida,")

print("Processando os dados...")
lista_valores.sort()
for valor in lista_valores:
    if valor % 2 == 0:
        lista_par.append(valor)
    else:
        # any integer that is not even is odd
        lista_impar.append(valor)
print(f"Então, a lista completa é {lista_valores}")
print(f"A Lista Par é {lista_par}")
print(f"A lista Impar é {lista_impar}")
|
from os import path
import numpy as np
import pandas as pd
import impyute as impy
from matplotlib import pyplot as plt
plt.close("all")
data_path = 'data'
# read geographic information for capitals
municipios = pd.read_csv(path.join(data_path, 'population_capitals.csv'))
# population density = population / area
municipios['density'] = municipios['population'] / municipios['area']
# List of month labels (YYYY-MM) used as the output index
start_date = '2017-01-01'
end_date = '2022-03-01'
frequency = '1M'
dates = pd.date_range(start_date, end_date, freq=frequency) - pd.offsets.MonthBegin(1)
dates = dates.strftime("%Y-%m").values.tolist()[:-2]
data_output = []
headers = []
# Average the ascending and descending pass results for every capital.
# First column of municipios is assumed to be the capital name used in
# the gee_results file names.
for i in range(len(municipios)):
    municipio = municipios.iloc[i]
    asc = pd.read_csv(path.join(data_path + '/gee_results', municipio.iloc[0] + '_ASCENDING_.csv')).iloc[:, 1].to_numpy()
    dsc = pd.read_csv(path.join(data_path + '/gee_results', municipio.iloc[0] + '_DESCENDING_.csv')).iloc[:, 1].to_numpy()
    changes = (asc + dsc) / 2
    data_output.append(changes)
    headers.append(municipio.iloc[0])
# Convert to numpy and flip over the diagonal (capitals become columns)
data_output = np.rot90(np.fliplr(np.array(data_output)))
# Mark zeros as missing, then impute via expectation-maximization.
# Use np.nan directly: the original assigned the string 'Nan', which only
# worked because NumPy coerced it through float('Nan').
data_output[data_output == 0] = np.nan
data_output = impy.em(data_output)
data_output = pd.DataFrame(data=data_output, index=dates, columns=headers)
# Weight monthly values by population density, then average across capitals
change_index = []
for index, row in data_output.iterrows():
    monthly_total = 0
    # Series.iteritems() was removed in pandas 2.0; items() is equivalent.
    for i, v in row.items():
        monthly_total = monthly_total + (v * float(municipios[municipios['capital'] == i]['density']))
    monthly_total = monthly_total / len(row)
    change_index.append(monthly_total)
# Save as csv
data_output['change_index'] = change_index
data_output['change_index'].to_csv(path.join(data_path, 'change_index.csv'))
# DataFrame.drop with a positional axis argument was removed in pandas 2.0.
capitals = data_output.drop(columns='change_index')
# plot
plt.plot(capitals)
plt.xticks(rotation=80, ha='right')
plt.show()
print('eof')
|
#!/usr/bin/env python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path as osPath
from sys import path as sysPath
from time import sleep
from data_support import (
ControlState,
EkfState,
FamCommand,
Log,
PmcCommand,
PoseStamped,
Quaternion,
Vector3,
)
from pkg_resources import get_distribution, parse_version
try:
import rticonnextdds_connector as rti
except Exception as e:
raise ImportError(
"\nYou haven't installed a needed RTI DDS library. \n\nFirst, ensure you have"
+ " pip installed:\n\n\tsudo apt-get install python-pip\n\n"
+ "Then install the library:\n\n\tpip install rticonnextdds-connector\n"
)
import math
import threading
# Directory of this file; used to locate the DDS profile XML next to it.
filepath = osPath.dirname(osPath.realpath(__file__))
# Single module-level connector shared by every subscriber below.
connector = rti.Connector(
    "MyParticipantLibrary::Zero", filepath + "/dds_types/CurrentDDSProfile.xml"
)
# Connector releases older than 0.4.1 use a different sample-indexing
# convention; the subscriber loops below branch on this flag.
rti_version = get_distribution("rticonnextdds-connector").version
is_old_version = parse_version(rti_version) < parse_version("0.4.1")
def is_shutdown():
    """Always return False (shutdown-polling stub).

    NOTE(review): presumably mirrors a rospy-style ``is_shutdown()`` poll
    for callers of this DDS bridge — confirm intended semantics.
    """
    return False
class DdsSubscriberManager:
    """Registry of DdsSubscriber threads sharing a single semaphore.

    The same semaphore instance is handed to every subscriber (presumably
    to serialize use of the module-level DDS connector — confirm).
    """

    def __init__(self):
        # Instance attributes: the original declared these at class level,
        # so every manager instance shared one subscribers dict.
        self.subscribers = dict()
        self.sem = threading.Semaphore()

    def add_subscriber(self, key, subscriber, auto_start):
        """Register *subscriber* under *key*; optionally start it now."""
        self.subscribers[key] = subscriber
        if auto_start:
            self.subscribers[key].start_sync(self.sem)
        else:
            # Hand over the shared semaphore so a later start_sync works.
            self.subscribers[key].sem = self.sem

    def get_subscriber(self, key):
        """Return the subscriber registered under *key*, or None."""
        return self.subscribers.get(key, None)

    def start_subscriber(self, key):
        """Start the subscriber under *key*.

        Returns True if it is alive afterwards, False when the key is
        unknown or the thread failed to start.
        """
        sub = self.get_subscriber(key)
        if sub is None:
            return False
        sub.start_sync(self.sem)
        # is_alive() replaces camelCase isAlive(), removed in Python 3.9.
        return sub.is_alive()

    def stop_all(self):
        """Signal every registered subscriber to stop."""
        for key, value in list(self.subscribers.items()):
            value.stop()

    def start_all(self):
        """Start every registered subscriber.

        Fixes the original ``self.subscribers.itemitems()`` typo, which
        raised AttributeError whenever start_all() was called.
        """
        for key, value in self.subscribers.items():
            value.start_sync(self.sem)
class DdsSubscriber(threading.Thread):
    """Daemon thread that drains samples from one DDS input.

    Each valid sample dictionary is translated into a ROS-style message
    and passed to the supplied callback. The shared semaphore (attached
    via start_sync) gates each take cycle on the module-level connector.
    """

    def __init__(self, dds_sub, callback, ros_type):
        """dds_sub: DDS input name; callback: fn(msg); ros_type: target class."""
        threading.Thread.__init__(self)
        # All state is per-instance: the original declared these as class
        # attributes, which subscriber threads would otherwise share.
        self.inputDDS = connector.getInput(dds_sub)
        self.sem = None
        self.stopper = threading.Event()
        self.callback = callback
        self.timeout = -1  # connector.wait(-1): block until data arrives
        self.translator = Dict2RosMsgTranslator(ros_type)
        self.daemon = True
        # Connector releases before 0.4.1 index samples starting at 1.
        self.start_index = 1 if is_old_version else 0

    def run(self):
        while not self.stopper.is_set():
            self.sem.acquire(True)
            connector.wait(self.timeout)
            self.inputDDS.take()
            # Old releases pair their 1-based indexing with an end bound of
            # getLength() + 1, per the original logic.
            numOfSamples = (
                self.inputDDS.samples.getLength()
                if not is_old_version
                else self.inputDDS.samples.getLength() + 1
            )
            for j in range(self.start_index, numOfSamples):
                if self.inputDDS.infos.isValid(j):
                    dictionary = self.inputDDS.samples.getDictionary(j)
                    data = self.translator.translate(dictionary)
                    self.callback(data)
            self.sem.release()

    def start_sync(self, sem):
        """Attach the shared semaphore and start the thread if not running."""
        self.sem = sem
        # is_alive() replaces camelCase isAlive(), removed in Python 3.9.
        if not self.is_alive():
            self.start()

    def stop(self):
        """Ask the run loop to exit after its current cycle."""
        self.stopper.set()
class Dict2RosMsgTranslator:
    """Translate DDS sample dictionaries into ROS-style message objects.

    The target message class is chosen at construction time; translate()
    dispatches through the supported_ros_types table.
    """
    # Dispatch table: message class -> bound translation lambda. The
    # double-underscore names are mangled inside the class body, so the
    # lambdas resolve to the private methods below at call time.
    supported_ros_types = {
        PoseStamped: lambda self, data: self.__dictionary_to_pose_stamped_msg(data),
        EkfState: lambda self, data: self.__dictionary_to_ekf_msg(data),
        FamCommand: lambda self, data: self.__dictionary_to_fam_msg(data),
        ControlState: lambda self, data: self.__dictionary_to_control_msg(data),
        PmcCommand: lambda self, data: self.__dictionary_to_pmc_msg(data),
        Log: lambda self, data: self.__dictionary_to_log_msg(data),
    }
    # Target message class; set per instance in __init__.
    ros_type = None
    def __init__(self, ros_type):
        """ros_type: one of the keys of supported_ros_types."""
        self.ros_type = ros_type
    def translate(self, dictionary):
        """Translate one DDS sample dict into an instance of ros_type.

        Raises KeyError if ros_type is not in supported_ros_types.
        """
        ros_msg = self.supported_ros_types[self.ros_type](self, dictionary)
        return ros_msg
    def __array_to_vector3d(self, vector_array):
        """Build a Vector3 from a length-3 [x, y, z] array."""
        vector3d = Vector3()
        vector3d.x = vector_array[0]
        vector3d.y = vector_array[1]
        vector3d.z = vector_array[2]
        return vector3d
    def __array_to_quaternion(self, rot_array):
        """Build a Quaternion from a length-4 [x, y, z, w] array."""
        quaternion = Quaternion()
        quaternion.x = rot_array[0]
        quaternion.y = rot_array[1]
        quaternion.z = rot_array[2]
        quaternion.w = rot_array[3]
        return quaternion
    def __dictionary_to_control_msg(self, dic):
        """Translate a control-state sample into a ControlState message."""
        msg = ControlState()
        # NOTE(review): timeStamp is scaled by 1000 here but used verbatim
        # in every other translator — confirm the intended unit.
        msg.when = dic["hdr"]["timeStamp"] * 1000
        msg.pose.position = self.__array_to_vector3d(dic["pose"]["xyz"])
        msg.pose.orientation = self.__array_to_quaternion(dic["pose"]["rot"])
        msg.twist.linear = self.__array_to_vector3d(dic["twist"]["linear"])
        msg.twist.angular = self.__array_to_vector3d(dic["twist"]["angular"])
        msg.accel.linear = self.__array_to_vector3d(dic["accel"]["linear"])
        msg.accel.angular = self.__array_to_vector3d(dic["accel"]["angular"])
        return msg
    def __dictionary_to_fam_msg(self, dic):
        """Translate a force-allocation sample into a FamCommand message."""
        msg = FamCommand()
        msg.header.stamp = dic["hdr"]["timeStamp"]
        msg.wrench.force = self.__array_to_vector3d(dic["wrench"]["force"])
        msg.wrench.torque = self.__array_to_vector3d(dic["wrench"]["torque"])
        msg.accel = self.__array_to_vector3d(dic["accel"])
        msg.alpha = self.__array_to_vector3d(dic["alpha"])
        msg.status = dic["status"]
        msg.position_error = self.__array_to_vector3d(dic["position_error"])
        msg.position_error_integrated = self.__array_to_vector3d(
            dic["position_error_integrated"]
        )
        msg.attitude_error = self.__array_to_vector3d(dic["attitude_error"])
        msg.attitude_error_integrated = self.__array_to_vector3d(
            dic["attitude_error_integrated"]
        )
        msg.attitude_error_mag = dic["attitude_error_mag"]
        msg.control_mode = dic["control_mode"]
        # print msg.asDict()
        return msg
    def __dictionary_to_pose_stamped_msg(self, dic):
        """Translate a pose sample into a PoseStamped message."""
        msg = PoseStamped()
        msg.header.stamp = dic["hdr"]["timeStamp"]
        msg.pose.position = self.__array_to_vector3d(dic["pose"]["xyz"])
        msg.pose.orientation = self.__array_to_quaternion(dic["pose"]["rot"])
        return msg
    def __dictionary_to_ekf_msg(self, dic):
        """Translate an EKF state sample into an EkfState message."""
        # print dic
        msg = EkfState()
        msg.header.stamp = dic["hdr"]["timeStamp"]
        msg.pose.position = self.__array_to_vector3d(dic["pose"]["xyz"])
        msg.pose.orientation = self.__array_to_quaternion(dic["pose"]["rot"])
        msg.velocity = self.__array_to_vector3d(dic["velocity"])
        msg.omega = self.__array_to_vector3d(dic["omega"])
        msg.gyro_bias = self.__array_to_vector3d(dic["gyro_bias"])
        msg.accel = self.__array_to_vector3d(dic["accel"])
        msg.accel_bias = self.__array_to_vector3d(dic["accel_bias"])
        # Copy element-wise: cov_diag is a fixed-size field on the message.
        for i in range(0, len(dic["cov_diag"])):
            msg.cov_diag[i] = dic["cov_diag"][i]
        msg.confidence = dic["confidence"]
        msg.status = dic["status"]
        msg.of_count = dic["of_count"]
        msg.ml_count = dic["ml_count"]
        msg.hr_global_pose.position = self.__array_to_vector3d(
            dic["hr_global_pose"]["xyz"]
        )
        msg.hr_global_pose.orientation = self.__array_to_quaternion(
            dic["hr_global_pose"]["rot"]
        )
        for i in range(0, len(dic["ml_mahal_dists"])):
            msg.ml_mahal_dists[i] = dic["ml_mahal_dists"][i]
        # print msg.asDict()
        return msg
    def __dictionary_to_pmc_msg(self, dic):
        """Translate a PMC sample (motor speed + nozzles) into PmcCommand."""
        msg = PmcCommand()
        msg.header.stamp = dic["hdr"]["timeStamp"]
        for i in range(0, len(dic["goals"])):
            goal = PmcCommand.PmcGoal(dic["goals"][i]["motorSpeed"])
            # Nozzle positions arrive as integers; stored as characters.
            for j in range(0, len(dic["goals"][i]["nozzlePositions"])):
                goal.nozzle_positions.append(chr(dic["goals"][i]["nozzlePositions"][j]))
            msg.goals.append(goal)
        # print msg.asDict()
        return msg
    def __dictionary_to_log_msg(self, dic):
        """Translate a log sample into a Log message."""
        msg = Log()
        msg.header.stamp = dic["hdr"]["timeStamp"]
        # NOTE(review): level is expanded to 2**level here (math.pow
        # returns a float) — confirm the consumer expects that encoding.
        msg.level = math.pow(2, dic["level"])
        msg.name = dic["name"]
        msg.msg = dic["msg"]
        # print msg.asDict()
        return msg
class DdsCommandExecutor:
    """Placeholder for command execution over DDS; all commands are stubs."""
    def __init__(self):
        pass
    def reset_ekf(self):
        """Stub: prints a not-implemented notice instead of resetting the EKF."""
        print("reset_ekf function is not yet implemented on dds")
    def initialize_bias(self):
        """Stub: prints a not-implemented notice instead of initializing bias."""
        print("initialize_bias function is not yet implemented on dds")
    def toggle_pmc(self):
        """Stub: prints a not-implemented notice instead of toggling the PMC."""
        print("toggle_pmc function is not yet implemented on dds")
|
"""LBANN Python frontend."""
import sys
import os.path
import configparser
# Check for Python 3
if sys.version_info[0] != 3:
    raise ImportError('Python 3 is required')
# Try getting build-specific paths from config file
_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'python_config.ini')
_lbann_exe = None
if os.path.isfile(_config_file):
    try:
        _config = configparser.ConfigParser()
        _config.read(_config_file)
        sys.path.append(os.path.dirname(_config['Paths']['lbann_pb2.py']))
        _lbann_exe = _config['Paths']['lbann_exe']
    except Exception:
        # Config file is missing entries or malformed; fall back to the
        # defaults. A bare `except:` here would also have swallowed
        # SystemExit/KeyboardInterrupt.
        pass
import lbann_pb2, callbacks_pb2, layers_pb2, metrics_pb2, model_pb2, objective_functions_pb2, optimizers_pb2, reader_pb2, weights_pb2
def lbann_exe():
    """LBANN executable."""
    if _lbann_exe:
        return _lbann_exe
    return 'lbann'
# Import core functionality into lbann namespace
from lbann.callback import *
from lbann.layer import *
from lbann.metric import *
from lbann.model import *
from lbann.objective_function import *
from lbann.optimizer import *
from lbann.weights import *
from lbann.launcher import run
|
def getSNP(SNPLine):
    """Parse one SNP line ("chrom<TAB>position").

    Returns [(chrom, position), at_end], where at_end is True only for the
    empty string that readline() yields at end-of-file (the sentinel SNP
    is then ("", 0)).
    """
    if SNPLine == "":
        return [("", 0), True]
    SNPLineElements = SNPLine.strip().split("\t")
    SNPPosition = (SNPLineElements[0], int(SNPLineElements[1]))
    return [SNPPosition, False]


def findENCODEPeakOverlap(ENCODEFileName, SNPFileName, outputFileName):
    """Record, for each SNP, whether it falls inside an ENCODE peak.

    Writes "chrom<TAB>pos<TAB>1" for SNPs inside a peak and "...<TAB>0"
    otherwise. ASSUMES the ENCODE file is sorted by chromosome, start,
    end and the SNP file is sorted by chromosome, position (chromosome
    order is plain string order in both files).
    """
    # Local import so this function also works when the file is imported
    # as a module (the original only imported gzip under __main__).
    import gzip
    # "rt": decode to text — on Python 3 the default "rb" yields bytes,
    # which breaks the str-based split below.
    ENCODEFile = gzip.open(ENCODEFileName, "rt")
    SNPFile = open(SNPFileName)
    outputFile = open(outputFileName, 'w+')
    [SNP, endOfSNPFile] = getSNP(SNPFile.readline())
    for line in ENCODEFile:
        # For each peak, advance through the SNPs and classify them.
        lineElements = line.strip().split("\t")
        # SNPs on earlier chromosomes than the current peak: not in a peak.
        while (not endOfSNPFile) and SNP[0] < lineElements[0]:
            outputFile.write(SNP[0] + "\t" + str(SNP[1]) + "\t" + "0" + "\n")
            [SNP, endOfSNPFile] = getSNP(SNPFile.readline())
        if endOfSNPFile:
            break
        # SNPs on this chromosome but before the peak start: not in a peak.
        while (not endOfSNPFile) and (SNP[0] == lineElements[0]) and (SNP[1] < int(lineElements[1])):
            outputFile.write(SNP[0] + "\t" + str(SNP[1]) + "\t" + "0" + "\n")
            [SNP, endOfSNPFile] = getSNP(SNPFile.readline())
        if endOfSNPFile:
            break
        # SNPs within [start, end] of the current peak: in a peak.
        while (not endOfSNPFile) and (SNP[0] == lineElements[0]) and ((SNP[1] >= int(lineElements[1])) and (SNP[1] <= int(lineElements[2]))):
            outputFile.write(SNP[0] + "\t" + str(SNP[1]) + "\t" + "1" + "\n")
            [SNP, endOfSNPFile] = getSNP(SNPFile.readline())
        if endOfSNPFile:
            break
    while not endOfSNPFile:
        # SNPs remaining after the last peak are outside every peak, so
        # record 0 — the original wrote "1" here, contradicting its own
        # comment ("ADDED MAY 2014 BUT NOT DEBUGGED") and mislabeling
        # every trailing SNP as in-peak.
        outputFile.write(SNP[0] + "\t" + str(SNP[1]) + "\t" + "0" + "\n")
        [SNP, endOfSNPFile] = getSNP(SNPFile.readline())
    SNPFile.close()
    ENCODEFile.close()
    outputFile.close()
if __name__=="__main__":
    import sys
    import gzip
    # Command line: <gzipped ENCODE peak file> <SNP file> <output path>
    ENCODEFileName = sys.argv[1]
    SNPFileName = sys.argv[2]
    outputFileName = sys.argv[3]
    findENCODEPeakOverlap(ENCODEFileName, SNPFileName, outputFileName)
import argparse
import subprocess
import glob
import re
import os
# Command-line interface for the thirdparty installer.
parser = argparse.ArgumentParser(description='Installing ThirdParty')
# NOTE(review): argparse stores command-line values as strings, so any
# non-empty value passed to these flags (including "False") is truthy —
# consider action='store_true'; confirm intended usage.
parser.add_argument('--all', help='install all dependencies', default=True)
parser.add_argument('--reinstall_all',
                    help='re-install all dependencies', default=False)
parser.add_argument(
    '--reinstall', help='reinstall one library', type=str, default="")
def install_all_deps():
    """Run every install rule under ./thirdparty/rules/.

    cmake is executed up front (other rules presumably depend on it —
    confirm); note it runs a second time inside the loop, which assumes
    the rule is idempotent.
    """
    # shell=True for consistency with the loop below; the original invoked
    # cmake.sh directly, which requires the executable bit to be set.
    subprocess.call("./thirdparty/rules/cmake.sh", shell=True)
    # sorted() makes the install order deterministic across filesystems
    # (glob returns entries in arbitrary order).
    for rule in sorted(glob.glob("./thirdparty/rules/*.sh")):
        subprocess.call("./" + rule, shell=True)
def reinstall_all_deps():
    """Wipe the whole thirdparty install directory, then rebuild everything."""
    install_root = "./thirdparty/all/"
    if os.path.exists(install_root):
        subprocess.call("rm -r " + install_root, shell=True)
    install_all_deps()
def reinstall_one_lib(lib):
    """Remove and rebuild a single thirdparty library by rule name."""
    # Derive the known rule names from the .sh files in the rules folder.
    pattern = re.compile('%s(.*)%s' % ('./thirdparty/rules/', '.sh'))
    known_rules = [pattern.search(rule).group(1)
                   for rule in glob.glob("./thirdparty/rules/*.sh")]
    if lib not in known_rules:
        print('Error no rules found for ' + lib)
        return
    # Delete the previous install of this library, if any, then rebuild.
    target_dir = "./thirdparty/all/" + lib
    if os.path.exists(target_dir):
        subprocess.call("rm -r " + target_dir, shell=True)
    subprocess.call("./thirdparty/rules/" + lib + ".sh", shell=True)
args = parser.parse_args()
# Dispatch: a named --reinstall library wins over --reinstall_all;
# the default action installs everything.
if args.reinstall != "":
    reinstall_one_lib(args.reinstall)
elif args.reinstall_all:
    reinstall_all_deps()
else:
    install_all_deps()
|
import numpy as np
import pandas as pd
import xgboost as xgb
from lifelines import WeibullAFTFitter
from sklearn.neighbors import BallTree
# lib utils
from xgbse._base import XGBSEBaseEstimator
from xgbse.converters import convert_data_to_xgb_format, convert_y
# at which percentiles will the KM predict
from xgbse.non_parametric import get_time_bins, calculate_interval_failures
# Percentile grid (0.0 to 1.0 in steps of 0.1) used for KM predictions.
KM_PERCENTILES = np.linspace(0, 1, 11)
# Default hyperparameters for the XGBoost AFT survival model.
DEFAULT_PARAMS = {
    "objective": "survival:aft",
    "eval_metric": "aft-nloglik",
    "aft_loss_distribution": "normal",
    "aft_loss_distribution_scale": 1,
    "tree_method": "hist",
    "learning_rate": 5e-2,
    "max_depth": 8,
    "booster": "dart",
    "subsample": 0.5,
    "min_child_weight": 50,
    "colsample_bynode": 0.5,
}
# Default (empty) parameters for the Weibull AFT regression stage.
DEFAULT_PARAMS_WEIBULL = {}
class XGBSEStackedWeibull(XGBSEBaseEstimator):
    """
    Perform stacking of a XGBoost survival model with a Weibull AFT parametric model.
    The XGBoost fits the data and then predicts a value that is interpreted as a risk metric.
    This risk metric is fed to the Weibull regression which uses it as its only independent variable.
    Thus, we can get the benefit of XGBoost discrimination power alongside the Weibull AFT
    statistical rigor (calibrated survival curves, confidence intervals)
    """

    def __init__(
        self,
        xgb_params=DEFAULT_PARAMS,
        weibull_params=DEFAULT_PARAMS_WEIBULL,
        n_jobs=-1,
    ):
        """
        Construct XGBSEStackedWeibull instance
        Args:
            xgb_params (Dict): parameters for XGBoost model, see
                https://xgboost.readthedocs.io/en/latest/parameter.html
            weibull_params (Dict): parameters for Weibull Regression model, see
                https://lifelines.readthedocs.io/en/latest/fitters/regression/WeibullAFTFitter.html
            n_jobs (Int): number of parallel jobs
        """
        self.xgb_params = xgb_params
        self.weibull_params = weibull_params
        # Bug fix: n_jobs was accepted but never stored, so the value was
        # silently dropped (and lost on sklearn-style get_params/clone).
        self.n_jobs = n_jobs
        self.persist_train = False

    def fit(
        self,
        X,
        y,
        num_boost_round=1000,
        validation_data=None,
        early_stopping_rounds=None,
        verbose_eval=0,
        persist_train=False,
        index_id=None,
        time_bins=None,
    ):
        """
        Fit XGBoost model to predict a value that is interpreted as a risk metric.
        Fit Weibull Regression model using risk metric as only independent variable.
        Args:
            X ([pd.DataFrame, np.array]): features to be used while fitting XGBoost model
            y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
                and time of event or time of censoring as second field.
            num_boost_round (Int): Number of boosting iterations.
            validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
                if user desires to use early stopping
            early_stopping_rounds (Int): Activates early stopping.
                Validation metric needs to improve at least once
                in every **early_stopping_rounds** round(s) to continue training.
                See xgboost.train documentation.
            verbose_eval ([Bool, Int]): level of verbosity. See xgboost.train documentation.
            persist_train (Bool): whether or not to persist training data to use explainability
                through prototypes
            index_id (pd.Index): user defined index if intended to use explainability
                through prototypes
            time_bins (np.array): specified time windows to use when making survival predictions
        Returns:
            XGBSEStackedWeibull: Trained XGBSEStackedWeibull instance
        """
        E_train, T_train = convert_y(y)
        if time_bins is None:
            time_bins = get_time_bins(T_train, E_train)
        self.time_bins = time_bins
        # converting data to xgb format
        dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
        # converting validation data to xgb format
        evals = ()
        if validation_data:
            X_val, y_val = validation_data
            dvalid = convert_data_to_xgb_format(
                X_val, y_val, self.xgb_params["objective"]
            )
            evals = [(dvalid, "validation")]
        # training XGB
        self.bst = xgb.train(
            self.xgb_params,
            dtrain,
            num_boost_round=num_boost_round,
            early_stopping_rounds=early_stopping_rounds,
            evals=evals,
            verbose_eval=verbose_eval,
        )
        # predicting risk from XGBoost
        train_risk = self.bst.predict(dtrain)
        # replacing 0 by minimum positive value in df
        # so Weibull can be fitted
        min_positive_value = T_train[T_train > 0].min()
        T_train = np.clip(T_train, min_positive_value, None)
        # creating df to use lifelines API
        weibull_train_df = pd.DataFrame(
            {"risk": train_risk, "duration": T_train, "event": E_train}
        )
        # fitting weibull aft
        self.weibull_aft = WeibullAFTFitter(**self.weibull_params)
        self.weibull_aft.fit(weibull_train_df, "duration", "event", ancillary=True)
        if persist_train:
            self.persist_train = True
            if index_id is None:
                index_id = X.index.copy()
            # leaf co-occurrence is used for prototype-based explainability
            index_leaves = self.bst.predict(dtrain, pred_leaf=True)
            self.tree = BallTree(index_leaves, metric="hamming")
        self.index_id = index_id
        return self

    def predict(self, X, return_interval_probs=False):
        """
        Predicts survival probabilities using the XGBoost + Weibull AFT stacking pipeline.
        Args:
            X (pd.DataFrame): Dataframe of features to be used as input for the
                XGBoost model.
            return_interval_probs (Bool): Boolean indicating if interval probabilities are
                supposed to be returned. If False the cumulative survival is returned.
                Default is False.
        Returns:
            pd.DataFrame: A dataframe of survival probabilities
            for all times (columns), from a time_bins array, for all samples of X
            (rows). If return_interval_probs is True, the interval probabilities are returned
            instead of the cumulative survival probabilities.
        """
        # converting to xgb format
        d_matrix = xgb.DMatrix(X)
        # predicting risk from XGBoost
        risk = self.bst.predict(d_matrix)
        weibull_score_df = pd.DataFrame({"risk": risk})
        # predicting survival curves from the fitted Weibull AFT model
        # (the original comment wrongly said "logistic regression")
        preds_df = self.weibull_aft.predict_survival_function(
            weibull_score_df, self.time_bins
        ).T
        if return_interval_probs:
            preds_df = calculate_interval_failures(preds_df)
        return preds_df
|
""" Print the third angle of a triangle given two angles"""
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
a, b = (int(token) for token in input().split())
# The interior angles of a triangle always sum to 180 degrees.
print(180 - (a + b))
# Copyright 2018 Platform9 Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file."""
    full_path = os.path.join(os.path.dirname(__file__), fname)
    with open(full_path) as handle:
        return handle.read()
# Package metadata and distribution configuration.
setup(
    name='pf9-saml-auth',
    version='1.0',
    description='Platform9 SAML Authentication Library for OpenStack Identity',
    long_description=read('README.rst'),
    url='https://github.com/platform9/pf9-saml-auth',
    author='Blake Covarrubias',
    author_email='blake@platform9.com',
    license='Apache License 2.0',
    # Trove classifiers: https://pypi.org/classifiers/
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: OpenStack',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='openstack keystone saml',
    packages=[
        'pf9_saml_auth',
        'pf9_saml_auth.v3',
    ],
    install_requires=[
        'keystoneauth1',
        'lxml',
        'oktaauth',
        'python-keystoneclient',
        'six>=1.10.0',
    ],
    python_requires='>=2.7',
    zip_safe=False,
    # Register the SAML auth plugins with keystoneauth1's plugin loader.
    entry_points={
        "keystoneauth1.plugin": [
            "v3pf9samladfs = pf9_saml_auth._loading:V3Pf9ADFSPassword",
            "v3pf9samlokta = pf9_saml_auth._loading:V3Pf9SamlOkta",
            "v3pf9samlonelogin = pf9_saml_auth._loading:V3Pf9SamlOnelogin",
        ]
    }
)
|
import argparse
from otoole.results.convert import convert_cplex_file
def main():
    """Command-line entry point: convert an OSeMOSYS CPLEX solution file."""
    parser = argparse.ArgumentParser(description="Otoole: Python toolkit of OSeMOSYS users")
    parser.add_argument("cplex_file",
                        help="The filepath of the OSeMOSYS cplex output file")
    parser.add_argument("output_file",
                        help="The filepath of the converted file that will be written")
    parser.add_argument("-s", "--start_year", type=int, default=2015,
                        help="Output only the results from this year onwards")
    parser.add_argument("-e", "--end_year", type=int, default=2070,
                        help="Output only the results upto and including this year")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--csv", action="store_true",
                       help="Output file in comma-separated-values format")
    group.add_argument("--cbc", action="store_true",
                       help="Output file in CBC format, (default option)")
    args = parser.parse_args()
    # CBC is the default unless --csv was explicitly requested.
    output_format = 'csv' if args.csv else 'cbc'
    convert_cplex_file(args.cplex_file, args.output_file,
                       args.start_year, args.end_year,
                       output_format)
|
import logging
import discord
from discord.commands import Option, context, permissions, slash_command
from discord.ext import commands
from chiya import config
from chiya.utils import embeds
# Module-level logger for this cog.
log = logging.getLogger(__name__)
class PurgeCommands(commands.Cog):
    """Cog providing the staff-only /purge slash command."""
    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
    async def can_purge_messages(self, ctx: context.ApplicationContext) -> bool:
        """
        Check used by purge function to make sure that the moderation,
        development, logs, and tickets categories can't be purged for
        security reasons.
        """
        # The guild owner bypasses the category restriction entirely.
        if ctx.author.id == ctx.guild.owner.id:
            return True
        if ctx.channel.category_id in [
            config["categories"]["moderation"],
            config["categories"]["development"],
            config["categories"]["logs"],
            config["categories"]["tickets"],
        ]:
            return False
        return True
    @slash_command(
        guild_ids=config["guild_ids"], default_permission=False, description="Purge the last X amount of messages"
    )
    @permissions.has_role(config["roles"]["staff"])
    async def purge(
        self,
        ctx: context.ApplicationContext,
        amount: Option(int, description="The amount of messages to be purged", required=True),
        reason: Option(str, description="The reason why the messages are being purged", required=True),
    ) -> None:
        """
        Removes the last X amount of messages in bulk.
        Capped at a maximum of 100 messages per command invoke to avoid
        accidents wiping out large chunks of messages.
        Cannot be used in the moderation, development, logs, or archive
        categories for security reasons.
        """
        await ctx.defer()
        if not await self.can_purge_messages(ctx):
            return await embeds.error_message(ctx=ctx, description="You cannot use that command in this category.")
        # 4096 is the embed description limit, so the reason must fit it.
        if len(reason) > 4096:
            return await embeds.error_message(ctx=ctx, description="Reason must be less than 4096 characters.")
        # Hard cap of 100 messages per invocation.
        amount = 100 if amount > 100 else amount
        embed = embeds.make_embed(
            title="Purged messages",
            description=f"{ctx.author.mention} purged {amount} {'message' if amount == 1 else 'messages'}.",
            thumbnail_url="https://i.imgur.com/EDy6jCp.png",
            color=discord.Color.red(),
            fields=[{"name": "Reason:", "value": reason, "inline": False}],
        )
        # NOTE(review): ctx.channel.last_message may be None (e.g. no cached
        # messages), which would raise AttributeError here — confirm.
        await ctx.channel.purge(limit=amount, before=ctx.channel.last_message.created_at, bulk=True)
        await ctx.send_followup(embed=embed)
def setup(bot: commands.Bot) -> None:
    """Entry point used by the discord extension loader to add this cog."""
    bot.add_cog(PurgeCommands(bot))
    log.info("Commands loaded: purge")
|
"""Main app/routing for Twitter"""
from flask import Flask, render_template
from models import DB, User, Tweet
from twitter import *
# function to intialize the flask instance
# create the application
def create_app():
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__)
    # SQLite database in the application directory; change tracking is
    # disabled to avoid the SQLAlchemy overhead warning.
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    DB.init_app(app)

    @app.route('/')
    def root():
        # NOTE(review): DB seeding/rendering code used to live here (see
        # version control history); the route currently returns a
        # placeholder string only.
        return "text"

    return app
#
# if __name__ == '__main__':
# create_app().run()
|
from collections import namedtuple
import kornia.color as kc
import torch
import torch.nn as nn
import torch.nn.functional as f
import torchvision.models.vgg as vgg
class GANLoss(nn.Module):
    """PyTorch module for GAN loss.

    Supports least-squares ("lsgan"), vanilla BCE-with-logits ("vanilla")
    and Wasserstein-GP ("wgangp") formulations. This code is inspired by
    https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.
    """

    def __init__(
        self, gan_mode="wgangp", target_real_label=1.0, target_fake_label=0.0
    ):
        super(GANLoss, self).__init__()
        # Buffers so the labels follow the module across devices.
        self.register_buffer("real_label", torch.tensor(target_real_label))
        self.register_buffer("fake_label", torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        criterion_by_mode = {"lsgan": nn.MSELoss, "vanilla": nn.BCEWithLogitsLoss}
        if gan_mode in criterion_by_mode:
            self.loss = criterion_by_mode[gan_mode]()
        elif gan_mode == "wgangp":
            # Wasserstein loss is computed directly from the prediction mean.
            self.loss = None
        else:
            raise NotImplementedError(
                "gan mode %s not implemented" % gan_mode
            )

    def get_target_tensor(self, prediction, target_is_real):
        """Return a detached label tensor shaped like *prediction*."""
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction).detach()

    def forward(self, prediction, target_is_real):
        """Compute the GAN loss for *prediction* against real/fake labels."""
        if self.gan_mode == "wgangp":
            mean = prediction.mean()
            return -mean if target_is_real else mean
        if self.gan_mode in ("lsgan", "vanilla"):
            target = self.get_target_tensor(prediction, target_is_real)
            return self.loss(prediction, target)
        raise RuntimeError(self.gan_mode)
class VGGLoss(nn.Module):
    """Perceptual loss: MSE between VGG features of two images.

    Parameter
    ---------
    net_type : str
        type of vgg network, i.e. `vgg16` or `vgg19`.
    layer : str
        layer where the mean squared error is calculated.
    rescale : float
        rescale factor for VGG Loss
    """

    def __init__(self, net_type="vgg19", layer="relu2_2", rescale=0.006):
        super(VGGLoss, self).__init__()
        if net_type == "vgg16":
            assert layer in ["relu1_2", "relu2_2", "relu3_3", "relu4_3"]
            self.__vgg_net = VGG16()
            self.__layer = layer
        elif net_type == "vgg19":
            assert layer in [
                "relu1_2",
                "relu2_2",
                "relu3_4",
                "relu4_4",
                "relu5_4",
            ]
            self.__vgg_net = VGG19()
            self.__layer = layer
        # Per-channel normalization constants, stored as buffers so they
        # follow the module across devices.
        self.register_buffer(
            name="vgg_mean",
            tensor=torch.tensor(
                [[[0.485]], [[0.456]], [[0.406]]], requires_grad=False
            ),
        )
        self.register_buffer(
            name="vgg_std",
            tensor=torch.tensor(
                [[[0.229]], [[0.224]], [[0.225]]], requires_grad=False
            ),
        )
        # Rescale factor to balance the VGG loss with other losses.
        self.register_buffer(
            name="rescale", tensor=torch.tensor(rescale, requires_grad=False)
        )

    def __normalize(self, img):
        centered = img.sub(self.vgg_mean.detach())
        return centered.div(self.vgg_std.detach())

    def forward(self, x, y):
        """Return the rescaled MSE between VGG features of x and y.

        Both inputs must be normalized to [0, 1].
        """
        feat_x = getattr(self.__vgg_net(self.__normalize(x)), self.__layer)
        feat_y = getattr(self.__vgg_net(self.__normalize(y)), self.__layer)
        return f.mse_loss(feat_x, feat_y) * self.rescale
class VGG16(nn.Module):
    """Blockwise pickable VGG16.

    Splits the pretrained torchvision feature stack into four sequential
    slices so intermediate relu activations can be returned. This code is
    inspired by
    https://gist.github.com/crcrpar/a5d46738ffff08fc12138a5f270db426
    """

    def __init__(self, requires_grad=False):
        super(VGG16, self).__init__()
        features = vgg.vgg16(pretrained=True).features
        # Index ranges of the four feature slices (ends at each target relu).
        boundaries = [(0, 4), (4, 9), (9, 16), (16, 23)]
        for num, (lo, hi) in enumerate(boundaries, start=1):
            block = nn.Sequential()
            for pos in range(lo, hi):
                block.add_module(str(pos), features[pos])
            setattr(self, "slice%d" % num, block)
        if not requires_grad:
            # Frozen: this module is only used for feature extraction.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, x):
        activations = []
        h = x
        for num in range(1, 5):
            h = getattr(self, "slice%d" % num)(h)
            activations.append(h)
        vgg_outputs = namedtuple(
            "VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3"]
        )
        return vgg_outputs(*activations)
class VGG19(nn.Module):
    """Blockwise pickable VGG19.

    Splits the pretrained torchvision feature stack into five sequential
    slices so intermediate relu activations can be returned. This code is
    inspired by
    https://gist.github.com/crcrpar/a5d46738ffff08fc12138a5f270db426
    """

    def __init__(self, requires_grad=False):
        super(VGG19, self).__init__()
        features = vgg.vgg19(pretrained=True).features
        # Index ranges of the five feature slices (ends at each target relu).
        boundaries = [(0, 4), (4, 9), (9, 18), (18, 27), (27, 36)]
        for num, (lo, hi) in enumerate(boundaries, start=1):
            block = nn.Sequential()
            for pos in range(lo, hi):
                block.add_module(str(pos), features[pos])
            setattr(self, "slice%d" % num, block)
        if not requires_grad:
            # Frozen: this module is only used for feature extraction.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, x):
        activations = []
        h = x
        for num in range(1, 6):
            h = getattr(self, "slice%d" % num)(h)
            activations.append(h)
        vgg_outputs = namedtuple(
            "VggOutputs",
            ["relu1_2", "relu2_2", "relu3_4", "relu4_4", "relu5_4"],
        )
        return vgg_outputs(*activations)
class TVLoss(nn.Module):
    """Total Variation Loss.

    This code is copied from
    https://github.com/leftthomas/SRGAN/blob/master/loss.py
    """

    def __init__(self, tv_loss_weight=1):
        super(TVLoss, self).__init__()
        # Scalar multiplier applied to the final loss value.
        self.tv_loss_weight = tv_loss_weight

    def forward(self, x):
        """Weighted total variation of an NCHW batch, averaged per sample."""
        batch, _, height, width = x.size()
        # Squared differences between vertically / horizontally adjacent pixels,
        # each normalized by the number of elements in the shifted view.
        diff_h = x[:, :, 1:, :] - x[:, :, : height - 1, :]
        diff_w = x[:, :, :, 1:] - x[:, :, :, : width - 1]
        norm_h = torch.pow(diff_h, 2).sum() / self.tensor_size(x[:, :, 1:, :])
        norm_w = torch.pow(diff_w, 2).sum() / self.tensor_size(x[:, :, :, 1:])
        return self.tv_loss_weight * 2 * (norm_h + norm_w) / batch

    @staticmethod
    def tensor_size(t):
        """Number of elements per sample (C * H * W)."""
        return t.size()[1] * t.size()[2] * t.size()[3]
class PSNR(nn.Module):
    """Peak Signal/Noise Ratio."""

    def __init__(self, max_val=1.0):
        super(PSNR, self).__init__()
        # Maximum possible pixel value (1.0 for normalized images).
        self.max_val = max_val

    def forward(self, predictions, targets):
        """PSNR in dB between predictions and targets (higher is better)."""
        # Three-channel inputs are compared in grayscale.
        is_rgb = predictions.shape[1] == 3
        if is_rgb:
            predictions = kc.rgb_to_grayscale(predictions)
            targets = kc.rgb_to_grayscale(targets)
        mse = f.mse_loss(predictions, targets)
        return 10 * torch.log10(self.max_val ** 2 / mse)
|
import graphene
from django.utils.text import slugify
from .types import Ushop
from ..core.types import SeoInput, Upload
from ..core.types.common import SeoInput
from ..core.utils import clean_seo_fields, validate_image_file
from ..core.mutations import ( # ClearMetaBaseMutation,; UpdateMetaBaseMutation,
BaseMutation,
ModelDeleteMutation,
ModelMutation,
)
from ...unurshop.ushop import models
from ...unurshop.ushop.thumbnails import create_ushop_logo_image_thumbnails
from ...product.models import Product, ProductType, Category
from ...core.utils import generate_unique_slug
class UshopInput(graphene.InputObjectType):
    """Input payload shared by the Ushop create/update mutations."""

    name = graphene.String(description="Ushop name.")
    url = graphene.String(description="Ushop url.")
    description = graphene.String(
        description=("Ushop content. May consists of ordinary text, HTML and images.")
    )
    description_json = graphene.JSONString(description="Ushop content in JSON format.")
    is_published = graphene.Boolean(
        description="Determines if Ushop is visible in the storefront"
    )
    publication_date = graphene.String(
        description="Publication date. ISO 8601 standard."
    )
    seo = SeoInput(description="Search engine optimization fields.")
    logo_image = Upload(description="Logo image file.")
    logo_image_alt = graphene.String(description="Alt text for an image.")
    # Rating fields exposed to the storefront.
    rank = graphene.Int(description="shop rank")
    rating_main = graphene.Int(description="main rank")
    rating_uk_shipping = graphene.Int(description="rating_uk_shipping rank")
    rating_product_quality = graphene.Int(description="rating_product_quality rank")
    # Fixed garbled API description ("marating_product_pricein rank").
    rating_product_price = graphene.Int(description="rating_product_price rank")
    rating_shuurhai = graphene.Int(description="rating_shuurhai rank")
    rating_product_rank = graphene.Int(description="rating_product_rank rank")
    # CSS-style selectors used by the scraper to extract list/product data.
    listSelection = graphene.String(
        description=("Ushop listiig zadlah selection")
    )
    productSelection = graphene.String(
        description=("Ushop productiig zadlah selection")
    )
class UshopCreate(ModelMutation):
    """Mutation that creates a Shop ("Ushop") and, on first save, its
    companion UK-shipping Product."""

    class Arguments:
        input = UshopInput(
            required=True, description="Fields required to create a Ushop."
        )

    class Meta:
        description = "Creates a new Ushop."
        model = models.Shop
        permissions = ("page.manage_pages",)

    @classmethod
    def clean_input(cls, info, instance, data):
        """Validate the uploaded logo image and clean the SEO fields.

        Returns the cleaned-input dict consumed by ``save``.
        """
        cleaned_input = super().clean_input(info, instance, data)
        # slug = cleaned_input.get("slug", "")
        # if not slug:
        #     cleaned_input["slug"] = slugify(cleaned_input["title"])
        if data.get("logo_image"):
            # data["logo_image"] holds the multipart file key; the actual
            # upload is looked up in request.FILES before validation.
            image_data = info.context.FILES.get(data["logo_image"])
            validate_image_file(image_data, "logo_image")
        clean_seo_fields(cleaned_input)
        return cleaned_input

    @classmethod
    def save(cls, info, instance, cleaned_input):
        """Persist the shop, lazily creating its shipping product once."""
        instance.save()
        if not instance.shipping_product:
            # NOTE(review): hard-coded pks are assumed to identify the
            # UK-shipping product type (16) and category (24) — verify
            # these exist in every deployment's database.
            uk_shipping_pt = ProductType.objects.get(pk=16)
            uk_shipping_cat = Category.objects.get(pk=24)
            sh_product = Product(
                product_type = uk_shipping_pt,
                category = uk_shipping_cat,
                name = instance.name+" англи дахь хүргэлт",
                ushop = instance,
                charge_taxes = False,
                is_published = True
            )
            slug = generate_unique_slug(sh_product, sh_product.name)
            sh_product.slug = slug
            instance.shipping_product = sh_product
            sh_product.save()
            # Second save persists the shipping_product FK set above.
            instance.save()
            # ushop.save()
        if cleaned_input.get("logo_image"):
            # Thumbnail generation is offloaded to a background task queue.
            create_ushop_logo_image_thumbnails.delay(instance.pk)
class UshopUpdate(UshopCreate):
    """Mutation that updates an existing Shop; reuses UshopCreate's
    clean_input/save logic and adds the target object's ID."""

    class Arguments:
        id = graphene.ID(required=True, description="ID of a Ushop to update.")
        input = UshopInput(
            required=True, description="Fields required to update a Ushop."
        )

    class Meta:
        description = "Updates an existing Ushop."
        model = models.Shop
        permissions = ("page.manage_pages",)
class UshopDelete(ModelDeleteMutation):
    """Mutation that deletes a Shop by ID."""

    class Arguments:
        id = graphene.ID(required=True, description="ID of a Ushop to delete.")

    class Meta:
        description = "Deletes a Ushop."
        model = models.Shop
        permissions = ("page.manage_pages",)
|
from __future__ import annotations
from dataclasses import dataclass
import re
@dataclass(frozen=True)
class CubeGeometry:
    """Axis-aligned integer cuboid with inclusive bounds
    [x1..x2] x [y1..y2] x [z1..z2]."""

    x1: int
    x2: int
    y1: int
    y2: int
    z1: int
    z2: int

    @staticmethod
    def from_line(line: str) -> CubeGeometry:
        """Parse the six signed integers found in *line* into a cuboid."""
        return CubeGeometry(*map(int, re.findall(r"-?[0-9]+", line)))

    def intersect(self, other: CubeGeometry) -> CubeGeometry | None:
        """Return the overlap of the two cuboids, or None when disjoint.

        Fixed annotation: the original declared ``-> CubeGeometry`` but fell
        through to an implicit None; the None return is now explicit.
        """
        c = CubeGeometry(
            max(self.x1, other.x1),
            min(self.x2, other.x2),
            max(self.y1, other.y1),
            min(self.y2, other.y2),
            max(self.z1, other.z1),
            min(self.z2, other.z2),
        )
        if c.x1 <= c.x2 and c.y1 <= c.y2 and c.z1 <= c.z2:
            return c
        return None

    def volume(self) -> int:
        """Number of unit cubes contained (bounds are inclusive)."""
        return (self.x2 - self.x1 + 1) * (self.y2 - self.y1 + 1) * (self.z2 - self.z1 + 1)
|
"""MIT License
Copyright (c) 2021 Buco
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
# Root of this wrapper's exception hierarchy; catch this to handle any
# error the library raises.
class WaifuException(Exception):
    """Base exception class for the wrapper."""
class APIException(WaifuException):
    """Exception due to an error response from waifu.im API."""

    def __init__(self, status: int, reason: str, errormessage: str) -> None:
        """Initializes the APIException.

        Args:
            status: HTTP status code of the response.
            reason: HTTP status reason of the response.
            errormessage: The response message.
        """
        super().__init__(f'{status} {reason}: {errormessage}')
class NoToken(WaifuException):
    """Exception raised when the user tries to request the gallery route
    with no token."""

    # The default message is kept byte-identical to the original; only the
    # pointless f-prefix (no placeholders) was dropped.
    def __init__(self, message='You tried to request the gallery route with no token. Please pass your token to HoriAioClient'):
        super().__init__(message)
|
'''
Created on Aug 6, 2018
@author: fan
Design page subplot
'''
def subplot_design(plot_count=10, base_multiple=4, base_multiple_high_frac = 0.60):
    """subplot grid and size given total plot count

    figsize = (width height)

    Parameters
    ----------
    plot_count : int
        total number of subplots to lay out
    base_multiple : int or float
        inches allotted to each grid cell (width and height)
    base_multiple_high_frac : float
        per-cell shrink factor used for large (> 25 plot) grids

    Returns
    -------
    tuple
        (figsize, rows, cols) where figsize = (width, height) in inches

    Examples
    --------
    import Support.graph.subplot as sup_graph_subplot
    figsize, rows, cols = sup_graph_subplot.subplot_design(plot_count=10, base_multiple=4, base_multiple_high_frac = 0.60)
    """
    if plot_count == 1:
        figsize = (base_multiple * 1, base_multiple * 1)
        rows = 1
        cols = 1
    elif plot_count <= 2:
        figsize = (base_multiple * 2, base_multiple * 1)
        rows = 1
        cols = 2
    elif plot_count <= 4:
        figsize = (base_multiple * 2, base_multiple * 2)
        rows = 2
        cols = 2
    elif plot_count <= 6:
        figsize = (base_multiple * 3, base_multiple * 2)
        rows = 2
        cols = 3
    elif plot_count <= 9:
        figsize = (base_multiple * 3, base_multiple * 3)
        rows = 3
        cols = 3
    elif plot_count <= 12:
        figsize = (base_multiple * 4, base_multiple * 3)
        rows = 3
        cols = 4
    elif plot_count <= 16:
        figsize = (base_multiple * 4, base_multiple * 4)
        rows = 4
        cols = 4
    elif plot_count <= 20:
        figsize = (base_multiple * 5, base_multiple * 4)
        rows = 4
        cols = 5
    elif plot_count <= 25:
        figsize = (base_multiple * 5, base_multiple * 5)
        rows = 5
        cols = 5
    else:
        # Smallest square grid that fits all plots ...
        sqr_ctr = 1
        while sqr_ctr ** 2 < plot_count:
            sqr_ctr += 1
        rows = sqr_ctr
        cols = sqr_ctr
        # ... dropping one row when a (k-1) x k grid is still large enough.
        if sqr_ctr * (sqr_ctr - 1) >= plot_count:
            rows = rows - 1
        # BUGFIX: the original assigned both computed sizes to the same
        # variable, so the rows-based height was silently overwritten by the
        # cols-based width. Width now follows cols and height follows rows,
        # each with a 20-inch floor.
        figsize_height = max(20, int(base_multiple * base_multiple_high_frac * rows))
        figsize_width = max(20, int(base_multiple * base_multiple_high_frac * cols))
        figsize = (figsize_width, figsize_height)
    return figsize, rows, cols
if __name__ == "__main__":
    # Smoke test: print the chosen grid and figure size for every plot
    # count from 0 to 99.
    for plot_count in range(100):
        print('plot_count:', plot_count, ', result:', subplot_design(plot_count=plot_count))
|
#!/usr/bin/env python
# coding: utf-8
# In[20]:
import numpy as np
import math
# In[48]:
class NodeL2():
    """Computational-graph node for the squared L2 norm f(a) = sum(a_i ** 2).

    The misleading class-level mutable array (always shadowed by __init__)
    was removed.
    """

    def __init__(self, arr):
        # Copy so later mutation of the caller's array cannot affect this node.
        self.inputArr = np.copy(arr)

    def output(self):
        """Forward pass: sum of squared entries (a scalar)."""
        return np.sum(np.square(self.inputArr))

    def localGradient(self):
        """df/dinput = 2 * input, same shape as the input."""
        return np.multiply(2, self.inputArr)

    def downstream(self, upstream):
        """Chain rule: local gradient scaled by the upstream gradient."""
        return self.localGradient() * upstream
# In[59]:
class NodeSigmoid():
    """Computational-graph node for the elementwise sigmoid g(a) = 1/(1+e^-a).

    The misleading class-level mutable array (always shadowed by __init__)
    was removed.
    """

    def __init__(self, arr):
        # Copy so later mutation of the caller's array cannot affect this node.
        self.inputArr = np.copy(arr)

    def sigmoid(self):
        """Forward pass: elementwise sigmoid of the stored input."""
        g = 1 / (1 + np.exp(-self.inputArr))
        return g

    def localGradient(self):
        """Elementwise derivative g'(a) = (1 - g(a)) * g(a)."""
        return np.multiply((1 - self.sigmoid()), self.sigmoid())

    def downstream(self, upstream):
        """Chain rule: elementwise product of local and upstream gradients."""
        return np.multiply(self.localGradient(), upstream)
# In[93]:
class NodeMultiply():
    """Computational-graph node for the matrix product f(A, B) = A @ B.

    The misleading class-level mutable arrays (always shadowed by __init__)
    were removed.
    """

    def __init__(self, arr1, arr2):
        # Copies so later mutation of the caller's arrays cannot affect this node.
        self.inputArr1 = np.copy(arr1)
        self.inputArr2 = np.copy(arr2)

    def output(self):
        """Forward pass: inputArr1 @ inputArr2."""
        return np.dot(self.inputArr1, self.inputArr2)

    def localGradient1(self):
        """B^T — right-multiplied by upstream to give the gradient w.r.t. A."""
        return np.transpose(self.inputArr2)

    def localGradient2(self):
        """A^T — left-multiplied by upstream to give the gradient w.r.t. B."""
        return np.transpose(self.inputArr1)

    def downstream1(self, upstream):
        """Gradient w.r.t. arr1: upstream @ B^T."""
        return np.dot(upstream, self.localGradient1())

    def downstream2(self, upstream):
        """Gradient w.r.t. arr2: A^T @ upstream."""
        return np.dot(self.localGradient2(), upstream)
# In[131]:
class Fx():
    """f(W, X) = ||sigmoid(W @ X)||^2 with forward and backward passes.

    The misleading class-level mutable arrays (always shadowed by __init__)
    were removed, and the graph construction duplicated between forward()
    and backward() was factored into a private helper.
    """

    def __init__(self, arr1, arr2):
        self.W = np.copy(arr1)
        self.X = np.copy(arr2)

    def _graph(self):
        # Build the three-node graph: multiply -> sigmoid -> squared-L2.
        n_mul = NodeMultiply(self.W, self.X)
        n_sig = NodeSigmoid(n_mul.output())
        n_l2 = NodeL2(n_sig.sigmoid())
        return n_mul, n_sig, n_l2

    def forward(self):
        """Scalar value of f at the stored W, X."""
        _, _, n_l2 = self._graph()
        return n_l2.output()

    def backward(self):
        """Return (dW, dX): gradients of f w.r.t. W and X via backprop."""
        n_mul, n_sig, n_l2 = self._graph()
        # Upstream gradient arriving at the multiply node (computed once;
        # the original recomputed it for each of dW and dX).
        upstream = n_sig.downstream(n_l2.downstream(1))
        dW = n_mul.downstream1(upstream)
        dX = n_mul.downstream2(upstream)
        return dW, dX
# In[132]:
# In[133]:
if __name__ == "__main__":
    # Quick manual check: gradients of f for a fixed 3x3 weight matrix and
    # a 3x1 one-hot input.
    n1 =Fx([[1,0,1],[1,0,1],[0,1,1]],[[1],[0],[0]])
    print(n1.backward())
|
from . import base
from . import settings
|
import copy
import efficientnet.model as eff
from classification_models.models_factory import ModelsFactory
class BackbonesFactory(ModelsFactory):
    """Model factory that extends classification_models with EfficientNets
    and maps each backbone name to its feature-extraction layers."""

    # Layer names (or indices) to take features from each backbone, ordered
    # (x16, x8, x4, x2, x1) — `x4` means the feature map has 4x less spatial
    # resolution (Height x Width) than the input image.
    _default_feature_layers = {
        # List of layers to take features from backbone in the following order:
        # (x16, x8, x4, x2, x1) - `x4` mean that features has 4 times less spatial
        # resolution (Height x Width) than input image.
        # ResNets
        'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
        'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
        # ResNeXt
        'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
        'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
        # DenseNet (integer layer indices rather than names)
        'densenet169': (367, 139, 51, 4),
        # SE models
        'seresnext50': (1078, 584, 254, 4),
        'seresnext101': (2472, 584, 254, 4),
        # EfficientNets
        'efficientnetb3': ('block6a_expand_activation', 'block4a_expand_activation',
                           'block3a_expand_activation', 'block2a_expand_activation'),
        'efficientnetb4': ('block6a_expand_activation', 'block4a_expand_activation',
                           'block3a_expand_activation', 'block2a_expand_activation'),
    }

    # Extra (constructor, preprocess_input) pairs merged into the parent
    # factory's registry.
    _models_update = {
        'efficientnetb3': [eff.EfficientNetB3, eff.preprocess_input],
        'efficientnetb4': [eff.EfficientNetB4, eff.preprocess_input],
    }

    @property
    def models(self):
        """Parent registry plus the EfficientNet additions (parent unmodified)."""
        all_models = copy.copy(self._models)
        all_models.update(self._models_update)
        return all_models

    def get_backbone(self, name, *args, **kwargs):
        """Instantiate the backbone registered under *name*."""
        model_fn, _ = self.get(name)
        model = model_fn(*args, **kwargs)
        return model

    def get_feature_layers(self, name, n=5):
        """First *n* feature-layer identifiers for the named backbone."""
        return self._default_feature_layers[name][:n]

    def get_preprocessing(self, name):
        """Preprocessing function paired with the named backbone."""
        return self.get(name)[1]


# Shared singleton used by the rest of the package.
Backbones = BackbonesFactory()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.