from lib.base import BaseGithubAction
from lib.formatters import repo_to_dict
__all__ = [
'GetRepoAction'
]
class GetRepoAction(BaseGithubAction):
def run(self, user, repo, base_url):
        if base_url is None:
self._reset(user)
else:
self._reset(user+'|'+base_url)
user = self._client.get_user(user)
repo = user.get_repo(repo)
result = repo_to_dict(repo=repo)
return result
|
from unittest import TestCase
from two_thinning.strategies.mean_thinning_strategy import MeanThinningStrategy
class TestMeanThinningStrategy(TestCase):
    def setUp(self):
        self.strategy = MeanThinningStrategy(n=3, m=5)
def test_decide(self):
self.fail()
def test_note(self):
self.fail()
def test_reset(self):
self.fail()
|
from datetime import datetime
import logging
from django.conf import settings
from django.core.cache import cache
from django.http import JsonResponse
from django.views.generic import TemplateView
import pytz
import requests
log = logging.getLogger(__file__)
class HomePageView(TemplateView):
"""
Handles the homepage view including querying meetup.com for upcoming events
- Handles caching meetup API requests to meetup so we don't get rate limited
"""
cache_duration = 60 * 15
cache_key = "pythonsd.views.MeetupWidget"
template_name = "pythonsd/index.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["upcoming_events"] = self.get_upcoming_events()
return context
def get_upcoming_events(self):
events = cache.get(self.cache_key)
if events:
log.debug("Using Meetup.com events from cache")
return events
log.debug("Requesting upcoming events from Meetup.com")
# https://www.meetup.com/meetup_api/docs/:urlname/events/
try:
resp = requests.get(
"https://api.meetup.com/pythonsd/events",
params={"photo-host": "public", "page": "3"},
timeout=5,
)
except Exception:
return []
if resp.ok:
# Transform from meetup's API format into our format
events = [
{
"link": e["link"],
"name": e["name"],
# Always show time in local San Diego time
"datetime": datetime.utcfromtimestamp(e["time"] // 1000)
.replace(tzinfo=pytz.utc)
.astimezone(pytz.timezone(settings.TIME_ZONE)),
"venue": e["venue"]["name"] if "venue" in e else None,
}
for e in resp.json()
]
cache.set(self.cache_key, events, self.cache_duration)
return events
return []
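# Hedged usage sketch (illustrative, not part of the original view): because
# get_upcoming_events() returns the cached value only when it is truthy, priming the
# cache with a non-empty list short-circuits the Meetup API call entirely:
#
#     from django.core.cache import cache
#     cache.set(HomePageView.cache_key, [{"name": "cached event"}], 60)
#     assert HomePageView().get_upcoming_events() == [{"name": "cached event"}]
#
# Note that an empty cached list is falsy and therefore still triggers a fresh request.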
|
import logging
from flask import request, url_for
from telegram import Update
from backend import DEVELOPMENT_MODE, TELEGRAM_TOKEN, app, bot, update_queue
LOG = logging.getLogger(__name__)
last_update_id = 0
@app.route(f"/{TELEGRAM_TOKEN}/", methods=["POST"])
def telegram_webhook():
update = Update.de_json(request.get_json(force=True), bot)
update_queue.put(update)
return "success"
@app.route("/set-telegram-webhook/")
def set_telegram_webhook():
webhook_url = url_for(".telegram_webhook", _external=True)
logging.info(f"Setting webhook to {webhook_url=}")
result = bot.set_webhook(webhook_url)
if not result:
LOG.error("Telegram webhook setup failed!")
return "failed"
LOG.info("Telegram webhook setup success")
return "success"
if DEVELOPMENT_MODE:
@app.route("/disable-telegram-webhook/")
def disable_telegram_webhook():
logging.info(f"Disabling webhook")
result = bot.set_webhook("")
if not result:
LOG.error("Telegram webhook disabling failed!")
return "failed"
LOG.info("Telegram webhook disabling success")
return "success"
@app.route("/process-last-message/")
def process_last_message():
global last_update_id
updates = bot.get_updates(offset=last_update_id, limit=1, timeout=1)
if not updates:
LOG.info(f"Empty updates; {updates=}; {last_update_id=}")
return "empty"
last_update_id = updates[-1].update_id + 1
LOG.info(f"Some updates; {len(updates)=}; {last_update_id=}")
for update in updates:
LOG.info(f"Enqueuing {update.update_id=}...")
update_queue.put(update)
return str(last_update_id)
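# Hedged note (illustrative, not part of the original module): Telegram delivers
# updates as JSON POSTs to the token-named route above, so the handler can be
# exercised locally with Flask's test client, e.g.:
#
#     client = app.test_client()
#     client.post(f"/{TELEGRAM_TOKEN}/", json={"update_id": 1})
#
# Update.de_json parses the payload and the resulting Update is put on update_queue.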
|
from django.contrib.auth.models import User
from django.test import TestCase
from django.core.management import call_command
from workflow.models import Country
from factories.workflow_models import CountryFactory, UserFactory, TolaUserFactory, CountryAccess
class TestUpdateUserPermissions(TestCase):
def setUp(self):
CountryFactory.create_batch(5)
self.tola_user1 = TolaUserFactory()
self.tola_user2 = TolaUserFactory()
self.tola_user3 = TolaUserFactory()
self.country_count = Country.objects.count()
def test_single_user(self):
call_command('update_user_permissions', self.tola_user1.user.email)
self.assertEqual(self.tola_user1.countries.all().count(), self.country_count)
self.assertEqual(self.tola_user2.countries.all().count(), 0)
self.assertEqual(self.tola_user3.countries.all().count(), 0)
def test_multi_user(self):
call_command('update_user_permissions', self.tola_user1.user.email, self.tola_user2.user.email)
self.assertEqual(self.tola_user1.countries.all().count(), self.country_count)
self.assertEqual(self.tola_user2.countries.all().count(), self.country_count)
self.assertEqual(self.tola_user3.countries.all().count(), 0)
def test_existing_permission(self):
primary_country = Country.objects.first()
CountryAccess.objects.create(tolauser=self.tola_user1, country=primary_country, role='high')
call_command('update_user_permissions', self.tola_user1.user.email)
self.assertEqual(self.tola_user1.countries.all().count(), self.country_count)
self.assertEqual(CountryAccess.objects.filter(tolauser=self.tola_user1, country=primary_country).count(), 1)
self.assertEqual(CountryAccess.objects.get(tolauser=self.tola_user1, country=primary_country).role, 'high')
non_primary_country_pks = Country.objects.exclude(pk=primary_country.pk).values_list('pk', flat=True)
non_primary_country_roles = CountryAccess.objects\
.filter(country__in=non_primary_country_pks, tolauser=self.tola_user1)\
.values_list('role', flat=True)
        self.assertTrue(all(role == 'user' for role in non_primary_country_roles))
|
from wifi import Cell, exceptions
from time import sleep
from sys import argv
import sqlite3
import os
EVIL_TWIN_DB = "src/python/evil_twin/evil_twin.db"
def scan_for_evil_twin(time, adapter):
conn = sqlite3.connect(EVIL_TWIN_DB)
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS LOG (DUPLICATE TEXT NOT NULL, TIME TEXT NOT NULL)")
conn.commit()
conn.close()
try:
while True:
duplicates = []
ssids = []
try:
ssids = [cell.ssid for cell in Cell.all(adapter)]
except exceptions.InterfaceError:
# sometimes can't read, just skip
sleep(1)
continue
for ssid in ssids:
if ssids.count(ssid) > 1 and ssid not in duplicates and ssid != "":
duplicates.append(ssid)
for duplicate in duplicates:
print("[Evil-Twin Detector]: Warning - Found multiple wifi networks with the same SSID: {}".format(duplicate))
conn = sqlite3.connect(EVIL_TWIN_DB)
cursor = conn.cursor()
cursor.execute("INSERT OR IGNORE INTO LOG VALUES (?, strftime('%Y-%m-%d %H:%M', 'now', 'localtime'))", (duplicate, ))
conn.commit()
conn.close()
sleep(time)
except KeyboardInterrupt:
print("[Evil-Twin Detector]: Terminating")
def get_log():
log = []
conn = sqlite3.connect(EVIL_TWIN_DB)
cursor = conn.cursor()
cursor.execute("SELECT * from LOG")
result = cursor.fetchall()
conn.close()
for entry in result:
log.append({"SSID": entry[0], "Time": entry[1]})
return {"result": log}
def delete_log():
print(os.geteuid())
conn = sqlite3.connect(EVIL_TWIN_DB)
cursor = conn.cursor()
cursor.execute("DELETE FROM LOG")
conn.commit()
conn.close()
return {"status": "success"}
def main():
try:
time = int(argv[1])
adapter = argv[2]
except IndexError:
print("""
Usage: evil_detector.py [time] [adapter]
""")
        # Bail out; time/adapter are undefined without command-line arguments.
        return
    scan_for_evil_twin(time, adapter)
if __name__ == "__main__":
main()
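# Hedged usage note (illustrative): the detector is meant to be run from the command
# line with a scan interval in seconds and a wireless interface name, e.g.:
#
#     python3 evil_detector.py 10 wlan0
#
# Duplicate SSIDs seen in a single scan are printed and appended to the LOG table.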
|
from srcode import app
import os, click
def startup(app):
@app.cli.group()
def translate():
"""Translation and localization commands for easier translations. Parent command that provides a base for other commands below"""
pass
    @translate.command()
    def update():
        """Update all the languages."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
if os.system('pybabel update -i messages.pot -d srcode/translations'):
raise RuntimeError('update command failed')
os.remove('messages.pot')
@translate.command()
def compile():
"""Compile all languages."""
if os.system('pybabel compile -d srcode/translations'):
raise RuntimeError('compile command failed')
@translate.command()
@click.argument('lang')
def init(lang):
"""Initialize a new language."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
if os.system(
'pybabel init -i messages.pot -d srcode/translations -l ' + lang):
raise RuntimeError('init command failed')
os.remove('messages.pot')
|
"""Run a simple test of the Cusser class."""
import curses
import sys
from functools import reduce
from ._misc import (
_SUPPORTED_ATTRIBUTE_TAGS,
_SUPPORTED_COLOR_TAGS,
_app,
_clear_line,
_clear_screen,
_move,
_step,
)
if __name__ == "__main__":
if len(sys.argv) < 2:
print(
f"usage: {sys.argv[0]} <example>, where <example> is one of: "
"'attributes', 'colors', 'clear', 'cursor'"
)
sys.exit(1)
MESSAGE = "The quick brown fox jumps over the lazy dog"
if sys.argv[1] == "attributes":
text = reduce(
lambda acc, attribute: acc + attribute(MESSAGE) + "\n",
_SUPPORTED_ATTRIBUTE_TAGS,
"",
)
elif sys.argv[1] == "colors":
text = reduce(
lambda acc, color: acc + color(MESSAGE) + "\n", _SUPPORTED_COLOR_TAGS, ""
)
elif sys.argv[1] == "clear":
text = f"{MESSAGE}{_clear_screen}Screen cleared!\n{MESSAGE}{_clear_line}"
elif sys.argv[1] == "cursor":
text = f"{MESSAGE}{_move()}{_step(1, 1)}{MESSAGE}{_move(3, 3)}{MESSAGE}"
else:
raise ValueError(
f"unknown example: {sys.argv[1]}, must be one of: "
"'attributes', 'colors', 'clear', 'cursor'"
)
curses.wrapper(lambda stdscr: _app(stdscr, text))
|
n, c = map(int, input().split())
array = []
for i in range(n):
array.append(int(input()))
array.sort()
# Parametric (binary) search on the answer: find the largest minimum distance mid
# such that c items can be placed on the sorted positions with pairwise gaps >= mid.
def binary_search(array, start, end):
    answer = 0
    while start <= end:
        mid = (start + end) // 2
        # Greedily count how many items fit when consecutive picks must be >= mid apart.
        current = array[0]
        count = 1
        for i in range(1, len(array)):
            if array[i] >= current + mid:
                count += 1
                current = array[i]
        if count >= c:
            # mid is feasible; remember it and try a larger distance.
            start = mid + 1
            answer = mid
        else:
            end = mid - 1
    return answer

start = 1
end = array[-1] - array[0]
answer = binary_search(array, start, end)
print(answer)
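# Illustrative check (not part of the original submission): with n=5, c=3 and house
# positions 1, 2, 8, 4, 9 (the classic sample), routers can be placed at 1, 4 and 8,
# giving a largest minimum gap of 3, so the program prints 3 for that input.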
|
# -*- coding:utf-8 -*-
# @Time: 2021/1/30 11:36
# @Author: Zhanyi Hou
# @Email: 1295752786@qq.com
# @File: pythonedit.py
import logging
import time
import re
from typing import Tuple, List, TYPE_CHECKING
from qtpy.QtGui import QTextCursor, QMouseEvent, QKeyEvent, QTextBlock
from qtpy.QtWidgets import QLabel, QListWidgetItem, QApplication
from qtpy.QtCore import QPoint, QModelIndex, Signal
from qtpyeditor.codeedit import PMBaseCodeEdit
from qtpyeditor.highlighters import PythonHighlighter
from qtpyeditor.Utilities import AutoCompThread
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
if TYPE_CHECKING:
from jedi.api import Completion
class PMPythonCodeEdit(PMBaseCodeEdit):
def __init__(self, parent=None):
super(PMPythonCodeEdit, self).__init__(parent)
# self.setLineWrapMode(QPlainTextEdit.NoWrap)
# self.doc_tab_widget: 'PMGPythonEditor' = parent
# self.filename = '*'
# self.path = ''
# self.modified = True
self.highlighter = PythonHighlighter(self.document())
self.setTabChangesFocus(False)
self.autocomp_thread = AutoCompThread()
self.autocomp_thread.trigger.connect(self.on_autocomp_signal_received)
self.autocomp_thread.start()
self.setMouseTracking(True)
self.last_mouse_position: QPoint = None
self.last_mouse_moved = time.time()
self.hint_widget = QLabel('', parent=self)
self.hint_widget.setVisible(False)
def on_autocomp_signal_received(self, text_cursor_content: tuple, completions: List['jedi.api.Completion']):
        '''
        Callback executed when an auto-completion hint signal is received.
        :param text_cursor_content: (row, col, hint_when_completion_triggered)
        :param completions:
        :return:
        '''
hint = self._get_hint()
logger.debug('hint_when_completion_triggered:{0},current_hint:{1}'.format(text_cursor_content[2], hint))
if hint.startswith(text_cursor_content[2]):
if len(completions) == 1:
if completions[0].name == self._get_hint():
self.hide_autocomp()
return
self.autocomp_show(completions)
else:
self.hide_autocomp()
def hide_autocomp(self):
self.popup_hint_widget.hide_autocomp()
def on_text_changed(self):
super(PMPythonCodeEdit, self).on_text_changed()
self._get_textcursor_pos()
cursor_pos = self.cursorRect()
self.popup_hint_widget.setGeometry(
cursor_pos.x() + 5, cursor_pos.y() + 20,
self.popup_hint_widget.sizeHint().width(),
self.popup_hint_widget.sizeHint().height())
self._request_autocomp()
def _insert_autocomp(self, e: QModelIndex = None):
row = self.popup_hint_widget.currentRow()
if 0 <= row < self.popup_hint_widget.count():
complete, word_type = self.popup_hint_widget.get_complete(row)
word = self.popup_hint_widget.get_text(row)
if not word.startswith(self._get_hint()):
return
comp = word[len(self._get_hint()):]
self.insertPlainText(comp)
textcursor: QTextCursor = self.textCursor()
word = self.get_word(textcursor.blockNumber(), textcursor.columnNumber() - 1)
if word_type == 'function':
self.insertPlainText('()')
tc = self.textCursor()
tc.movePosition(QTextCursor.PreviousCharacter)
self.setTextCursor(tc)
elif word_type == 'keyword':
self.insertPlainText(' ')
self.popup_hint_widget.hide()
def _get_nearby_text(self):
block_text = self.textCursor().block().text()
col = self.textCursor().columnNumber()
return block_text[:col]
def _get_hint(self):
block_text = self.textCursor().block().text()
        if block_text.lstrip().startswith('#'):  # inside a comment
return ''
col = self.textCursor().columnNumber()
nearby_text = block_text[:col]
hint = re.split(
'[.:;,?!\s \+ \- = \* \\ \/ \( \)\[\]\{\} ]', nearby_text)[-1]
return hint
def _request_autocomp(self):
pos = self._get_textcursor_pos()
nearby_text = self._get_nearby_text()
hint = self._get_hint()
if hint == '' and not nearby_text.endswith(('.', '\\\\', '/')):
self.popup_hint_widget.hide_autocomp()
return
self.autocomp_thread.text_cursor_pos = (pos[0] + 1, pos[1])
self.autocomp_thread.text = self.toPlainText()
def autocomp_show(self, completions: List['Completion']):
l = []
if len(completions) != 0:
self.popup_hint_widget.set_completions(completions)
else:
self.popup_hint_widget.hide()
self.popup_hint_widget.autocomp_list = l
def _get_textcursor_pos(self) -> Tuple[int, int]:
return self.textCursor().blockNumber(), self.textCursor().columnNumber()
def mousePressEvent(self, a0: QMouseEvent) -> None:
# PluginInterface.show_tool_bar('code_editor_toolbar')
if self.popup_hint_widget.isVisible():
self.popup_hint_widget.hide_autocomp()
super().mousePressEvent(a0)
def keyPressEvent(self, event: QKeyEvent) -> None:
super().keyPressEvent(event)
def on_back_tab(self):
cursor = self.textCursor()
if cursor.hasSelection():
self.editUnindent()
else:
cursor = self.textCursor()
cursor.clearSelection()
cursor.movePosition(QTextCursor.StartOfBlock)
for i in range(4):
cursor.movePosition(QTextCursor.NextCharacter, QTextCursor.KeepAnchor, 1)
if not cursor.selectedText().endswith(' '):
cursor.movePosition(QTextCursor.PreviousCharacter, QTextCursor.KeepAnchor, 1)
break
cursor.removeSelectedText()
def on_tab(self):
cursor = self.textCursor()
if cursor.hasSelection():
self.editIndent()
return
else:
nearby_text = self._get_nearby_text()
hint = self._get_hint()
if hint == '' and not nearby_text.endswith(('.', '\\\\', '/')):
cursor = self.textCursor()
cursor.insertText(" ")
else:
self._request_autocomp()
def mouseMoveEvent(self, e: QMouseEvent):
"""
鼠标移动
移动到marker上的时候,便弹出提示框。
:param e:
:return:
"""
super(PMPythonCodeEdit, self).mouseMoveEvent(e)
cursor: QTextCursor = self.cursorForPosition(e.pos())
if not self.should_check_code():
return
line, col = cursor.blockNumber(), cursor.positionInBlock()
flag = False
text = ''
if line in self.highlighter.highlight_marks:
marker_propertys = self.highlighter.highlight_marks.get(line)
for marker_property in marker_propertys:
start = marker_property[0]
if marker_property[1] == -1:
end = len(cursor.block().text())
else:
end = start + marker_property[1]
if start <= col < end:
flag = True
text += marker_property[3] + '\n'
break
self.hint_widget.setGeometry(e.x() + 30, e.y(),
self.hint_widget.sizeHint().width(), self.hint_widget.sizeHint().height())
self.hint_widget.setText(text.strip())
self.hint_widget.setVisible(flag)
def should_check_code(self) -> bool:
"""
返回是否会对代码做insight.
:return:
"""
return len(self.toPlainText()) < 10000 * 120
if __name__ == '__main__':
app = QApplication([])
e = PMPythonCodeEdit()
e.show()
app.exec_()
|
"""LintPlaybook version"""
__version__ = '0.1.dev1'
|
import os.path
from django import template
from django.utils.html import format_html
from django.template.defaultfilters import filesizeformat
from django.template.loader import get_template
register = template.Library()
@register.filter
def attachment_link(attachment):
kwargs = {
'href': attachment.attachment.url,
'title': attachment.comment or attachment.attachment.url,
'label': attachment.label or attachment.attachment.name.split("/")[-1],
}
return format_html('<a target="_blank" href="{href}" title="{title}">{label}</a>', **kwargs)
@register.filter
def attachment_img(attachment):
kwargs = {
'src': attachment.attachment.url,
'alt': attachment.comment or attachment.label or attachment.attachment.url,
}
return format_html('<img class="qa-image" src="{src}" alt="{alt}"/>', **kwargs)
@register.filter
def ti_attachment_img(attachment):
context = {
'src': attachment.attachment.url,
'name': os.path.basename(attachment.attachment.name),
'size': filesizeformat(attachment.attachment.size),
}
return get_template("attachments/ti_img.html").render(context)
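# Hedged usage sketch (illustrative; the {% load %} name depends on this module's
# filename, which is not shown here and is assumed below):
#
#     {% load attachments_tags %}
#     {{ attachment|attachment_link }}
#     {{ attachment|ti_attachment_img }}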
|
from .item import Item
from .wall import Wall
from .decoration import Decoration
from .item_grid import ItemGrid
from .player import Player
from .person import Person
from .enemy import Enemy
from .door import Door
from .item_weapon import ItemWeapon
from .item_ammo import ItemAmmo
from .item_score import ItemScore
class Map2d():
    def __init__(self, items: list):
self.__grid = ItemGrid(items)
self.__players = {item.player_id: item for item in items if isinstance(item, Player)}
self.__persons = [item for item in items if isinstance(item, Person)]
self.__doors = [item for item in items if isinstance(item, Door)]
self.__enemies = [item for item in items if isinstance(item, Enemy)]
@property
def grid(self) -> Item:
return self.__grid
def update(self, delta_time: float):
for player_id in self.__players:
player = self.__players[player_id]
player.update(delta_time, self.__grid)
player.adjust_collision(self.__grid)
player.cast(self.__grid)
for door in self.__doors:
door.update(delta_time, self.__persons)
def get_player(self, player_id: str):
return self.__players[player_id]
@staticmethod
def create_with_pattern(pattern: str):
items = []
lines = pattern.splitlines()
list_type_ids = [[s[n:n+2]
for n in range(0, len(s), 2)] for s in lines]
for block_y, type_ids in enumerate(list_type_ids):
for block_x, type_id_hex in enumerate(type_ids):
if type_id_hex.strip() == "":
continue
type_id = int(type_id_hex, 16)
if type_id in type_ids_walls_solid:
items.append(Wall(block_x, block_y, type_id))
elif type_id in type_ids_weapon:
items.append(ItemWeapon(block_x + 0.5, block_y + 0.5, type_id, False))
elif type_id in type_ids_ammo:
items.append(ItemAmmo(block_x + 0.5, block_y + 0.5, type_id, False))
elif type_id in type_ids_score:
items.append(ItemScore(block_x + 0.5, block_y + 0.5, type_id, False))
elif type_id in type_ids_decorations_non_solid:
items.append(Decoration(block_x + 0.5, block_y + 0.5, type_id, False))
elif type_id in type_ids_decorations_solid:
items.append(Decoration(block_x + 0.5, block_y + 0.5, type_id, True))
elif type_id in type_ids_doors_horizontal:
items.append(Door(block_x, block_y + 0.5, type_id, False))
elif type_id in type_ids_doors_vertical:
items.append(Door(block_x + 0.5, block_y, type_id, True))
elif type_id in type_ids_enemies:
items.append(Enemy.create(block_x + 0.5, block_y + 0.5, type_id))
elif type_id in type_ids_player:
items.append(Player.create(block_x + 0.5, block_y + 0.5, type_id))
return Map2d(items)
type_ids_decorations_solid = [57, 60, 61, 63, 65, 66, 68,
69, 70, 71, 74, 75, 76, 80, 93, 94, 95, 97]
# type_ids_decorations_non_solid = [58, 62, 67,
# 72, 73, 77, 81, 92, 96, 99, 100, 101, 102]
type_ids_decorations_non_solid = [i for i in range(56,120) if i not in type_ids_decorations_solid]
type_ids_walls_solid = list(range(48)) + [53, 54]
type_ids_score = [ 87, 88, 89 ]
type_ids_weapon = [ 85, 86 ]
type_ids_ammo = [ 84 ]
type_ids_doors_horizontal = [50]
type_ids_doors_vertical = [49]
type_ids_player = [255]
type_ids_enemies = [130, 131]
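# Hedged usage sketch (illustrative, not part of the original module): a 3x3 map of
# solid walls (type id 0x01) enclosing a single player spawn (type id 0xFF):
#
#     pattern = "010101\n01FF01\n010101"
#     demo_map = Map2d.create_with_pattern(pattern)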
|
#!/usr/bin/python38
import hashlib
import lxml
import re
import requests
from lxml import etree
from libmysql_utils.mysql8 import mysqlHeader, mysqlBase
from sqlalchemy import Column, String, Integer, Float, Date, Text
from sqlalchemy.ext.declarative import declarative_base
from libutils.log import Log
__version__ = 2
article_base = declarative_base()
class spider(object):
def __init__(self):
self.start_url = None
def fetch_start_url(self, url):
if re.match(r'(https?)://.', url):
self.start_url = url
else:
            raise ValueError(f'Invalid URL: {url}')
def query_url(self, url):
# query whether url exists.
return True
class formArticle(article_base):
__tablename__ = 'news'
idx = Column(Integer)
title = Column(String(50))
url = Column(String(50), primary_key=True)
release_date = Column(Date)
source = Column(String(10))
content = Column(Text)
keyword = Column(Text)
class news(object):
def __init__(self, html: lxml.etree._Element):
if not isinstance(html, lxml.etree._Element):
raise TypeError('Article is not a lxml.etree._Element object.')
self._html = html
if self._html:
self._title = ""
self._get_title()
self._author = ""
self._content = ""
self._get_text()
self._date = ""
self._get_date()
self._source = ""
self.url = ""
def __str__(self):
return f"《{self.title}》:{self.author}:{self.date}"
@Log
def _get_date(self):
"""
        old version:
date_string = self._html.xpath("//div[@class='post_time_source']/text()")
"""
date_string = self._html.xpath("//div[@class='post_info']/text()")
for s in date_string:
result = re.search(r'\d{4}\-\d{2}\-\d{2}', s)
if result:
self._date = result.group()
break
else:
self._date = ''
@property
def date(self) -> str:
return self._date
@Log
def _get_title(self):
result = self._html.xpath("//div/h1/text()")
if result:
self._title = result[0]
else:
self._title = ""
@property
def title(self) -> str:
return self._title
@property
def source(self):
article_source = self._html.xpath("//div[@class='ep-source cDGray']/span[@class='left']/text()")
if article_source:
result = re.split(r':', article_source[0])
self._source = result[1]
else:
self._source = ""
return self._source
@property
def author(self):
article_author = self._html.xpath("//span[@class='ep-editor']/text()")
if article_author:
result = re.split(r':', article_author[0])
self._author = result[1]
else:
self._author = ""
return self._author
@Log
def _get_text(self):
"""
        older version:
_text = self._html.xpath("//div[@class='post_text']/p")
"""
_text = self._html.xpath("//div[@class='post_body']/p")
for line in _text:
result = line.xpath(
"./text()"
"|.//*[name(.)='font' or name(.)='b' or name(.)='a']/text()")
for subline in result:
self._content += subline
        # str.replace returns a new string, so the result must be assigned back
        # (removes spaces, stray '\content' sequences and newlines)
        self._content = self._content.replace(' ', '')
        self._content = self._content.replace('\content', '')
        self._content = self._content.replace('\n', '')
@property
def text(self) -> str:
return self._content
if __name__ == "__main__":
url = "https://money.163.com/21/0302/19/G43UHG4S00259DLP.html"
text = requests.get(url)
h = etree.HTML(text.text)
    art = news(h)
art.url = url
print("url", art.url)
print("title", art.title)
print("date", art.date)
# print("text", art.text)
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script connects to the system vm socket and writes the
# authorized_keys and cmdline data to it. The system VM then
# reads it from /dev/vport0p1 in cloud_early_config
#
import argparse
import os
import socket
SOCK_FILE = "/var/lib/libvirt/qemu/{name}.agent"
PUB_KEY_FILE = "/root/.ssh/id_rsa.pub.cloud"
MESSAGE = "pubkey:{key}\ncmdline:{cmdline}\n"
def send_to_socket(sock_file, key_file, cmdline):
if not os.path.exists(key_file):
print("ERROR: ssh public key not found on host at {0}".format(key_file))
return 1
try:
with open(key_file, "r") as f:
pub_key = f.read()
except IOError as e:
print("ERROR: unable to open {0} - {1}".format(key_file, e.strerror))
return 1
# Keep old substitution from perl code:
cmdline = cmdline.replace("%", " ")
msg = MESSAGE.format(key=pub_key, cmdline=cmdline)
if not os.path.exists(sock_file):
print("ERROR: {0} socket not found".format(sock_file))
return 1
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(sock_file)
        s.sendall(msg.encode())  # sockets require bytes under Python 3
s.close()
except IOError as e:
print("ERROR: unable to connect to {0} - {1}".format(sock_file, e.strerror))
return 1
return 0 # Success
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Send configuration to system VM socket")
parser.add_argument("-n", "--name", required=True, help="Name of VM")
parser.add_argument("-p", "--cmdline", required=True, help="Command line")
arguments = parser.parse_args()
socket_file = SOCK_FILE.format(name=arguments.name)
exit(send_to_socket(socket_file, PUB_KEY_FILE, arguments.cmdline))
|
from preprocessing_utilities import create_and_save_uniques, load_uniques_create_dict_map
from Scripts.utilities import start_correct_cluster, read_dataset, save_dataset, parse_args
from preprocessing_utilities import dict_path, temp_output_path, dataset_path
import numpy as np
import dask
import dask.dataframe as dd
out_cols = ["number_of_photo", "number_of_gif", "number_of_video", 'presence_of_photo', 'presence_of_gif', 'presence_of_video']
out_frame_name = "mapped_media"
def functional_map_media(media_series: dask.dataframe.Series) -> dask.dataframe.DataFrame:
# Map the feature
n_photo = media_series.str.count('Photo')
n_gif = media_series.str.count('GIF')
n_video = media_series.str.count('Video')
out_media = dd.concat(
[
n_photo.astype(np.uint8).to_frame(name='number_of_photo'),
n_gif.astype(np.uint8).to_frame(name='number_of_gif'),
n_video.astype(np.uint8).to_frame(name='number_of_video'),
n_photo.astype(np.bool_).to_frame(name='presence_of_photo'),
n_gif.astype(np.bool_).to_frame(name='presence_of_gif'),
n_video.astype(np.bool_).to_frame(name='presence_of_video'),
],
axis=1, ignore_unknown_divisions=True
)
return out_media
if __name__ == '__main__':
generate_dict, is_test = parse_args()
c = start_correct_cluster(is_test, use_processes=False)
### Map media
# Load dataset
df = read_dataset(dataset_path, ["raw_feature_tweet_media"])
# Do functional mapping
media_df = functional_map_media(df["raw_feature_tweet_media"])
# Write the output dataset
save_dataset(temp_output_path, media_df, out_frame_name)
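# Hedged example of the mapping (illustrative, not part of the original script): for a
# raw media string such as "Photo\tPhoto\tGIF", str.count() yields number_of_photo=2,
# number_of_gif=1, number_of_video=0, and the presence flags True/True/False.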
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import gin.tf
slim = tf.contrib.slim
gin.constant('networks.STACK_SIZE_1', 1)
gin.constant('networks.STACK_SIZE_4', 4)
gin.constant('networks.OBSERVATION_DTYPE_FLOAT32', tf.float32)
def _atari_dqn_network(num_actions, num_sub_actions, state, use_dueling):
del num_sub_actions
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = slim.conv2d(net, 32, [8, 8], stride=4)
net = slim.conv2d(net, 64, [4, 4], stride=2)
net = slim.conv2d(net, 64, [3, 3], stride=1)
net = slim.flatten(net)
if use_dueling:
v_net = slim.fully_connected(net, 512)
v_value = slim.fully_connected(v_net, 1, activation_fn=None)
adv_net = slim.fully_connected(net, 512)
adv_values = slim.fully_connected(adv_net, num_actions,
activation_fn=None)
adv_values -= tf.reduce_mean(adv_values, axis=1, keepdims=True)
q_values = adv_values + v_value
return q_values, v_value
else:
net = slim.fully_connected(net, 512)
q_values = slim.fully_connected(net, num_actions,
activation_fn=None)
return q_values, None
@gin.configurable
def _atari_hgqn_network(num_actions, num_sub_actions, state, use_dueling,
hyperedge_orders, mixer):
assert num_sub_actions == [3,3,2]
assert num_actions == np.prod(num_sub_actions)
assert all(x in [1,2,3] for x in hyperedge_orders)
ATARI_MIXER_NET_H = 25
n = len(num_sub_actions)
num_heads = int(0)
if 1 in hyperedge_orders:
num_heads += int(n)
if 2 in hyperedge_orders:
num_heads += int(n*(n-1)/2)
if 3 in hyperedge_orders:
num_heads += int(1)
if num_heads == 3:
assert (hyperedge_orders == [1]) or (hyperedge_orders == [2])
LAYER_WIDTH = 172
elif num_heads == 6:
assert hyperedge_orders == [1,2]
LAYER_WIDTH = 86
elif num_heads == 4:
assert (hyperedge_orders == [1,3]) or (hyperedge_orders == [2,3])
LAYER_WIDTH = 128
elif num_heads == 7:
assert hyperedge_orders == [1,2,3]
LAYER_WIDTH = 74
else:
assert num_heads == 1
assert hyperedge_orders == [3]
raise AssertionError('Use `atari_dqn_network` for [3].')
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = slim.conv2d(net, 32, [8, 8], stride=4)
net = slim.conv2d(net, 64, [4, 4], stride=2)
net = slim.conv2d(net, 64, [3, 3], stride=1)
net = slim.flatten(net)
if list(filter((3).__ne__, hyperedge_orders)) == [] or mixer == 'sum':
order_1_values = 0.0
order_2_values = 0.0
order_3_values = 0.0
bq_values_orig = None
p_bq_values_orig = None
t_bq_values_orig = None
if 1 in hyperedge_orders:
bq_values = []
for num_sub_actions_dim in num_sub_actions:
bnet = slim.fully_connected(net, LAYER_WIDTH)
bq_values_dim = slim.fully_connected(bnet, num_sub_actions_dim,
activation_fn=None)
bq_values.append(bq_values_dim)
bq_values_orig = bq_values
if mixer == 'sum':
from hyperdopamine.agents.utils import SUM_ORDER_1_MAP
order_1_mapping_matrices = SUM_ORDER_1_MAP
bq_values = tf.concat(bq_values, -1)
order_1_mapping_matrices = tf.transpose(order_1_mapping_matrices)
assert order_1_mapping_matrices.shape == (8, num_actions)
order_1_values = tf.matmul(bq_values, order_1_mapping_matrices)
assert order_1_values.shape == (state.shape[0], num_actions)
if 2 in hyperedge_orders:
p_bq_values = []
branch_pairs = [[0, 1], [0, 2], [1, 2]]
assert len(branch_pairs) == int(n*(n-1)/2)
for pair_of_branches,test in zip(branch_pairs,[9,6,6]):
branch_i, branch_j = pair_of_branches
pair_output_length = \
num_sub_actions[branch_i] * num_sub_actions[branch_j]
assert pair_output_length == test
p_bnet = slim.fully_connected(net, LAYER_WIDTH)
p_bq_values_pair_of_dims = \
slim.fully_connected(p_bnet, pair_output_length,
activation_fn=None)
p_bq_values.append(p_bq_values_pair_of_dims)
p_bq_values_orig = p_bq_values
if mixer == 'sum':
from hyperdopamine.agents.utils import SUM_ORDER_2_MAP
order_2_mapping_matrices = SUM_ORDER_2_MAP
p_bq_values = tf.concat(p_bq_values, -1)
order_2_mapping_matrices = tf.transpose(order_2_mapping_matrices)
assert order_2_mapping_matrices.shape == (21, num_actions)
order_2_values = tf.matmul(p_bq_values, order_2_mapping_matrices)
assert order_2_values.shape == (state.shape[0], num_actions)
if 3 in hyperedge_orders:
t_bnet = slim.fully_connected(net, LAYER_WIDTH)
order_3_values = slim.fully_connected(t_bnet, num_actions,
activation_fn=None)
t_bq_values_orig = order_3_values
assert order_3_values.shape == (state.shape[0], num_actions)
if (list(filter((3).__ne__, hyperedge_orders)) != [] and
mixer == 'universal'):
initializer_W = slim.initializers.xavier_initializer()
initializer_b = slim.init_ops.zeros_initializer()
W1 = tf.get_variable('W1', shape=[num_heads, ATARI_MIXER_NET_H],
initializer=initializer_W, dtype=tf.float32, trainable=True)
b1 = tf.get_variable('b1', shape=[ATARI_MIXER_NET_H],
initializer=initializer_b, dtype=tf.float32, trainable=True)
W2 = tf.get_variable('W2', shape=[ATARI_MIXER_NET_H, 1],
initializer=initializer_W, dtype=tf.float32, trainable=True)
b2 = tf.get_variable('b2', shape=[1],
initializer=initializer_b, dtype=tf.float32, trainable=True)
if 1 in hyperedge_orders:
from hyperdopamine.agents.utils import GENERAL_ORDER_1_MAP
order_1_mapping_matrices = GENERAL_ORDER_1_MAP
if 2 in hyperedge_orders:
from hyperdopamine.agents.utils import GENERAL_ORDER_2_MAP
order_2_mapping_matrices = GENERAL_ORDER_2_MAP
all_values_per_action = []
for act in range(num_actions):
values_to_mix_act = []
if 1 in hyperedge_orders:
l1_per_composite = []
for bq,b2c in zip(bq_values, order_1_mapping_matrices[act]):
b2c = tf.expand_dims(b2c, -1)
out = tf.matmul(bq, b2c)
out = tf.squeeze(out, -1)
l1_per_composite.append(out)
l1_per_composite = tf.stack(l1_per_composite, -1)
values_to_mix_act.append(l1_per_composite)
if 2 in hyperedge_orders:
l2_per_composite = []
for pbq,pb2c in zip(p_bq_values, order_2_mapping_matrices[act]):
pb2c = tf.expand_dims(pb2c, -1)
out = tf.matmul(pbq, pb2c)
out = tf.squeeze(out, -1)
l2_per_composite.append(out)
l2_per_composite = tf.stack(l2_per_composite, -1)
values_to_mix_act.append(l2_per_composite)
input_values = tf.concat(values_to_mix_act, -1)
assert input_values.shape == (state.shape[0],
num_heads - (1 if 3 in hyperedge_orders else 0))
all_values_per_action.append(input_values)
all_values_per_action = tf.stack(all_values_per_action, -1)
if 3 in hyperedge_orders:
all_values_per_action = tf.concat([all_values_per_action, \
tf.reshape(order_3_values, [-1, 1, num_actions])], 1)
assert all_values_per_action.shape == \
(state.shape[0], num_heads, num_actions)
all_values_per_action = tf.transpose(all_values_per_action,
perm=[2, 0, 1])
all_values_per_action = tf.reshape(all_values_per_action,
[-1, num_heads])
q_values = tf.add(tf.matmul(all_values_per_action, W1), b1)
q_values = tf.nn.relu(q_values)
q_values = tf.add(tf.matmul(q_values, W2), b2)
q_values = tf.reshape(q_values, [num_actions, -1])
q_values = tf.transpose(q_values)
else:
assert (list(filter((3).__ne__, hyperedge_orders)) == [] or
mixer == 'sum')
q_values = order_1_values + order_2_values + order_3_values
assert q_values.shape == (state.shape[0], num_actions)
if use_dueling:
v_net = slim.fully_connected(net, 512)
v_value = slim.fully_connected(v_net, 1, activation_fn=None)
q_values -= tf.reduce_mean(q_values, axis=1, keepdims=True)
q_values += v_value
return q_values, v_value, bq_values_orig, p_bq_values_orig, \
t_bq_values_orig
else:
return q_values, None, bq_values_orig, p_bq_values_orig, \
t_bq_values_orig
@gin.configurable
def _hgqn_network(num_actions, num_sub_actions, state, use_dueling,
hyperedge_orders, mixer):
n = len(num_sub_actions)
m = max(num_sub_actions)
assert max(num_sub_actions) == min(num_sub_actions)
assert num_actions == m**n
possible_factors = \
list(filter((0).__ne__, range(-1,(n if n<4 else 4),1)))
assert all(x in possible_factors for x in hyperedge_orders)
MIXER_NET_H = 25
SHARED_NET_H1 = 600
SHARED_NET_H2 = 400
LAST_NET_H = 400
num_heads = int(0)
if 1 in hyperedge_orders:
num_heads += int(n)
if 2 in hyperedge_orders:
num_heads += int(n*(n-1)/2)
if 3 in hyperedge_orders:
num_heads += int(n*(n-1)*(n-2)/6)
if -1 in hyperedge_orders:
num_heads += int(1)
from hyperdopamine.agents.utils import ceil_rounder
LAYER_WIDTH = ceil_rounder(LAST_NET_H / num_heads)
net = tf.cast(state, tf.float32)
net = slim.flatten(net)
if SHARED_NET_H1 is not None:
net = slim.fully_connected(net, SHARED_NET_H1)
if SHARED_NET_H2 is not None:
net = slim.fully_connected(net, SHARED_NET_H2)
if (list(filter((-1).__ne__, hyperedge_orders)) == [] or
mixer == 'sum'):
order_1_values = 0.0
order_2_values = 0.0
order_3_values = 0.0
order_Nv_values = 0.0
bq_values_orig = None
p_bq_values_orig = None
t_bq_values_orig = None
n_bq_values_orig = None
if 1 in hyperedge_orders:
bq_values = []
for num_sub_actions_dim in num_sub_actions:
bnet = slim.fully_connected(net, LAYER_WIDTH)
bq_values_dim = slim.fully_connected(bnet, num_sub_actions_dim,
activation_fn=None)
bq_values.append(bq_values_dim)
bq_values_orig = bq_values
if mixer == 'sum':
from hyperdopamine.agents.utils import create_sum_order_1_map
order_1_mapping_matrices = create_sum_order_1_map(
num_branches=n, num_sub_actions_per_branch=m)
bq_values = tf.concat(bq_values, -1)
order_1_mapping_matrices = tf.transpose(order_1_mapping_matrices)
assert order_1_mapping_matrices.shape == (n*m, num_actions)
order_1_values = tf.matmul(bq_values, order_1_mapping_matrices)
assert order_1_values.shape == (state.shape[0], num_actions)
if 2 in hyperedge_orders:
p_bq_values = []
num_branch_pairs = int(n*(n-1)/2)
for _ in range(num_branch_pairs):
p_bnet = slim.fully_connected(net, LAYER_WIDTH)
p_bq_values_pair_of_dims = \
slim.fully_connected(p_bnet, m**2, activation_fn=None)
p_bq_values.append(p_bq_values_pair_of_dims)
p_bq_values_orig = p_bq_values
if mixer == 'sum':
from hyperdopamine.agents.utils import create_sum_order_2_map
order_2_mapping_matrices = create_sum_order_2_map(
num_branches=n, num_sub_actions_per_branch=m)
p_bq_values = tf.concat(p_bq_values, -1)
order_2_mapping_matrices = tf.transpose(order_2_mapping_matrices)
assert order_2_mapping_matrices.shape == (num_branch_pairs*m**2,
num_actions)
order_2_values = tf.matmul(p_bq_values, order_2_mapping_matrices)
assert order_2_values.shape == (state.shape[0], num_actions)
if 3 in hyperedge_orders:
t_bq_values = []
num_branch_triplets = int(n*(n-1)*(n-2)/6)
for _ in range(num_branch_triplets):
t_bnet = slim.fully_connected(net, LAYER_WIDTH)
t_bq_values_triplet_of_dims = \
slim.fully_connected(t_bnet, m**3, activation_fn=None)
t_bq_values.append(t_bq_values_triplet_of_dims)
t_bq_values_orig = t_bq_values
if mixer == 'sum':
from hyperdopamine.agents.utils import create_sum_order_3_map
order_3_mapping_matrices = create_sum_order_3_map(
num_branches=n, num_sub_actions_per_branch=m)
t_bq_values = tf.concat(t_bq_values, -1)
order_3_mapping_matrices = tf.transpose(order_3_mapping_matrices)
assert order_3_mapping_matrices.shape == (num_branch_triplets*m**3,
num_actions)
order_3_values = tf.matmul(t_bq_values, order_3_mapping_matrices)
assert order_3_values.shape == (state.shape[0], num_actions)
if -1 in hyperedge_orders:
n_bnet = slim.fully_connected(net, LAYER_WIDTH)
order_Nv_values = slim.fully_connected(n_bnet, num_actions,
activation_fn=None)
n_bq_values_orig = order_Nv_values
assert order_Nv_values.shape == (state.shape[0], num_actions)
if (list(filter((-1).__ne__, hyperedge_orders)) != [] and
mixer == 'universal'):
initializer_W = slim.initializers.xavier_initializer()
initializer_b = slim.init_ops.zeros_initializer()
W1 = tf.get_variable('W1', shape=[num_heads, MIXER_NET_H],
initializer=initializer_W, dtype=tf.float32, trainable=True)
b1 = tf.get_variable('b1', shape=[MIXER_NET_H],
initializer=initializer_b, dtype=tf.float32, trainable=True)
W2 = tf.get_variable('W2', shape=[MIXER_NET_H, 1],
initializer=initializer_W, dtype=tf.float32, trainable=True)
b2 = tf.get_variable('b2', shape=[1],
initializer=initializer_b, dtype=tf.float32, trainable=True)
all_values = []
if 1 in hyperedge_orders:
from hyperdopamine.agents.utils import create_general_order_1_map
order_1_mapping_matrices = create_general_order_1_map(
num_branches=n, num_sub_actions_per_branch=m)
order_1_mapping_matrices = np.array(order_1_mapping_matrices)
order_1_mapping_matrices = \
np.transpose(order_1_mapping_matrices, (1, 2, 0))
order_1_mapping_matrices = \
np.expand_dims(order_1_mapping_matrices, axis=0)
assert order_1_mapping_matrices.shape == (1, n, m, num_actions)
bq_values = tf.concat(bq_values, -1)
bq_values = tf.reshape(bq_values, [-1, n, m])
bq_values = tf.expand_dims(bq_values, axis=-1)
out_l1 = tf.multiply(bq_values, order_1_mapping_matrices)
out_l1 = tf.reduce_sum(out_l1, axis=2)
out_l1 = tf.transpose(out_l1, (0,2,1))
assert out_l1.shape == (state.shape[0], num_actions, n)
all_values.append(out_l1)
del order_1_mapping_matrices
if 2 in hyperedge_orders:
from hyperdopamine.agents.utils import create_general_order_2_map
order_2_mapping_matrices = create_general_order_2_map(
num_branches=n, num_sub_actions_per_branch=m)
order_2_mapping_matrices = np.array(order_2_mapping_matrices)
order_2_mapping_matrices = \
np.transpose(order_2_mapping_matrices, (1,2,0))
order_2_mapping_matrices = \
np.expand_dims(order_2_mapping_matrices, axis=0)
assert order_2_mapping_matrices.shape == \
(1, n*(n-1)/2, m**2, num_actions)
p_bq_values = tf.concat(p_bq_values, -1)
p_bq_values = tf.reshape(p_bq_values,
[-1, int(n*(n-1)/2), int(m**2)])
p_bq_values = tf.expand_dims(p_bq_values, axis=-1)
out_l2 = tf.multiply(p_bq_values, order_2_mapping_matrices)
out_l2 = tf.reduce_sum(out_l2, axis=2)
out_l2 = tf.transpose(out_l2, (0,2,1))
assert out_l2.shape == (state.shape[0], num_actions, n*(n-1)/2)
all_values.append(out_l2)
del order_2_mapping_matrices
if 3 in hyperedge_orders:
from hyperdopamine.agents.utils import create_general_order_3_map
order_3_mapping_matrices = create_general_order_3_map(
num_branches=n, num_sub_actions_per_branch=m)
order_3_mapping_matrices = np.array(order_3_mapping_matrices)
order_3_mapping_matrices = \
np.transpose(order_3_mapping_matrices, (1,2,0))
order_3_mapping_matrices = \
np.expand_dims(order_3_mapping_matrices, axis=0)
assert order_3_mapping_matrices.shape == \
(1, n*(n-1)*(n-2)/6, m**3, num_actions)
t_bq_values = tf.concat(t_bq_values, -1)
t_bq_values = tf.reshape(t_bq_values,
[-1, n*(n-1)*(n-2)/6, m**3])
t_bq_values = tf.expand_dims(t_bq_values, axis=-1)
out_l3 = tf.multiply(t_bq_values, order_3_mapping_matrices)
out_l3 = tf.reduce_sum(out_l3, axis=2)
out_l3 = tf.transpose(out_l3, (0,2,1))
assert out_l3.shape == \
(state.shape[0], num_actions, n*(n-1)*(n-2)/6)
all_values.append(out_l3)
del order_3_mapping_matrices
if -1 in hyperedge_orders:
out_ln = tf.expand_dims(order_Nv_values, axis=-1)
all_values.append(out_ln)
all_values_per_action = tf.concat(all_values, -1)
all_values_per_action = \
tf.transpose(all_values_per_action, (1,0,2))
assert all_values_per_action.shape == \
(num_actions, state.shape[0], num_heads)
all_values_per_action = tf.reshape(all_values_per_action,
[-1, num_heads])
q_values = tf.add(tf.matmul(all_values_per_action, W1), b1)
q_values = tf.nn.relu(q_values)
q_values = tf.add(tf.matmul(q_values, W2), b2)
q_values = tf.reshape(q_values, [num_actions, -1])
q_values = tf.transpose(q_values)
del all_values_per_action
else:
assert (list(filter((-1).__ne__, hyperedge_orders)) == [] or
mixer == 'sum')
q_values = order_1_values + order_2_values + order_3_values + \
order_Nv_values
assert q_values.shape == (state.shape[0], num_actions)
if use_dueling:
v_net = slim.fully_connected(net, LAST_NET_H)
v_value = slim.fully_connected(v_net, 1, activation_fn=None)
q_values -= tf.reduce_mean(q_values, axis=1, keepdims=True)
q_values += v_value
return q_values, v_value, bq_values_orig, p_bq_values_orig, \
t_bq_values_orig, n_bq_values_orig
else:
return q_values, None, bq_values_orig, p_bq_values_orig, \
t_bq_values_orig, n_bq_values_orig
@gin.configurable
def _branching_network(num_sub_actions, state, use_dueling):
from hyperdopamine.agents.utils import ceil_rounder
LAST_NET_H = 400
LAYER_WIDTH = ceil_rounder(LAST_NET_H / len(num_sub_actions))
net = tf.cast(state, tf.float32)
net = slim.flatten(net)
net = slim.fully_connected(net, 600)
net = slim.fully_connected(net, 400)
bq_values = []
for num_sub_actions_dim in num_sub_actions:
bnet = slim.fully_connected(net, LAYER_WIDTH)
bq_values_dim = \
slim.fully_connected(bnet, num_sub_actions_dim,
activation_fn=None)
if use_dueling:
bq_values_dim -= tf.reduce_mean(bq_values_dim, axis=1,
keepdims=True)
bq_values.append(bq_values_dim)
if use_dueling:
v_net = slim.fully_connected(net, LAST_NET_H)
v_value = slim.fully_connected(v_net, 1, activation_fn=None)
return bq_values, v_value
else:
return bq_values, None
@gin.configurable
def atari_dqn_network(num_actions, num_sub_actions, network_type, state,
**kwargs):
assert kwargs['hyperedge_orders'] == None and kwargs['mixer'] == None
q_values, v_value = _atari_dqn_network(num_actions, num_sub_actions,
state, kwargs['use_dueling'])
return network_type(q_values, v_value, None, None, None, None)
@gin.configurable
def atari_hgqn_network(num_actions, num_sub_actions, network_type, state,
**kwargs):
assert num_sub_actions == [3,3,2]
if (kwargs['hyperedge_orders'] == None or
not kwargs['hyperedge_orders']):
raise ValueError('Unspecified hyperedge orders.')
assert (kwargs['mixer'] == 'sum' or kwargs['mixer'] == 'universal' or
(kwargs['mixer'] == None and kwargs['hyperedge_orders'] == [3]))
q_values, v_value, bq_values, pbq_values, tbq_values = \
_atari_hgqn_network(num_actions, num_sub_actions, state,
kwargs['use_dueling'],
kwargs['hyperedge_orders'],
kwargs['mixer'])
return network_type(q_values, v_value, bq_values, pbq_values,
tbq_values, None)
@gin.configurable
def hgqn_network(num_actions, num_sub_actions, network_type, state,
**kwargs):
assert num_sub_actions != int
if (kwargs['hyperedge_orders'] == None or
not kwargs['hyperedge_orders']):
raise ValueError('Unspecified hyperedge orders.')
assert (kwargs['mixer'] == 'sum' or kwargs['mixer'] == 'universal' or
(kwargs['mixer'] == None and kwargs['hyperedge_orders'] == [-1]))
q_values, v_value, bq_values, pbq_values, tbq_values, nbq_values = \
_hgqn_network(num_actions, num_sub_actions, state,
kwargs['use_dueling'],
kwargs['hyperedge_orders'],
kwargs['mixer'])
return network_type(q_values, v_value, bq_values, pbq_values, tbq_values,
nbq_values)
@gin.configurable
def branching_network(num_sub_actions, network_type, state, **kwargs):
assert num_sub_actions != int
bq_values, v_value = _branching_network(num_sub_actions, state,
kwargs['use_dueling'])
return network_type(bq_values, v_value)
@gin.configurable
def sum_mixer(network_type, utils, **kwargs):
net = tf.stack(utils, axis=1)
assert net.shape[1] == len(utils)
q_value = tf.reduce_sum(utils, axis=0)
q_value = tf.expand_dims(q_value, axis=1)
if kwargs['use_dueling']:
assert q_value.shape == kwargs['v_value'].shape
q_value += kwargs['v_value']
return network_type(q_value)
@gin.configurable
def monotonic_linear_mixer(network_type, utils, **kwargs):
net = tf.stack(utils, axis=1)
assert net.shape[1] == len(utils)
W = tf.get_variable('W', shape=[len(utils), 1], dtype=tf.float32,
trainable=True, initializer=slim.initializers.xavier_initializer())
b = tf.get_variable('b', shape=[1], dtype=tf.float32, trainable=True,
initializer=slim.init_ops.zeros_initializer())
W = tf.abs(W)
q_value = tf.add(tf.matmul(net, W), b)
if kwargs['use_dueling']:
assert q_value.shape == kwargs['v_value'].shape
q_value += kwargs['v_value']
return network_type(q_value)
@gin.configurable
def monotonic_nonlinear_mixer(network_type, utils, **kwargs):
net = tf.stack(utils, axis=1)
assert net.shape[1] == len(utils)
initializer_W = slim.initializers.xavier_initializer()
initializer_b = slim.init_ops.zeros_initializer()
MIXER_NET_H = 25
W1 = tf.get_variable('W1', shape=[len(utils), MIXER_NET_H],
initializer=initializer_W, dtype=tf.float32, trainable=True)
b1 = tf.get_variable('b1', shape=[MIXER_NET_H],
initializer=initializer_b, dtype=tf.float32, trainable=True)
W2 = tf.get_variable('W2', shape=[MIXER_NET_H, 1],
initializer=initializer_W, dtype=tf.float32, trainable=True)
b2 = tf.get_variable('b2', shape=[1],
initializer=initializer_b, dtype=tf.float32, trainable=True)
W1 = tf.abs(W1)
W2 = tf.abs(W2)
net = tf.add(tf.matmul(net, W1), b1)
net = tf.nn.elu(net) # using ELU to maintain strict monotonicity
q_value = tf.add(tf.matmul(net, W2), b2)
if kwargs['use_dueling']:
assert q_value.shape == kwargs['v_value'].shape
q_value += kwargs['v_value']
return network_type(q_value)
|
"""Initial Build
Revision ID: 34935013ab33
Revises:
Create Date: 2020-03-10 23:05:03.634539
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "34935013ab33"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"channel",
sa.Column("id", sa.String(length=32), nullable=False),
sa.Column("name", sa.String(length=128), nullable=True),
sa.Column("active", sa.Boolean(), nullable=True),
sa.Column("infos", sa.JSON(), nullable=True),
sa.Column("hub_infos", sa.JSON(), nullable=True),
sa.Column("subscribe_timestamp", sa.DateTime(), nullable=True),
sa.Column("unsubscribe_timestamp", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id", name=op.f("pk_channel")),
)
op.create_table(
"user",
sa.Column("username", sa.String(length=32), nullable=False),
sa.Column("_password_hash", sa.LargeBinary(length=128), nullable=False),
sa.Column("admin", sa.Boolean(), nullable=True),
sa.Column("_pushover_key", sa.String(length=32), nullable=True),
sa.Column("_youtube_credentials", sa.JSON(), nullable=True),
sa.Column("_line_notify_credentials", sa.String(length=64), nullable=True),
sa.Column("_dropbox_credentials", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("username", name=op.f("pk_user")),
)
op.create_table(
"notification",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("initiator", sa.String(length=16), nullable=False),
sa.Column("username", sa.String(length=32), nullable=True),
sa.Column(
"service", sa.Enum("Pushover", "LineNotify", name="service"), nullable=True
),
sa.Column("message", sa.Text(), nullable=True),
sa.Column("kwargs", sa.JSON(), nullable=True),
sa.Column("sent_timestamp", sa.DateTime(), nullable=True),
sa.Column("response", sa.JSON(), nullable=True),
sa.ForeignKeyConstraint(
["username"], ["user.username"], name=op.f("fk_notification_username_user")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_notification")),
)
with op.batch_alter_table("notification", schema=None) as batch_op:
batch_op.create_index(
batch_op.f("ix_notification_sent_timestamp"),
["sent_timestamp"],
unique=False,
)
op.create_table(
"subscription",
sa.Column("username", sa.String(length=32), nullable=False),
sa.Column("channel_id", sa.String(length=32), nullable=False),
sa.Column("subscribe_timestamp", sa.DateTime(), nullable=True),
sa.Column("unsubscribe_timestamp", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["channel_id"],
["channel.id"],
name=op.f("fk_subscription_channel_id_channel"),
),
sa.ForeignKeyConstraint(
["username"], ["user.username"], name=op.f("fk_subscription_username_user")
),
sa.PrimaryKeyConstraint("username", "channel_id", name=op.f("pk_subscription")),
sa.UniqueConstraint("channel_id", name=op.f("uq_subscription_channel_id")),
sa.UniqueConstraint("username", name=op.f("uq_subscription_username")),
)
op.create_table(
"tag",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=64), nullable=False),
sa.Column("username", sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(
["username"], ["user.username"], name=op.f("fk_tag_username_user")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_tag")),
)
with op.batch_alter_table("tag", schema=None) as batch_op:
batch_op.create_index(batch_op.f("ix_tag_name"), ["name"], unique=False)
op.create_table(
"video",
sa.Column("id", sa.String(length=16), nullable=False),
sa.Column("name", sa.String(length=128), nullable=True),
sa.Column("channel_id", sa.String(length=32), nullable=True),
sa.Column("uploaded_timestamp", sa.DateTime(), nullable=True),
sa.Column("details", sa.JSON(), nullable=True),
sa.ForeignKeyConstraint(
["channel_id"], ["channel.id"], name=op.f("fk_video_channel_id_channel")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_video")),
)
op.create_table(
"action",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=32), nullable=False),
sa.Column(
"type",
sa.Enum("Notification", "Playlist", "Download", name="actiontype"),
nullable=False,
),
sa.Column("details", sa.JSON(), nullable=True),
sa.Column("username", sa.String(length=32), nullable=True),
sa.Column("channel_id", sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(
["channel_id"],
["subscription.channel_id"],
name=op.f("fk_action_channel_id_subscription"),
),
sa.ForeignKeyConstraint(
["username", "channel_id"],
["subscription.username", "subscription.channel_id"],
name=op.f("fk_action_username_channel_id_subscription"),
),
sa.ForeignKeyConstraint(
["username"],
["subscription.username"],
name=op.f("fk_action_username_subscription"),
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_action")),
)
op.create_table(
"callback",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("type", sa.String(length=32), nullable=True),
sa.Column("timestamp", sa.DateTime(), nullable=True),
sa.Column("infos", sa.JSON(), nullable=True),
sa.Column("channel_id", sa.String(length=32), nullable=True),
sa.Column("video_id", sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(
["channel_id"], ["channel.id"], name=op.f("fk_callback_channel_id_channel")
),
sa.ForeignKeyConstraint(
["video_id"], ["video.id"], name=op.f("fk_callback_video_id_video")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_callback")),
)
with op.batch_alter_table("callback", schema=None) as batch_op:
batch_op.create_index(
batch_op.f("ix_callback_timestamp"), ["timestamp"], unique=False
)
op.create_table(
"subscription_tag",
sa.Column("username", sa.String(length=32), nullable=False),
sa.Column("channel_id", sa.String(length=32), nullable=False),
sa.Column("tag_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["channel_id"],
["subscription.channel_id"],
name=op.f("fk_subscription_tag_channel_id_subscription"),
),
sa.ForeignKeyConstraint(
["tag_id"], ["tag.id"], name=op.f("fk_subscription_tag_tag_id_tag")
),
sa.ForeignKeyConstraint(
["username", "channel_id"],
["subscription.username", "subscription.channel_id"],
name=op.f("fk_subscription_tag_username_channel_id_subscription"),
),
sa.ForeignKeyConstraint(
["username"],
["subscription.username"],
name=op.f("fk_subscription_tag_username_subscription"),
),
sa.PrimaryKeyConstraint(
"username", "channel_id", "tag_id", name=op.f("pk_subscription_tag")
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("subscription_tag")
with op.batch_alter_table("callback", schema=None) as batch_op:
batch_op.drop_index(batch_op.f("ix_callback_timestamp"))
op.drop_table("callback")
op.drop_table("action")
op.drop_table("video")
with op.batch_alter_table("tag", schema=None) as batch_op:
batch_op.drop_index(batch_op.f("ix_tag_name"))
op.drop_table("tag")
op.drop_table("subscription")
with op.batch_alter_table("notification", schema=None) as batch_op:
batch_op.drop_index(batch_op.f("ix_notification_sent_timestamp"))
op.drop_table("notification")
op.drop_table("user")
op.drop_table("channel")
# ### end Alembic commands ###
|
import re
import PyPDF2
import pandas as pd
"""
"""
# Setting the file path
pdf_path = 'ED_8_PETROBRAS_PSP1_2021_RES_FINAL_OBJ_CONV_TITULOS.PDF'
# Reading the PDF file with PyPDF2
with open(pdf_path, mode='rb') as f:
reader = PyPDF2.PdfFileReader(f)
# Getting the number of pages
num_pages = reader.getNumPages()
# Getting the document content
pdf_content = ''
for i in range(num_pages):
page = reader.getPage(i)
# Getting the page content
text = page.extractText().replace('\n', '')
# Removing the page number and adding it to the full content
pdf_content += text[text.find(str(i + 1)) + len(str(i + 1)):]
# Configuring regular expression to find titles
pattern = r'ÊNFASE \d{1,2}([^a-z\d])+'
matches = re.finditer(pattern, pdf_content)
enfases = []
# Getting the name and position of titles
for match in matches:
i1 = match.start()
i2 = match.end()
name = match.group()
if name[-2:] == ' R':
name = name[:-2]
enfase = {
'name': name.strip(),
'position': (i1, i2)
}
enfases.append(enfase)
n_enfases = len(enfases)
# Preparing the dataframe
columns_resultados = ['Ênfase', 'Nº de inscrição', 'Nome do candidato', 'Nota final - Conhecimentos básicos',
'Nº acertos - Conhecimentos básicos', 'Nota final - Conhecimentos específicos',
'Nº acertos - Conhecimentos específicos', 'Nota final - Bloco 1', 'Nota final - Bloco 2',
'Nota final - Bloco 3', 'Nota final da prova']
df_enfases = []
for idx, enfase in enumerate(enfases):
# Getting the content between titles to assign it to the top one
i1 = enfase['position'][1]
    i2 = enfases[idx + 1]['position'][0] if idx != (n_enfases - 1) else None
content = pdf_content[i1:i2]
# Configuring regular expression to find candidate result information
pattern = r'\d{8}([^/])+'
candidates = [candidate.group() for candidate in re.finditer(pattern, content)]
candidates_rows = []
for candidate in candidates:
# Breaking the string and configuring the candidate row
column_data = candidate.split(',')
insc = int(column_data[0])
name = column_data[1].strip()
nf1 = float(column_data[2])
a1 = int(column_data[3])
nf2 = float(column_data[4])
a2 = int(column_data[5])
bl1 = float(column_data[6])
bl2 = float(column_data[7])
bl3 = float(column_data[8])
        # Getting the final score through a regex, because the last element doesn't always end exactly at the score
nf = float(re.search(r'\d{1,2}\.\d{2}', column_data[9]).group())
candidates_rows.append([enfase['name'], insc, name, nf1, a1, nf2, a2, bl1, bl2, bl3, nf])
# Creating the dataframe with the candidate results for the current enfase
df_enfase = pd.DataFrame(columns=columns_resultados, data=candidates_rows)
df_enfases.append(df_enfase)
# Merging all dataframes into one
df_final = pd.concat(df_enfases)
# Saving the dataframe to a csv file
df_final.to_csv('aprovados_petrobras_2022.csv', index=False)
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import boto3
import service
import cgf_lambda_settings
import cgf_service_client
import ban_handler
import identity_validator
@service.api
def post(request, user = None):
interface_url = cgf_lambda_settings.get_service_url("CloudGemPlayerAccount_banplayer_1_0_0")
if not interface_url:
return {
"status": ban_handler.ban(user)
}
service_client = cgf_service_client.for_url(interface_url, verbose=True, session=boto3._get_default_session())
result = service_client.navigate('playerban').POST({"id": identity_validator.get_id_from_user(user)})
return result.DATA
@service.api
def delete(request, user = None):
interface_url = cgf_lambda_settings.get_service_url(
"CloudGemPlayerAccount_banplayer_1_0_0")
if not interface_url:
return {
"status": ban_handler.lift_ban(user)
}
service_client = cgf_service_client.for_url(
interface_url, verbose=True, session=boto3._get_default_session())
navigation = service_client.navigate('playerban')
cog_id = identity_validator.get_id_from_user(user)
result = navigation.DELETE(
{ "id": cog_id }
)
return result.DATA
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
"""
This file defines classes for job handlers specific for Testing farm
"""
import logging
from datetime import datetime
from typing import Optional
from celery import signature
from packit.config import JobConfig, JobType
from packit.config.package_config import PackageConfig
from packit_service.models import (
AbstractTriggerDbType,
TFTTestRunModel,
CoprBuildModel,
TestingFarmResult,
JobTriggerModel,
)
from packit_service.worker.events import (
TestingFarmResultsEvent,
PullRequestCommentGithubEvent,
MergeRequestCommentGitlabEvent,
PullRequestCommentPagureEvent,
CheckRerunCommitEvent,
CheckRerunPullRequestEvent,
)
from packit_service.service.urls import (
get_testing_farm_info_url,
get_copr_build_info_url,
)
from packit_service.worker.handlers import JobHandler
from packit_service.worker.handlers.abstract import (
TaskName,
configured_as,
reacts_to,
run_for_comment,
run_for_check_rerun,
)
from packit_service.worker.reporting import StatusReporter, BaseCommitStatus
from packit_service.worker.result import TaskResults
from packit_service.worker.testing_farm import TestingFarmJobHelper
from packit_service.constants import PG_COPR_BUILD_STATUS_SUCCESS
from packit_service.utils import dump_job_config, dump_package_config
logger = logging.getLogger(__name__)
@run_for_comment(command="test")
@run_for_check_rerun(prefix="testing-farm")
@reacts_to(PullRequestCommentGithubEvent)
@reacts_to(MergeRequestCommentGitlabEvent)
@reacts_to(PullRequestCommentPagureEvent)
@reacts_to(CheckRerunPullRequestEvent)
@reacts_to(CheckRerunCommitEvent)
@configured_as(job_type=JobType.tests)
class TestingFarmHandler(JobHandler):
"""
The automatic matching is now used only for /packit test
TODO: We can react directly to the finished Copr build.
"""
task_name = TaskName.testing_farm
def __init__(
self,
package_config: PackageConfig,
job_config: JobConfig,
event: dict,
chroot: Optional[str] = None,
build_id: Optional[int] = None,
):
super().__init__(
package_config=package_config,
job_config=job_config,
event=event,
)
self.chroot = chroot
self.build_id = build_id
self._db_trigger: Optional[AbstractTriggerDbType] = None
@property
def db_trigger(self) -> Optional[AbstractTriggerDbType]:
if not self._db_trigger:
# copr build end
if self.build_id:
build = CoprBuildModel.get_by_id(self.build_id)
self._db_trigger = build.get_trigger_object()
# '/packit test' comment
else:
self._db_trigger = self.data.db_trigger
return self._db_trigger
def run(self) -> TaskResults:
# TODO: once we turn handlers into respective celery tasks, we should iterate
# here over *all* matching jobs and do them all, not just the first one
testing_farm_helper = TestingFarmJobHelper(
service_config=self.service_config,
package_config=self.package_config,
project=self.project,
metadata=self.data,
db_trigger=self.db_trigger,
job_config=self.job_config,
targets_override={self.chroot}
if self.chroot
else self.data.targets_override,
)
logger.debug(f"Test job config: {testing_farm_helper.job_tests}")
targets = list(testing_farm_helper.tests_targets)
logger.debug(f"Targets to run the tests: {targets}")
targets_without_build = []
targets_with_builds = {}
for target in targets:
if self.build_id:
copr_build = CoprBuildModel.get_by_id(self.build_id)
else:
copr_build = testing_farm_helper.get_latest_copr_build(
target=target, commit_sha=self.data.commit_sha
)
if copr_build:
targets_with_builds[target] = copr_build
else:
targets_without_build.append(target)
result_details = {}
# Trigger copr build for targets missing build
if targets_without_build:
logger.info(
f"Missing Copr build for targets {targets_without_build} in "
f"{testing_farm_helper.job_owner}/{testing_farm_helper.job_project}"
f" and commit:{self.data.commit_sha}, running a new Copr build."
)
for missing_target in targets_without_build:
testing_farm_helper.report_status_to_test_for_chroot(
state=BaseCommitStatus.pending,
description="Missing Copr build for this target, running a new Copr build.",
url="",
chroot=missing_target,
)
# monitor queued builds
for _ in range(len(targets_without_build)):
self.pushgateway.copr_builds_queued.inc()
event_data = self.data.get_dict()
event_data["targets_override"] = targets_without_build
signature(
TaskName.copr_build.value,
kwargs={
"package_config": dump_package_config(self.package_config),
"job_config": dump_job_config(self.job_config),
"event": event_data,
},
).apply_async()
result_details[
"msg"
] = f"Build triggered for targets {targets_without_build} missing a Copr build. "
failed = {}
for target, copr_build in targets_with_builds.items():
if copr_build.status != PG_COPR_BUILD_STATUS_SUCCESS:
logger.info(
"The latest build was not successful, not running tests for it."
)
testing_farm_helper.report_status_to_test_for_chroot(
state=BaseCommitStatus.failure,
description="The latest build was not successful, not running tests for it.",
chroot=target,
url=get_copr_build_info_url(copr_build.id),
)
continue
logger.info(f"Running testing farm for {copr_build}:{target}.")
self.pushgateway.test_runs_queued.inc()
result = testing_farm_helper.run_testing_farm(
build=copr_build, chroot=target
)
if not result["success"]:
failed[target] = result.get("details")
if not failed:
return TaskResults(success=True, details=result_details)
result_details["msg"] = (
result_details.setdefault("msg", "")
+ f"Failed testing farm targets: '{failed.keys()}'."
)
result_details.update(failed)
return TaskResults(success=False, details=result_details)
@configured_as(job_type=JobType.tests)
@reacts_to(event=TestingFarmResultsEvent)
class TestingFarmResultsHandler(JobHandler):
task_name = TaskName.testing_farm_results
def __init__(
self,
package_config: PackageConfig,
job_config: JobConfig,
event: dict,
):
super().__init__(
package_config=package_config,
job_config=job_config,
event=event,
)
self.result = (
TestingFarmResult(event.get("result")) if event.get("result") else None
)
self.pipeline_id = event.get("pipeline_id")
self.log_url = event.get("log_url")
self.copr_chroot = event.get("copr_chroot")
self.summary = event.get("summary")
self._db_trigger: Optional[AbstractTriggerDbType] = None
@property
def db_trigger(self) -> Optional[AbstractTriggerDbType]:
if not self._db_trigger:
run_model = TFTTestRunModel.get_by_pipeline_id(pipeline_id=self.pipeline_id)
if run_model:
self._db_trigger = run_model.get_trigger_object()
return self._db_trigger
def run(self) -> TaskResults:
logger.debug(f"Testing farm {self.pipeline_id} result:\n{self.result}")
test_run_model = TFTTestRunModel.get_by_pipeline_id(
pipeline_id=self.pipeline_id
)
if not test_run_model:
logger.warning(
f"Unknown pipeline_id received from the testing-farm: "
f"{self.pipeline_id}"
)
if test_run_model:
test_run_model.set_status(self.result)
if self.result == TestingFarmResult.running:
status = BaseCommitStatus.running
summary = self.summary or "Tests are running ..."
elif self.result == TestingFarmResult.passed:
status = BaseCommitStatus.success
summary = self.summary or "Tests passed ..."
elif self.result == TestingFarmResult.error:
status = BaseCommitStatus.error
summary = self.summary or "Error ..."
else:
status = BaseCommitStatus.failure
summary = self.summary or "Tests failed ..."
if self.result == TestingFarmResult.running:
self.pushgateway.test_runs_started.inc()
else:
self.pushgateway.test_runs_finished.inc()
test_run_time = (
datetime.now() - test_run_model.submitted_time
).total_seconds()
self.pushgateway.test_run_finished_time.observe(test_run_time)
if test_run_model:
test_run_model.set_web_url(self.log_url)
trigger = JobTriggerModel.get_or_create(
type=self.db_trigger.job_trigger_model_type,
trigger_id=self.db_trigger.id,
)
status_reporter = StatusReporter.get_instance(
project=self.project,
commit_sha=self.data.commit_sha,
trigger_id=trigger.id if trigger else None,
pr_id=self.data.pr_id,
)
status_reporter.report(
state=status,
description=summary,
url=get_testing_farm_info_url(test_run_model.id)
if test_run_model
else self.log_url,
links_to_external_services={"Testing Farm": self.log_url},
check_names=TestingFarmJobHelper.get_test_check(self.copr_chroot),
)
return TaskResults(success=True, details={})
|
from django.db import models
class AnswerSetUniqueManager(models.Manager):
def get_queryset(self):
return super(AnswerSetUniqueManager, self).get_queryset().filter(is_duplicate=False)
|
from flask import Flask
from flask_bootstrap import Bootstrap
def create_app():
app = Flask(__name__)
Bootstrap(app)
return app
app = create_app()
app.config['WTF_CSRF_ENABLED'] = True
from cantina import views
|
#!/usr/bin/env python
import os
import sys
import gzip
from Bio import SeqIO
modified = []
with gzip.GzipFile( sys.argv[1] ) as fh:
for record in SeqIO.parse( fh, "fasta" ):
record.id += "|" + str( len( record ) )
modified.append( record )
for record in modified:
record.name, record.description = "", ""
newname = os.path.split( sys.argv[1] )[1].replace( ".ffn.m8.gz", ".v0.1.1.ffn.gz" )
with gzip.GzipFile( newname, "w" ) as fh:
SeqIO.write( modified, fh, "fasta" )
|
""" Defines the ChacoShellError class.
"""
class ChacoShellError(RuntimeError):
""" Error raised by the Chaco shell.
"""
pass
# EOF
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# from ssloop
# https://github.com/clowwindy/ssloop
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import time
import socket
import select
import errno
import logging
from collections import defaultdict
from shadowsocks import shell
__all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR',
'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES']
POLL_NULL = 0x00
POLL_IN = 0x01
POLL_OUT = 0x04
POLL_ERR = 0x08
POLL_HUP = 0x10
POLL_NVAL = 0x20
EVENT_NAMES = {
POLL_NULL: 'POLL_NULL',
POLL_IN: 'POLL_IN',
POLL_OUT: 'POLL_OUT',
POLL_ERR: 'POLL_ERR',
POLL_HUP: 'POLL_HUP',
POLL_NVAL: 'POLL_NVAL',
}
# we check timeouts every TIMEOUT_PRECISION seconds
TIMEOUT_PRECISION = 2
class KqueueLoop(object):
MAX_EVENTS = 1024
def __init__(self):
self._kqueue = select.kqueue()
self._fds = {}
def _control(self, fd, mode, flags):
events = []
if mode & POLL_IN:
events.append(select.kevent(fd, select.KQ_FILTER_READ, flags))
if mode & POLL_OUT:
events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags))
for e in events:
self._kqueue.control([e], 0)
def poll(self, timeout):
if timeout < 0:
timeout = None # kqueue behaviour
events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout)
results = defaultdict(lambda: POLL_NULL)
for e in events:
fd = e.ident
if e.filter == select.KQ_FILTER_READ:
results[fd] |= POLL_IN
elif e.filter == select.KQ_FILTER_WRITE:
results[fd] |= POLL_OUT
return results.items()
def register(self, fd, mode):
self._fds[fd] = mode
self._control(fd, mode, select.KQ_EV_ADD)
def unregister(self, fd):
self._control(fd, self._fds[fd], select.KQ_EV_DELETE)
del self._fds[fd]
def modify(self, fd, mode):
self.unregister(fd)
self.register(fd, mode)
def close(self):
self._kqueue.close()
class SelectLoop(object):
def __init__(self):
self._r_list = set()
self._w_list = set()
self._x_list = set()
def poll(self, timeout):
r, w, x = select.select(self._r_list, self._w_list, self._x_list,
timeout)
results = defaultdict(lambda: POLL_NULL)
for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]:
for fd in p[0]:
results[fd] |= p[1]
return results.items()
def register(self, fd, mode):
if mode & POLL_IN:
self._r_list.add(fd)
if mode & POLL_OUT:
self._w_list.add(fd)
if mode & POLL_ERR:
self._x_list.add(fd)
def unregister(self, fd):
if fd in self._r_list:
self._r_list.remove(fd)
if fd in self._w_list:
self._w_list.remove(fd)
if fd in self._x_list:
self._x_list.remove(fd)
def modify(self, fd, mode):
self.unregister(fd)
self.register(fd, mode)
def close(self):
pass
class EventLoop(object):
def __init__(self):
if hasattr(select, 'epoll'):
self._impl = select.epoll()
model = 'epoll'
elif hasattr(select, 'kqueue'):
self._impl = KqueueLoop()
model = 'kqueue'
elif hasattr(select, 'select'):
self._impl = SelectLoop()
model = 'select'
else:
raise Exception('can not find any available functions in select '
'package')
self._fdmap = {} # (f, handler)
self._last_time = time.time()
self._periodic_callbacks = []
self._stopping = False
logging.debug('using event model: %s', model)
def poll(self, timeout=None):
events = self._impl.poll(timeout)
return [(self._fdmap[fd][0], fd, event) for fd, event in events]
def add(self, f, mode, handler):
fd = f.fileno()
self._fdmap[fd] = (f, handler)
self._impl.register(fd, mode)
def remove(self, f):
fd = f.fileno()
del self._fdmap[fd]
self._impl.unregister(fd)
def removefd(self, fd):
try:
if fd in self._fdmap:
del self._fdmap[fd]
self._impl.unregister(fd)
else:
logging.error('mykeyerror:%s', fd)
except Exception as e:
logging.error(e)
def add_periodic(self, callback):
self._periodic_callbacks.append(callback)
def remove_periodic(self, callback):
self._periodic_callbacks.remove(callback)
def modify(self, f, mode):
fd = f.fileno()
self._impl.modify(fd, mode)
def stop(self):
self._stopping = True
def run(self):
events = []
while not self._stopping:
asap = False
try:
events = self.poll(TIMEOUT_PRECISION)
except (OSError, IOError) as e:
if errno_from_exception(e) in (errno.EPIPE, errno.EINTR):
# EPIPE: Happens when the client closes the connection
# EINTR: Happens when received a signal
# handles them as soon as possible
asap = True
logging.debug('poll:%s', e)
else:
logging.error('poll:%s', e)
import traceback
traceback.print_exc()
continue
handle = False
for sock, fd, event in events:
handler = self._fdmap.get(fd, None)
if handler is not None:
handler = handler[1]
try:
handle = handler.handle_event(sock, fd, event) or handle
except (OSError, IOError) as e:
shell.print_exception(e)
now = time.time()
if asap or now - self._last_time >= TIMEOUT_PRECISION:
for callback in self._periodic_callbacks:
callback()
self._last_time = now
if events and not handle:
time.sleep(0.001)
def __del__(self):
self._impl.close()
# from tornado
def errno_from_exception(e):
"""Provides the errno from an Exception object.
    There are cases where the errno attribute is not set, so we pull
    the errno out of the args; but if someone instantiates an Exception
    without any args you will get a tuple error. So this function
    abstracts all that behavior to give you a safe way to get the
    errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
# from tornado
def get_sock_error(sock):
error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
return socket.error(error_number, os.strerror(error_number))
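# A minimal sketch of how this loop is typically driven (the handler/sock names
# below are illustrative, not part of shadowsocks itself):
#
#     loop = EventLoop()
#     loop.add(sock, POLL_IN, handler)    # handler must expose handle_event(sock, fd, event)
#     loop.add_periodic(sweep_timeouts)   # invoked roughly every TIMEOUT_PRECISION seconds
#     loop.run()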
|
""" Plugin for file-based transformers (bash and Docker transformers)
NOTE: there are some semantic differences with executor.py of the bash and docker transformers
for local job execution as implemented by Seamless itself.
This is because in Jobless we are dealing with buffers (potentially already in a file),
whereas local job execution in Seamless deals with deserialized values (from Seamless pins).
Probably keep the current code as the proper semantics, and adapt the Seamless pin
implementation (to provide buffers instead of values if so specified).
"""
import asyncio
import json
import traceback
import os
import shutil
from . import TransformerPlugin, CacheMissError
class FileTransformerPluginBase(TransformerPlugin):
def __init__(self, *args, rewriter=None, **kwargs):
self.rewriter = rewriter
super().__init__(*args, **kwargs)
REQUIRED_TRANSFORMER_PINS = [] # to be defined in subclass
TRANSFORMER_CODE_CHECKSUMS = [] # to be defined in subclass
def allowed_docker_image(self, docker_image):
return False
def allowed_powers(self, powers):
return powers is None or len(powers) == 0
def required_pin_handler(self, pin, transformation):
"""Obtain the value of required pins (such as bashcode etc.)
To be re-implemented by the subclass
return tuple (skip, json_value_only, json_buffer, write_env)
skip: if True, skip the pin altogether
json_value_only: if True, the buffer must be interpreted as JSON,
and does not need to be written to file
json_buffer: if True, the buffer must be interpreted as JSON,
then written to a new file.
if False, an existing filename for the pin may be
obtained from the database client and used;
else, the buffer will be written as-is to a new file
if None, the buffer (which must be in mixed format)
will be checked for JSON content (pure-plain).
write_env: if True, the content of the buffer will be UTF8-decoded,
cast to string, and written as an environment variable
of the same name as the pin.
if None, the above will be done but only if the buffer has
less than 1000 characters.
"""
raise NotImplementedError
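    # A minimal sketch of what a subclass might return from this hook; the pin names
    # below are hypothetical and only illustrate the (skip, json_value_only,
    # json_buffer, write_env) tuple described in the docstring above:
    #
    #     def required_pin_handler(self, pin, transformation):
    #         if pin == "bashcode":
    #             # interpret as JSON value only; no file, no env var
    #             return False, True, False, False
    #         if pin == "pins_":
    #             return True, False, None, None    # skip this pin altogether
    #         # write the buffer to a file; export as env var only if small
    #         return False, False, None, None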
def can_accept_transformation(self, checksum, transformation):
env = None
if "__env__" in transformation:
env_buf = self.database_client.get_buffer(transformation["__env__"])
if env_buf is None:
return -1
try:
env = json.loads(env_buf)
except:
return -1
powers = env.get("powers")
if not self.allowed_powers(powers):
return -1
docker_image = env.get("docker", {}).get("name")
if docker_image is not None:
if not self.allowed_docker_image(docker_image):
return -1
for key in self.REQUIRED_TRANSFORMER_PINS:
missed = True
if isinstance(key, tuple):
keylist = key
else:
keylist = (key,)
for key in keylist:
if key in transformation:
missed = False
break
if missed:
return -1
if not "code" in transformation:
return -1
code = transformation["code"]
if not isinstance(code, list) or len(code) != 3 or code[:2] != ['python', 'transformer']:
return -1
code_checksum = code[-1]
return code_checksum in self.TRANSFORMER_CODE_CHECKSUMS
def prepare_transformation(self, checksum, transformation):
tdict = {"__checksum__": checksum.hex()}
if "__env__" in transformation:
env_buf = self.database_client.get_buffer(transformation["__env__"])
env = json.loads(env_buf)
tdict["__env__"] = env
required_transformer_pins = []
for key in self.REQUIRED_TRANSFORMER_PINS:
if isinstance(key, str):
required_transformer_pins.append(key)
else:
required_transformer_pins += list(key)
for pin in transformation:
if pin == "code" or (pin.startswith("__") and pin.endswith("__")):
continue
celltype, subcelltype, pin_checksum = transformation[pin]
json_value_only = False
skip, json_buffer, write_env = None, None, None
if pin in required_transformer_pins:
skip, json_value_only, json_buffer, write_env = self.required_pin_handler(pin, transformation)
elif celltype == "mixed":
skip = False
json_buffer = None
write_env = None
elif celltype == "plain":
skip = False
json_buffer = True
write_env = None
else:
skip = False
json_buffer = False
write_env = None
if skip:
continue
value = None
pin_buf = None
if json_buffer == False and write_env is None:
pin_buf_len = self.database_client.get_buffer_length(pin_checksum)
if pin_buf_len is not None:
if pin_buf_len <= 1000:
write_env = True
else:
write_env = False
if (json_buffer is None or json_buffer == True) or \
(write_env is None or write_env == True) or json_value_only:
pin_buf = self.database_client.get_buffer(pin_checksum)
if pin_buf is None:
raise CacheMissError(pin_checksum)
if write_env is None:
if len(pin_buf) <= 1000:
write_env = True
else:
write_env = False
if json_buffer is None:
assert celltype == "mixed"
json_buffer = is_json(pin_buf)
if json_value_only:
if celltype in ("plain", "mixed", "int", "float", "bool", "str"):
value = json.loads(pin_buf)
elif celltype in ("text", "python", "ipython", "cson", "yaml", "checksum"):
value = pin_buf.decode()
else:
value = pin_buf
elif json_buffer:
if pin_buf[:1] == b'"' and pin_buf[-2:-1] == b'"':
value = json.loads(pin_buf)
json_value_only = True
elif pin_buf[-1:] != b'\n':
value = json.loads(pin_buf)
json_value_only = True
else:
pass # we can use the buffer directly
filename = None
env_value = None
if write_env:
env_value = value
if isinstance(env_value, (list, dict)):
env_value = None
elif env_value is None:
env_value = pin_buf.decode()
if json_buffer:
env_value = json.loads(env_value)
if isinstance(env_value, (list, dict)):
env_value = None
else:
env_value = str(env_value).rstrip("\n")
if not json_value_only:
"""
### Disable this for now, for security reasons...
filename = self.database_client.get_filename(pin_checksum)
"""
filename = None ###
if filename is None:
pin_buf = self.database_client.get_buffer(pin_checksum)
if pin_buf is None:
raise CacheMissError
value = pin_buf
elif self.rewriter is not None:
pre, post = self.rewriter
if filename.startswith(pre):
tail = filename[len(pre):]
filename = post + tail
tdict[pin] = filename, value, env_value
return tdict
MAGIC_NUMPY = b"\x93NUMPY"
MAGIC_SEAMLESS_MIXED = b'\x94SEAMLESS-MIXED'
def is_json(data):
"""Poor man's version of mixed_deserialize + get_form
Maybe use this in real bash transformers as well?
"""
assert isinstance(data, bytes)
if data.startswith(MAGIC_NUMPY):
return False
elif data.startswith(MAGIC_SEAMLESS_MIXED):
return False
else: # pure json
return True
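# For example:
#   is_json(b'{"a": 1}\n')            -> True   (pure plain JSON)
#   is_json(b"\x93NUMPY...")          -> False  (numpy buffer)
#   is_json(b"\x94SEAMLESS-MIXED...") -> False  (mixed binary buffer)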
def write_files(prepared_transformation, env, support_symlinks):
for pin in prepared_transformation:
if pin in ("__checksum__", "__env__"):
continue
filename, value, env_value = prepared_transformation[pin]
pinfile = "./" + pin
if filename is not None:
if support_symlinks:
os.symlink(filename, pinfile)
else:
try:
os.link(filename, pinfile)
except Exception:
shutil.copy(filename, pinfile)
elif value is not None:
if isinstance(value, bytes):
with open(pinfile, "bw") as f:
f.write(value)
elif isinstance(value, str):
with open(pinfile, "w") as f:
f.write(value)
f.write("\n")
else:
with open(pinfile, "w") as f:
json.dump(value, f)
f.write("\n")
if env_value is not None:
env[pin] = str(env_value)
|
#!/usr/bin/env python
import pygame
from sprite import *
from megaman_object import *
import universal_var
import timer
from mega_stack import Stack
import projectile
class Death_orb(projectile.Projectile):
all_orbs_lst = []
all_orbs_stack = Stack()
def __init__(self):
x, y = 0, 0
width = 50
height = 50
display_layer = 4
is_active = False
orb_animation = [universal_var.misc_images['explosion_1'], universal_var.misc_images['explosion_2'], universal_var.misc_images['explosion_3'],
universal_var.misc_images['explosion_4'], universal_var.misc_images['explosion_5'], universal_var.misc_images['blank']]
orb_sprite = Sprite(universal_var.main_sprite, x, y, width, height, [('orb', orb_animation, 20)])
super().__init__('Death_orb', x, y, [orb_sprite], None, is_active, width, height, display_layer)
self.spawned = False
self.vel = 0
self.all_timers = timer.Timer()
self.all_timers.add_ID('start_time', 0)
Death_orb.add_to_class_lst(self, Death_orb.all_orbs_lst, self.ID)
Death_orb.all_orbs_stack.push(self)
def update(self):
if self.spawned == True and self.all_timers.is_finished('start_time'):
self.set(self.x, self.y, self.vel, self.angle)
self.spawned = False
self.all_timers.replenish_timer('start_time')
elif self.spawned == True:
self.all_timers.countdown('start_time')
elif self not in Death_orb.all_orbs_stack.lst:
Death_orb.all_orbs_stack.push(self)
projectile.Projectile.update(self)
#-----------------------------------------------------------------------
@staticmethod
def init(amount=1):
for i in range(amount):
Death_orb()
@classmethod
def set_orb_active(cls, x, y, start_time, angle, vel):
orb = cls.all_orbs_stack.pop()
orb.x, orb.y = x, y
orb.angle = angle
orb.vel = vel
orb.spawned = True
orb.all_timers.replenish_timer('start_time', start_time)
@classmethod
def reset(cls):
for orb in cls.all_orbs_lst:
orb.is_active = False
orb.launched = False
orb.spawned = False
sprite = orb.get_sprite(universal_var.main_sprite)
sprite.current_frame = 0
|
"""!
@author atomicfruitcake
@date 2019
"""
|
import argparse
import multiprocessing
from datetime import datetime
from course_lib.Base.Evaluation.Evaluator import *
from course_lib.Base.IR_feature_weighting import TF_IDF
from course_lib.GraphBased.P3alphaRecommender import P3alphaRecommender
from course_lib.GraphBased.RP3betaRecommender import RP3betaRecommender
from course_lib.KNN.ItemKNNCBFRecommender import ItemKNNCBFRecommender
from course_lib.KNN.ItemKNNCFRecommender import ItemKNNCFRecommender
from course_lib.KNN.UserKNNCFRecommender import UserKNNCFRecommender
from course_lib.MatrixFactorization.Cython.MatrixFactorization_Cython import MatrixFactorization_AsySVD_Cython
from course_lib.MatrixFactorization.NMFRecommender import NMFRecommender
from course_lib.SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython
from course_lib.SLIM_ElasticNet.SLIMElasticNetRecommender import SLIMElasticNetRecommender
from scripts.scripts_utils import set_env_variables, read_split_load_data
from src.data_management.New_DataSplitter_leave_k_out import *
from src.data_management.data_reader import get_ICM_train_new, get_UCM_train_new, get_ignore_users
from src.model.KNN.ItemKNNDotCFRecommender import ItemKNNDotCFRecommender
from src.model.KNN.ItemKNNCBFCFRecommender import ItemKNNCBFCFRecommender
from src.model.KNN.NewItemKNNCBFRecommender import NewItemKNNCBFRecommender
from src.model.KNN.NewUserKNNCFRecommender import NewUserKNNCFRecommender
from src.model.KNN.UserKNNCBFCFRecommender import UserKNNCBFCFRecommender
from src.model.KNN.UserKNNCBFRecommender import UserKNNCBFRecommender
from src.model.KNN.UserKNNDotCFRecommender import UserKNNDotCFRecommender
from src.model.MatrixFactorization.FunkSVDRecommender import FunkSVDRecommender
from src.model.MatrixFactorization.ImplicitALSRecommender import ImplicitALSRecommender
from src.model.MatrixFactorization.LightFMRecommender import LightFMRecommender
from src.model.MatrixFactorization.LogisticMFRecommender import LogisticMFRecommender
from src.model.MatrixFactorization.MF_BPR_Recommender import MF_BPR_Recommender
from src.model.MatrixFactorization.NewPureSVDRecommender import NewPureSVDRecommender
from src.tuning.cross_validation.run_cv_parameter_search import run_cv_parameter_search
from src.utils.general_utility_functions import get_split_seed, get_seed_lists, \
get_root_data_path, str2bool
N_CASES = 100
N_RANDOM_STARTS = 40
CUTOFF = 10
N_FOLDS = 5
K_OUT = 1
ALLOW_COLD_USERS = False
MIN_LOWER_THRESHOLD = -1
MAX_UPPER_THRESHOLD = 2 ** 16 - 1
IGNORE_NON_TARGET_USERS = True
AGE_TO_KEEP = [] # Default []
SIDE_INFO_CLASS_DICT = {
# Graph-Based
"rp3beta_side": RP3betaRecommender,
# ML-Based
"pure_svd_side": NewPureSVDRecommender,
"slim_side": SLIM_BPR_Cython,
"ials_side": ImplicitALSRecommender
}
COLLABORATIVE_RECOMMENDER_CLASS_DICT = {
# KNN
"item_cf": ItemKNNCFRecommender,
"user_cf": UserKNNCFRecommender,
"new_user_cf": NewUserKNNCFRecommender,
"user_dot_cf": UserKNNDotCFRecommender,
"item_dot_cf": ItemKNNDotCFRecommender,
# ML Item-Similarity Based
"slim_bpr": SLIM_BPR_Cython,
"slim_elastic": SLIMElasticNetRecommender,
# Graph-based
"p3alpha": P3alphaRecommender,
"rp3beta": RP3betaRecommender,
# Matrix Factorization
"pure_svd": NewPureSVDRecommender,
"light_fm": LightFMRecommender,
"ials": ImplicitALSRecommender,
"logistic_mf": LogisticMFRecommender,
"mf_bpr": MF_BPR_Recommender,
"funk_svd": FunkSVDRecommender,
"asy_svd": MatrixFactorization_AsySVD_Cython,
"nmf": NMFRecommender
}
CONTENT_RECOMMENDER_CLASS_DICT = {
# Pure CBF KNN
"new_item_cbf": NewItemKNNCBFRecommender,
"item_cbf_cf": ItemKNNCBFCFRecommender,
"item_cbf_all": ItemKNNCBFRecommender
}
DEMOGRAPHIC_RECOMMENDER_CLASS_DICT = {
# Pure Demographic KNN
"user_cbf": UserKNNCBFRecommender,
"user_cbf_cf": UserKNNCBFCFRecommender
}
RECOMMENDER_CLASS_DICT = dict(**COLLABORATIVE_RECOMMENDER_CLASS_DICT, **CONTENT_RECOMMENDER_CLASS_DICT,
**DEMOGRAPHIC_RECOMMENDER_CLASS_DICT, **SIDE_INFO_CLASS_DICT)
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--reader_path", default=get_root_data_path(), help="path to the root of data files")
parser.add_argument("-r", "--recommender_name", required=True,
help="recommender names should be one of: {}".format(list(RECOMMENDER_CLASS_DICT.keys())))
parser.add_argument("-n", "--n_cases", default=N_CASES, type=int, help="number of cases for hyper parameter tuning")
parser.add_argument("-f", "--n_folds", default=N_FOLDS, type=int, help="number of folds for cross validation")
parser.add_argument("-nr", "--n_random_starts", default=N_RANDOM_STARTS, type=int,
help="number of random starts for hyper parameter tuning")
parser.add_argument("-p", "--parallelize", default=1, type=str2bool,
help="1 to parallelize the search, 0 otherwise")
parser.add_argument("-ut", "--upper_threshold", default=MAX_UPPER_THRESHOLD, type=int,
help="Upper threshold (included) of user profile length to validate")
parser.add_argument("-lt", "--lower_threshold", default=MIN_LOWER_THRESHOLD, type=int,
help="Lower threshold (included) of user profile length to validate")
parser.add_argument("-acu", "--allow_cold_users", default=0, type=str2bool, help="1 to allow cold users,"
" 0 otherwise")
parser.add_argument("-ent", "--exclude_non_target", default=1, type=str2bool,
help="1 to exclude non-target users, 0 otherwise")
parser.add_argument("-nj", "--n_jobs", default=multiprocessing.cpu_count(), help="Number of workers", type=int)
parser.add_argument("--seed", default=get_split_seed(), help="seed for the experiment", type=int)
# parser.add_argument("-a", "--age", default=-69420, help="Validate only on the users of this region", type=int)
return parser.parse_args()
def main():
set_env_variables()
args = get_arguments()
    seeds = get_seed_lists(args.n_folds, args.seed)
# --------- DATA LOADING SECTION --------- #
URM_train_list = []
ICM_train_list = []
UCM_train_list = []
evaluator_list = []
for fold_idx in range(args.n_folds):
# Read and split data
data_reader = read_split_load_data(K_OUT, args.allow_cold_users, seeds[fold_idx])
URM_train, URM_test = data_reader.get_holdout_split()
ICM_train, item_feature2range = get_ICM_train_new(data_reader)
UCM_train, user_feature2range = get_UCM_train_new(data_reader)
# Ignore users and setting evaluator
ignore_users = get_ignore_users(URM_train, data_reader.get_original_user_id_to_index_mapper(),
args.lower_threshold, args.upper_threshold,
ignore_non_target_users=args.exclude_non_target)
# Ignore users by age
# UCM_age = data_reader.get_UCM_from_name("UCM_age")
# age_feature_to_id_mapper = data_reader.dataReader_object.get_UCM_feature_to_index_mapper_from_name("UCM_age")
# age_demographic = get_user_demographic(UCM_age, age_feature_to_id_mapper, binned=True)
# ignore_users = np.unique(np.concatenate((ignore_users, get_ignore_users_age(age_demographic, AGE_TO_KEEP))))
URM_train_list.append(URM_train)
ICM_train_list.append(ICM_train)
UCM_train_list.append(UCM_train)
evaluator = EvaluatorHoldout(URM_test, cutoff_list=[CUTOFF], ignore_users=np.unique(ignore_users))
evaluator_list.append(evaluator)
# --------- HYPER PARAMETERS TUNING SECTION --------- #
print("Start tuning...")
hp_tuning_path = "../../../report/hp_tuning/" + args.recommender_name + "/"
date_string = datetime.now().strftime('%b%d_%H-%M-%S_k1_lt_{}/'.format(args.lower_threshold))
output_folder_path = hp_tuning_path + date_string
if args.recommender_name in COLLABORATIVE_RECOMMENDER_CLASS_DICT.keys():
run_cv_parameter_search(URM_train_list=URM_train_list,
recommender_class=RECOMMENDER_CLASS_DICT[args.recommender_name],
evaluator_validation_list=evaluator_list,
metric_to_optimize="MAP", output_folder_path=output_folder_path,
parallelize_search=args.parallelize, n_jobs=args.n_jobs,
n_cases=args.n_cases, n_random_starts=args.n_random_starts)
elif args.recommender_name in CONTENT_RECOMMENDER_CLASS_DICT.keys():
run_cv_parameter_search(URM_train_list=URM_train_list, ICM_train_list=ICM_train_list, ICM_name="ICM_all",
recommender_class=RECOMMENDER_CLASS_DICT[args.recommender_name],
evaluator_validation_list=evaluator_list,
metric_to_optimize="MAP", output_folder_path=output_folder_path,
parallelize_search=args.parallelize, n_jobs=args.n_jobs,
n_cases=args.n_cases, n_random_starts=args.n_random_starts)
elif args.recommender_name in DEMOGRAPHIC_RECOMMENDER_CLASS_DICT.keys():
run_cv_parameter_search(URM_train_list=URM_train_list, UCM_train_list=UCM_train_list, UCM_name="UCM_all",
recommender_class=RECOMMENDER_CLASS_DICT[args.recommender_name],
evaluator_validation_list=evaluator_list,
metric_to_optimize="MAP", output_folder_path=output_folder_path,
parallelize_search=args.parallelize, n_jobs=args.n_jobs,
n_cases=args.n_cases, n_random_starts=args.n_random_starts)
elif args.recommender_name in SIDE_INFO_CLASS_DICT:
temp_list = []
for i, URM in enumerate(URM_train_list):
temp = sps.vstack([URM, ICM_train_list[i].T], format="csr")
#temp = TF_IDF(temp).tocsr()
temp_list.append(temp)
run_cv_parameter_search(URM_train_list=temp_list,
recommender_class=RECOMMENDER_CLASS_DICT[args.recommender_name],
evaluator_validation_list=evaluator_list, metric_to_optimize="MAP",
output_folder_path=output_folder_path, parallelize_search=args.parallelize,
n_jobs=args.n_jobs, n_cases=args.n_cases, n_random_starts=args.n_random_starts)
print("...tuning ended")
if __name__ == '__main__':
main()
|
# Generated by Django 2.0.13 on 2019-05-28 14:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("django_geosource", "0002_auto_20190524_1137")]
operations = [
migrations.AlterModelOptions(
name="source",
options={"permissions": (("can_manage_sources", "Can manage sources"),)},
)
]
|
from unittest import TestCase
from main import strip_quotes
class TestStripQuotes(TestCase):
def test_strip_quotes_removes_quotes_from_string_margins(self):
self.assertEqual('string_with quotes',
strip_quotes('\'string_with quotes\''))
def test_strip_quotes_removes_double_quotes_from_string_edges(self):
self.assertEqual('string', strip_quotes('"string"'))
def test_strip_quotes_does_not_remove_from_center_of_string(self):
self.assertEqual('string_""with_quotes',
strip_quotes('string_""with_quotes'))
def test_strip_quotes_returns_passed_string_if_no_quotes(self):
self.assertEqual('string', strip_quotes('string'))
|
from sys import argv
script, data, setor, tipo, usuario, descricao, finalizado = argv
contador = open('contador.dat', 'r')
contadorInt = int(contador.read())
contadorInt = contadorInt + 1
contadorStr = str(contadorInt)
contadorUpt = open('contador.dat', 'w')
contadorUpt.write(contadorStr)
contadorUpt.close()
target = open('dados.dat', 'a')
linha = (contadorStr + '|'
+ data + '|' + setor + '|' + tipo + '|' + usuario
+ '|' + descricao + '|' + finalizado.upper() + '\n')
target.write(linha)
target.close()
|
import json
import random
import sys
import numpy as np
import torch
import unidecode
from tqdm import tqdm
__all__ = [
"Dataloader",
]
ENTITY_PAIR_TYPE_SET = set(
[("Chemical", "Disease"), ("Chemical", "Gene"), ("Gene", "Disease")])
def map_index(chars, tokens):
# position index mapping from character level offset to token level offset
ind_map = {}
i, k = 0, 0 # (character i to token k)
len_char = len(chars)
num_token = len(tokens)
while k < num_token:
if i < len_char and chars[i].strip() == "":
ind_map[i] = k
i += 1
continue
token = tokens[k]
if token[:2] == "##":
token = token[2:]
if token[:1] == "Ġ":
token = token[1:]
# assume that unk is always one character in the input text.
if token != chars[i:(i+len(token))]:
ind_map[i] = k
i += 1
k += 1
else:
for _ in range(len(token)):
ind_map[i] = k
i += 1
k += 1
return ind_map
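# Worked example (illustrative): for chars = "abc def" and tokens = ["abc", "def"],
# map_index returns {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1} -- every character
# offset points to the token that covers it, and the space at offset 3 is assigned
# to the next token.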
def preprocess(data_entry, tokenizer, max_text_length, relation_map, lower=True):
"""convert to index array, cut long sentences, cut long document, pad short sentences, pad short document"""
cls_token = tokenizer.cls_token
sep_token = tokenizer.sep_token
padid = tokenizer.pad_token_id
cls_token_length = len(cls_token)
docid = data_entry["docid"]
if "title" in data_entry and "abstract" in data_entry:
text = data_entry["title"] + data_entry["abstract"]
if lower == True:
text = text.lower()
else:
text = data_entry["text"]
if lower == True:
text = text.lower()
entities_info = data_entry["entity"]
relations_info = data_entry["relation"]
rel_vocab_size = len(relation_map)
# tokenizer will automatically add cls and sep at the beginning and end of each sentence
# [CLS] --> 101, [PAD] --> 0, [SEP] --> 102, [UNK] --> 100
text = unidecode.unidecode(text)
tokens = tokenizer.tokenize(text)[:(max_text_length-2)]
tokens = [cls_token] + tokens + [sep_token]
token_wids = tokenizer.convert_tokens_to_ids(tokens)
text = cls_token + " " + text + " " + sep_token
    input_array = np.ones(max_text_length, dtype=np.int64) * int(padid)  # np.int was removed in NumPy >= 1.24
input_length = len(token_wids)
input_array[0:len(token_wids)] = token_wids
    pad_array = np.array(input_array != padid, dtype=np.int64)  # np.long was removed in NumPy >= 1.24
ind_map = map_index(text, tokens)
sys.stdout.flush()
entity_indicator = {}
entity_type = {}
entity_id_set = set([])
for entity in entities_info:
# if entity mention is outside max_text_length, ignore. +6 indicates additional offset due to "[CLS] "
entity_id_set.add(entity["id"])
entity_type[entity["id"]] = entity["type"]
if entity["id"] not in entity_indicator:
entity_indicator[entity["id"]] = np.zeros(max_text_length)
if entity["start"] + cls_token_length in ind_map:
startid = ind_map[entity["start"] + cls_token_length]
else:
startid = 0
if entity["end"] + cls_token_length in ind_map:
endid = ind_map[entity["end"] + cls_token_length]
endid += 1
else:
endid = 0
if startid >= endid:
endid = startid + 1
entity_indicator[entity["id"]][startid:endid] = 1
relations_vector = {}
relations = {}
for rel in relations_info:
rel_type, e1, e2 = rel["type"], rel["subj"], rel["obj"]
if e1 in entity_indicator and e2 in entity_indicator:
if (e1, e2) not in relations_vector:
relations_vector[(e1, e2)] = np.zeros(rel_vocab_size)
if rel_type in relation_map:
# NA should not be in the relation_map to generate all-zero vector.
relations_vector[(e1, e2)][relation_map[rel_type]] = 1
if (e1, e2) not in relations:
relations[(e1, e2)] = []
relations[(e1, e2)].append(rel_type)
label_vectors = []
label_names = []
e1_indicators = []
e2_indicators = []
e1s, e2s = [], []
e1_types = []
e2_types = []
    # in this mode, an NA relation label occurs either when it is shown in the data or when there is no label between the pair
for e1 in list(entity_id_set):
for e2 in list(entity_id_set):
if (entity_type[e1], entity_type[e2]) in ENTITY_PAIR_TYPE_SET and e1 != "-" and e2 != "-":
e1s.append(e1)
e2s.append(e2)
e1_indicator, e2_indicator = entity_indicator[e1], entity_indicator[e2]
if (e1, e2) in relations_vector:
label_vector = relations_vector[(e1, e2)]
else:
label_vector = np.zeros(rel_vocab_size)
# if len(label_vector) != 14:
# print(label_vector)
sys.stdout.flush()
label_vectors.append(label_vector)
if (e1, e2) in relations:
label_name = relations[(e1, e2)]
else:
label_name = []
label_names.append(label_name)
e1_indicators.append(e1_indicator)
e2_indicators.append(e2_indicator)
e1_types.append(entity_type[e1])
e2_types.append(entity_type[e2])
return {"input": input_array, "pad": pad_array, "docid": docid, "input_length": input_length,
"label_vectors": label_vectors, "label_names": label_names,
"e1_indicators": e1_indicators, "e2_indicators": e2_indicators,
"e1s": e1s, "e2s": e2s, "e1_types": e1_types, "e2_types": e2_types
}
class Dataloader(object):
"""Dataloader"""
def __init__(self, data_path, tokenizer, seed=0, max_text_length=512, training=False, logger=None, lowercase=True):
# shape of input for each batch: (batchsize, max_text_length, max_sent_length)
self.train = []
self.val = []
self.test_ctd = []
self.test_ds_ctd = []
self.test_anno_ctd = []
self.test_anno_all = []
self.tokenizer = tokenizer
self.logger = logger
self.relation_map = json.loads(
open(data_path + "/relation_map.json").read())
self.relation_name = dict([(i, r)
for r, i in self.relation_map.items()])
def calculate_stat(data):
num_pos_rels = 0
num_neg_pairs = 0
num_pos_pairs = 0
per_rel_stat = {}
entity_type_pair_stat = {}
for d in data:
for i, (rel_names, e1t, e2t) in enumerate(list(zip(d["label_names"], d["e1_types"], d["e2_types"]))):
for rel_name in rel_names:
if rel_name not in per_rel_stat:
per_rel_stat[rel_name] = 0
if (e1t, e2t) not in entity_type_pair_stat:
entity_type_pair_stat[(e1t, e2t)] = {
"num_pos_pairs": 0, "num_neg_pairs": 0, "num_pos_rels": 0}
num_pos_ = d["label_vectors"][i].sum()
if num_pos_ == 0:
num_neg_pairs += 1
entity_type_pair_stat[(e1t, e2t)]["num_neg_pairs"] += 1
else:
num_pos_rels += num_pos_
num_pos_pairs += 1
entity_type_pair_stat[(
e1t, e2t)]["num_pos_rels"] += num_pos_
entity_type_pair_stat[(
e1t, e2t)]["num_pos_pairs"] += 1
for rel_name in rel_names:
per_rel_stat[rel_name] += 1
return num_pos_rels, num_pos_pairs, num_neg_pairs, entity_type_pair_stat, per_rel_stat
if training == True:
with open(data_path + "/train.json") as f:
# try:
train_json = json.loads(f.read())
for data in tqdm(train_json[:]):
processed_data = preprocess(
data, tokenizer, max_text_length, self.relation_map, lowercase)
# print(processed_data)
sys.stdout.flush()
if processed_data["label_vectors"] == []:
continue
self.train.append(processed_data)
num_pos_rels, num_pos_pairs, num_neg_pairs, entity_type_pair_stat, per_rel_stat = calculate_stat(
self.train)
self.logger.info(f"=======================================")
self.logger.info(f"Training: # of docs = {len(train_json)}")
self.logger.info(
f" # of positive pairs = {num_pos_pairs}")
self.logger.info(
f" # of positive labels = {num_pos_rels}")
self.logger.info(
f" # of negative pairs = {num_neg_pairs}")
self.logger.info(f"---------------------------------------")
for e1t, e2t in entity_type_pair_stat:
self.logger.info(
f" ({e1t}, {e2t}): # of positive pairs = {entity_type_pair_stat[(e1t, e2t)]['num_pos_pairs']}")
self.logger.info(
f" ({e1t}, {e2t}): # of positive labels = {entity_type_pair_stat[(e1t, e2t)]['num_pos_rels']}")
self.logger.info(
f" ({e1t}, {e2t}): # of negative pairs = {entity_type_pair_stat[(e1t, e2t)]['num_neg_pairs']}")
self.logger.info(f"---------------------------------------")
for rel_name in per_rel_stat:
self.logger.info(
f" {rel_name}: # of labels = {per_rel_stat[rel_name]}")
self.logger.info(f"=======================================")
with open(data_path + "/valid.json") as f:
valid_json = json.loads(f.read())
for data in tqdm(valid_json[:]):
processed_data = preprocess(
data, tokenizer, max_text_length, self.relation_map, lowercase)
if processed_data["label_vectors"] == []:
continue
self.val.append(processed_data)
num_pos_rels, num_pos_pairs, num_neg_pairs, entity_type_pair_stat, per_rel_stat = calculate_stat(
self.val)
self.logger.info(f"=======================================")
self.logger.info(f"Valid: # of docs = {len(valid_json)}")
self.logger.info(
f" # of positive pairs = {num_pos_pairs}")
self.logger.info(
f" # of positive labels = {num_pos_rels}")
self.logger.info(
f" # of negative pairs = {num_neg_pairs}")
self.logger.info(f"---------------------------------------")
for e1t, e2t in entity_type_pair_stat:
self.logger.info(
f" ({e1t}, {e2t}): # of positive pairs = {entity_type_pair_stat[(e1t, e2t)]['num_pos_pairs']}")
self.logger.info(
f" ({e1t}, {e2t}): # of positive labels = {entity_type_pair_stat[(e1t, e2t)]['num_pos_rels']}")
self.logger.info(
f" ({e1t}, {e2t}): # of negative pairs = {entity_type_pair_stat[(e1t, e2t)]['num_neg_pairs']}")
self.logger.info(f"---------------------------------------")
for rel_name in per_rel_stat:
self.logger.info(
f" {rel_name}: # of labels = {per_rel_stat[rel_name]}")
self.logger.info(f"=======================================")
with open(data_path + "/test.json") as f:
test_json = json.loads(f.read())
for data in tqdm(test_json[:]):
processed_data = preprocess(
data, tokenizer, max_text_length, self.relation_map, lowercase)
if processed_data["label_vectors"] == []:
continue
self.test_ctd.append(processed_data)
num_pos_rels, num_pos_pairs, num_neg_pairs, entity_type_pair_stat, per_rel_stat = calculate_stat(
self.test_ctd)
self.logger.info(f"=======================================")
self.logger.info(f"Test ctd: # of docs = {len(test_json)}")
self.logger.info(
f" # of positive pairs = {num_pos_pairs}")
self.logger.info(
f" # of positive labels = {num_pos_rels}")
self.logger.info(
f" # of negative pairs = {num_neg_pairs}")
self.logger.info(f"---------------------------------------")
for e1t, e2t in entity_type_pair_stat:
self.logger.info(
f" ({e1t}, {e2t}): # of positive pairs = {entity_type_pair_stat[(e1t, e2t)]['num_pos_pairs']}")
self.logger.info(
f" ({e1t}, {e2t}): # of positive labels = {entity_type_pair_stat[(e1t, e2t)]['num_pos_rels']}")
self.logger.info(
f" ({e1t}, {e2t}): # of negative pairs = {entity_type_pair_stat[(e1t, e2t)]['num_neg_pairs']}")
self.logger.info(f"---------------------------------------")
for rel_name in per_rel_stat:
self.logger.info(
f" {rel_name}: # of labels = {per_rel_stat[rel_name]}")
self.logger.info(f"=======================================")
with open(data_path + "/test.anno_ctd.json") as f:
test_json = json.loads(f.read())
for data in tqdm(test_json[:]):
processed_data = preprocess(
data, tokenizer, max_text_length, self.relation_map, lowercase)
if processed_data["label_vectors"] == []:
continue
self.test_anno_ctd.append(processed_data)
num_pos_rels, num_pos_pairs, num_neg_pairs, entity_type_pair_stat, per_rel_stat = calculate_stat(
self.test_anno_ctd)
self.logger.info(f"=======================================")
self.logger.info(
f"Test annotated ctd: # of docs = {len(test_json)}")
self.logger.info(
f" # of positive pairs = {num_pos_pairs}")
self.logger.info(
f" # of positive labels = {num_pos_rels}")
self.logger.info(
f" # of negative pairs = {num_neg_pairs}")
self.logger.info(f"---------------------------------------")
for e1t, e2t in entity_type_pair_stat:
self.logger.info(
f" ({e1t}, {e2t}): # of positive pairs = {entity_type_pair_stat[(e1t, e2t)]['num_pos_pairs']}")
self.logger.info(
f" ({e1t}, {e2t}): # of positive labels = {entity_type_pair_stat[(e1t, e2t)]['num_pos_rels']}")
self.logger.info(
f" ({e1t}, {e2t}): # of negative pairs = {entity_type_pair_stat[(e1t, e2t)]['num_neg_pairs']}")
self.logger.info(f"---------------------------------------")
for rel_name in per_rel_stat:
self.logger.info(
f" {rel_name}: # of labels = {per_rel_stat[rel_name]}")
self.logger.info(f"=======================================")
with open(data_path + "/test.anno_all.json") as f:
test_json = json.loads(f.read())
for data in tqdm(test_json[:]):
processed_data = preprocess(
data, tokenizer, max_text_length, self.relation_map, lowercase)
if processed_data["label_vectors"] == []:
continue
self.test_anno_all.append(processed_data)
num_pos_rels, num_pos_pairs, num_neg_pairs, entity_type_pair_stat, per_rel_stat = calculate_stat(
self.test_anno_all)
self.logger.info(f"=======================================")
self.logger.info(
f"Test annotated all: # of docs = {len(test_json)}")
self.logger.info(
f" # of positive pairs = {num_pos_pairs}")
self.logger.info(
f" # of positive labels = {num_pos_rels}")
self.logger.info(
f" # of negative pairs = {num_neg_pairs}")
self.logger.info(f"---------------------------------------")
for e1t, e2t in entity_type_pair_stat:
self.logger.info(
f" ({e1t}, {e2t}): # of positive pairs = {entity_type_pair_stat[(e1t, e2t)]['num_pos_pairs']}")
self.logger.info(
f" ({e1t}, {e2t}): # of positive labels = {entity_type_pair_stat[(e1t, e2t)]['num_pos_rels']}")
self.logger.info(
f" ({e1t}, {e2t}): # of negative pairs = {entity_type_pair_stat[(e1t, e2t)]['num_neg_pairs']}")
self.logger.info(f"---------------------------------------")
for rel_name in per_rel_stat:
self.logger.info(
f" {rel_name}: # of labels = {per_rel_stat[rel_name]}")
self.logger.info(f"=======================================")
self.max_text_length = max_text_length
self._bz = 1
self._datasize = len(self.train)
self._idx = 0
self.num_trained_data = 0
random.seed(seed)
random.shuffle(self.train)
def __len__(self):
return self._datasize
def __iter__(self):
# {"input": input_array, "pad": pad_array, "docid": docid, "input_length": input_length,
# "label_vectors": label_vectors, "label_names": label_names,
# "e1_indicators": e1_indicators, "e2_indicators": e2_indicators,
# "e1s": e1s, "e2s": e2s, "e1_types": e1_types, "e2_types": e2_types
# }
while True:
if self._idx + self._bz > self._datasize:
random.shuffle(self.train)
self._idx = 0
data = self.train[self._idx:(self._idx+self._bz)][0]
self._idx += self._bz
input_array, pad_array, label_array, ep_masks, e1_indicators, e2_indicators = [
], [], [], [], [], []
input_lengths = []
input_array.append(data["input"])
pad_array.append(data["pad"])
input_lengths.append(data["input_length"])
if len(data["label_vectors"]) > 100:
shuffle_indexes = np.arange(len(data["label_vectors"]))
np.random.shuffle(shuffle_indexes)
shuffle_indexes = shuffle_indexes[:100]
else:
shuffle_indexes = np.arange(len(data["label_vectors"]))
label_array_ = np.array(data["label_vectors"])[shuffle_indexes]
label_array.append(label_array_)
e1_indicators_ = np.array(data["e1_indicators"])[shuffle_indexes]
e1_indicators.append(e1_indicators_)
e2_indicators_ = np.array(data["e2_indicators"])[shuffle_indexes]
e2_indicators.append(e2_indicators_)
# (text_length, text_length)
ep_masks_ = []
for e1_indicator, e2_indicator in list(zip(list(e1_indicators_), list(e2_indicators_))):
ep_mask_ = np.full(
(self.max_text_length, self.max_text_length), -1e20)
ep_outer = 1 - np.outer(e1_indicator, e2_indicator)
ep_mask_ = ep_mask_ * ep_outer
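                # np.outer(e1_indicator, e2_indicator) is 1 exactly where token i belongs
                # to entity 1 and token j to entity 2; multiplying the -1e20 grid by
                # (1 - outer) leaves those pair positions at 0 and every other position
                # at -1e20, presumably so a downstream softmax over ep_mask_ ignores
                # everything outside the entity pair.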
ep_masks_.append(ep_mask_)
ep_masks_ = np.array(ep_masks_)
ep_masks.append(ep_masks_)
max_length = int(np.max(input_lengths))
input_ids = torch.tensor(np.array(input_array)[
:, :max_length], dtype=torch.long)
attention_mask = torch.tensor(
np.array(pad_array)[:, :max_length], dtype=torch.long)
label_array = torch.tensor(
np.array(label_array), dtype=torch.float)
ep_masks = torch.tensor(
np.array(ep_masks)[:, :, :max_length, :max_length], dtype=torch.float)
e1_indicators = np.array(e1_indicators)
e2_indicators = np.array(e2_indicators)
e1_indicators = torch.tensor(e1_indicators[
:, :, :max_length], dtype=torch.float)
e2_indicators = torch.tensor(e2_indicators[
:, :, :max_length], dtype=torch.float)
self.num_trained_data += self._bz
return_data = (input_ids, attention_mask, ep_masks,
e1_indicators, e2_indicators, label_array)
yield self.num_trained_data, return_data
|
class Credentials():
# users list
credentials_list = []
    def __init__(self, Fname, Sname, username, password, email):
        '''
        Initialize a Credentials instance (the users class 'blueprint')
        with the user's details.
        '''
self.Fname = Fname
self.Sname = Sname
self.username = username
self.password = password
self.email = email
def saveCredentials(self):
Credentials.credentials_list.append(self)
def delete(self):
Credentials.credentials_list.remove(self)
|
import sys
SIX_TAB = {
# six.moves: (py2, py3)
"configparser": ("ConfigParser", "configparser"),
"copyreg": ("copy_reg", "copyreg"),
"cPickle": ("cPickle", "pickle"),
"cStringIO": ("cStringIO", "io"),
"dbm_gnu": ("gdbm", "dbm.gnu"),
"_dummy_thread": ("dummy_thread", "_dummy_thread"),
"email_mime_multipart": ("email.MIMEMultipart", "email.mime.multipart"),
"email_mime_nonmultipart": ("email.MIMENonMultipart", "email.mime.nonmultipart"),
"email_mime_text": ("email.MIMEText", "email.mime.text"),
"email_mime_base": ("email.MIMEBase", "email.mime.base"),
"filter": ("itertools", None),
"filterfalse": ("itertools", "itertools"),
"getcwd": ("os", "os"),
"getcwdb": ("os", "os"),
"http_cookiejar": ("cookielib", "http.cookiejar"),
"http_cookies": ("Cookie", "http.cookies"),
"html_entities": ("htmlentitydefs", "html.entities"),
"html_parser": ("HTMLParser", "html.parser"),
"http_client": ("httplib", "http.client"),
"BaseHTTPServer": ("BaseHTTPServer", "http.server"),
"CGIHTTPServer": ("CGIHTTPServer", "http.server"),
"SimpleHTTPServer": ("SimpleHTTPServer", "http.server"),
"intern": (None, "sys"),
"map": ("itertools", None),
"queue": ("Queue", "queue"),
"reduce": (None, "functools"),
"reload_module": (None, "importlib"),
"reprlib": ("repr", "reprlib"),
"shlex_quote": ("pipes", "shlex"),
"socketserver": ("SocketServer", "socketserver"),
"_thread": ("thread", "_thread"),
"tkinter": ("Tkinter", "tkinter"),
"tkinter_dialog": ("Dialog", "tkinter.dialog"),
"tkinter_filedialog": ("FileDialog", "tkinter.FileDialog"),
"tkinter_scrolledtext": ("ScrolledText", "tkinter.scrolledtext"),
"tkinter_simpledialog": ("SimpleDialog", "tkinter.simpledialog"),
"tkinter_ttk": ("ttk", "tkinter.ttk"),
"tkinter_tix": ("Tix", "tkinter.tix"),
"tkinter_constants": ("Tkconstants", "tkinter.constants"),
"tkinter_dnd": ("Tkdnd", "tkinter.dnd"),
"tkinter_colorchooser": ("tkColorChooser", "tkinter.colorchooser"),
"tkinter_commondialog": ("tkCommonDialog", "tkinter.commondialog"),
"tkinter_tkfiledialog": ("tkFileDialog", "tkinter.filedialog"),
"tkinter_font": ("tkFont", "tkinter.font"),
"tkinter_messagebox": ("tkMessageBox", "tkinter.messagebox"),
"tkinter_tksimpledialog": ("tkSimpleDialog", "tkinter.simpledialog"),
"urllib.robotparser": ("robotparser", "urllib.robotparser"),
"urllib_robotparser": ("robotparser", "urllib.robotparser"),
"UserDict": ("UserDict", "collections"),
"UserList": ("UserList", "collections"),
"UserString": ("UserString", "collections"),
"winreg": ("_winreg", "winreg"),
"xmlrpc_client": ("xmlrpclib", "xmlrpc.client"),
"xmlrpc_server": ("SimpleXMLRPCServer", "xmlrpc.server"),
"zip": ("itertools", None),
"zip_longest": ("itertools", "itertools"),
"urllib.parse": (("urlparse", "urllib"), "urllib.parse"),
"urllib.error": (("urllib", "urllib2"), "urllib.error"),
"urllib.request": (("urllib", "urllib2"), "urllib.request"),
"urllib.response": ("urllib", "urllib.request"),
}
def check(cmd, mf):
found = False
six_moves = ["six.moves"]
# A number of libraries contain a vendored version
    # of six. Automatically detect those:
for nm in mf.graph.node_list():
if not isinstance(nm, str):
continue
if nm.endswith(".six.moves"):
six_moves.append(nm)
for mod in six_moves:
m = mf.findNode(mod)
if m is None:
continue
# Some users of six use:
#
# import six
# class foo (six.moves.some_module.SomeClass): pass
#
# This does not refer to six.moves submodules
# in a way that modulegraph will recognize. Therefore
# unconditionally include everything in the
# table...
for submod in SIX_TAB:
if submod.startswith("tkinter"):
                # Don't autoprocess tkinter; that results
                # in significantly larger bundles
continue
if sys.version_info[0] == 2:
alt = SIX_TAB[submod][0]
else:
alt = SIX_TAB[submod][1]
if alt is None:
continue
elif not isinstance(alt, tuple):
alt = (alt,)
for nm in alt:
try:
mf.import_hook(nm, m)
found = True
except ImportError:
pass
        # Look for submodules that aren't automatically
        # processed.
for submod in SIX_TAB:
if not submod.startswith("tkinter"):
continue
name = mod + "." + submod
m = mf.findNode(name)
if m is not None:
if sys.version_info[0] == 2:
alt = SIX_TAB[submod][0]
else:
alt = SIX_TAB[submod][1]
if alt is None:
continue
elif not isinstance(alt, tuple):
alt = (alt,)
for nm in alt:
mf.import_hook(nm, m)
found = True
if found:
return {}
else:
return None
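# Illustrative sketch (not part of the recipe above): SIX_TAB can also be queried
# directly to find the concrete module(s) behind a six.moves name for the running
# interpreter. The helper name below is hypothetical.
def resolve_six_move(name):
    py2, py3 = SIX_TAB[name]
    alt = py2 if sys.version_info[0] == 2 else py3
    if alt is None:
        return ()
    return alt if isinstance(alt, tuple) else (alt,)
# Example: resolve_six_move("queue") returns ("queue",) on Python 3
# and ("Queue",) on Python 2.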
|
from functools import update_wrapper
import typing as t
import click
from aprsd import config as aprsd_config
from aprsd import log
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
common_options = [
click.option(
"--loglevel",
default="INFO",
show_default=True,
type=click.Choice(
["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
case_sensitive=False,
),
show_choices=True,
help="The log level to use for aprsd.log",
),
click.option(
"-c",
"--config",
"config_file",
show_default=True,
default=aprsd_config.DEFAULT_CONFIG_FILE,
help="The aprsd config file to use for options.",
),
click.option(
"--quiet",
is_flag=True,
default=False,
help="Don't log to stdout",
),
]
def add_options(options):
def _add_options(func):
for option in reversed(options):
func = option(func)
return func
return _add_options
def process_standard_options(f: F) -> F:
def new_func(*args, **kwargs):
ctx = args[0]
ctx.ensure_object(dict)
ctx.obj["loglevel"] = kwargs["loglevel"]
ctx.obj["config_file"] = kwargs["config_file"]
ctx.obj["quiet"] = kwargs["quiet"]
ctx.obj["config"] = aprsd_config.parse_config(kwargs["config_file"])
log.setup_logging(
ctx.obj["config"], ctx.obj["loglevel"],
ctx.obj["quiet"],
)
del kwargs["loglevel"]
del kwargs["config_file"]
del kwargs["quiet"]
return f(*args, **kwargs)
return update_wrapper(t.cast(F, new_func), f)
def process_standard_options_no_config(f: F) -> F:
"""Use this as a decorator when config isn't needed."""
def new_func(*args, **kwargs):
ctx = args[0]
ctx.ensure_object(dict)
ctx.obj["loglevel"] = kwargs["loglevel"]
ctx.obj["config_file"] = kwargs["config_file"]
ctx.obj["quiet"] = kwargs["quiet"]
log.setup_logging_no_config(
ctx.obj["loglevel"],
ctx.obj["quiet"],
)
del kwargs["loglevel"]
del kwargs["config_file"]
del kwargs["quiet"]
return f(*args, **kwargs)
return update_wrapper(t.cast(F, new_func), f)
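# Illustrative usage sketch (an assumption, not from this module): how add_options
# and process_standard_options are typically stacked on a click command. The "cli"
# group and "status" command below are hypothetical.
@click.group()
def cli():
    pass
@cli.command()
@add_options(common_options)
@click.pass_context
@process_standard_options
def status(ctx):
    # loglevel/config_file/quiet were consumed by process_standard_options and
    # stored on ctx.obj; the parsed config is available as ctx.obj["config"].
    click.echo("Log level: {}".format(ctx.obj["loglevel"]))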
|
# Generated by Django 3.2 on 2021-04-08 17:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
('cadastres', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Property',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.TextField()),
('account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.account')),
('cadastre', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cadastres.cadastre')),
],
options={
'verbose_name_plural': 'Properties',
'unique_together': {('account', 'external_id')},
},
),
]
|
from math import ceil, log
#1 ip address
ipAddress = input("Enter ip Address: ")
#2 separated in 4 parts => string and binary
firstPart, secondPart, thirdPart, fourthPart = ipAddress.split(".")
ipAddressFourParts = [int(firstPart), int(secondPart), int(thirdPart), int(fourthPart)]
binaryipAddressFourParts = list(map(lambda x: format(int(x),"08b") , ipAddressFourParts))
#3 Class of IP address
if int(firstPart) <= 127:
addressRange = "A"
subnetMaskInitialPart = format(255,"b")
elif 128 <= int(firstPart) <= 191:
addressRange = "B"
subnetMaskInitialPart = format(255,"b") + format(255,"b")
elif 192 <= int(firstPart) <= 223:
addressRange = "C"
subnetMaskInitialPart = format(255,"b") + format(255,"b") + format(255,"b")
print("Address class: ",addressRange)
#4 Default subnet Mask
formation = str("0"+str(32-len(subnetMaskInitialPart))+"b")
tailingZeros = format(0,formation)
defaultSubnetMaskBinary = subnetMaskInitialPart + tailingZeros
defaultSubnetMaskWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(defaultSubnetMaskBinary))
defaultSubnetMaskWithDotsBinary = defaultSubnetMaskWithDotsBinary[1:] #to remove . at start
defaultSubnetMaskWithDotsDecFourParts = list(map(lambda x: int(x,2) , defaultSubnetMaskWithDotsBinary.split(".")))
defaultSubnetMaskWithDotsDec = ".".join(str(x) for x in defaultSubnetMaskWithDotsDecFourParts)
print("Default subnet mask in Binary: ", defaultSubnetMaskWithDotsBinary)
print("Default subnet mask in Decimal: ", defaultSubnetMaskWithDotsDec)
#5 Network Address
networkAddressFourParts = list(map(lambda x: x[0] & x[1] , list(zip(ipAddressFourParts, defaultSubnetMaskWithDotsDecFourParts))))
networkAddressDotDec = ".".join(str(x) for x in networkAddressFourParts)
print("Network Address in Decimal: ", networkAddressDotDec)
binarynetworkAddressFourParts = list(map(lambda x: format(int(x),"08b") , networkAddressFourParts))
networkAddressBin = "".join(str(x) for x in binarynetworkAddressFourParts)
networkAddressDotBin = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(networkAddressBin))
networkAddressDotBin = networkAddressDotBin[1:]
print("Network Address in Binary: ", networkAddressDotBin)
networkAddressInitialPart = networkAddressBin[0:len(subnetMaskInitialPart)]
#6 custom subnet mask & host
choice = input("Which information do You have?\n1. CIDR\n2. No of subnet Bits\n3. No of total subnets\n4. No of total hosts\n5. No of usable hosts\nYour choice should be 1, 2, 3, 4 or 5: ")
if choice == '1':
CIDR = input("Enter CIDR value: ")
CIDR = int(CIDR)
subnetBitsCount = CIDR - len(subnetMaskInitialPart)
hostsBitsCount = 32 - CIDR
elif choice == '2':
subnetBitsCount = input("Enter subnet *Bits* you want: ")
subnetBitsCount = int(subnetBitsCount)
hostsBitsCount = 32 - subnetBitsCount - len(subnetMaskInitialPart)
elif choice == '3':
totalSubnets = input("Enter total number of Subnets: ")
totalSubnets = int(totalSubnets)
subnetBitsCount = ceil(log(totalSubnets)/(log(2)))
hostsBitsCount = 32 - subnetBitsCount - len(subnetMaskInitialPart)
elif choice == '4':
totalHosts = input("Enter total number of Hosts: ")
totalHosts = int(totalHosts)
hostsBitsCount = ceil(log(totalHosts)/(log(2)))
subnetBitsCount = 32 - hostsBitsCount - len(subnetMaskInitialPart)
elif choice == '5':
usableHosts = input("Enter usableHosts value: ")
usableHosts = int(usableHosts)
usableHosts = usableHosts + 2
hostsBitsCount = ceil(log(usableHosts)/(log(2)))
subnetBitsCount = 32 - hostsBitsCount - len(subnetMaskInitialPart)
else:
print("Please input correct choice from 1 to 4 only...")
numberOfSubnets = (2**subnetBitsCount)
numberOfHosts = (2**hostsBitsCount)
print("Number of Subnet bits: ", subnetBitsCount)
print("Total Number of subnets: ", numberOfSubnets)
print("Number of host bits: ", hostsBitsCount)
print("Total Number of Hosts: ", numberOfHosts)
#7 CUSTOM subnet
formation = str("0"+str(subnetBitsCount+len(subnetMaskInitialPart))+"b")
customSubnet = format(2**(int(subnetBitsCount+len(subnetMaskInitialPart)))-1, formation)
formation = str("0"+str(hostsBitsCount)+"b")
customSubnetTrailingZero = format(0,formation)
customSubnetMaskBinary = customSubnet + customSubnetTrailingZero
customSubnetMaskWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(customSubnetMaskBinary))
customSubnetMaskWithDotsBinary = customSubnetMaskWithDotsBinary[1:] #to remove . at start
customSubnetMaskWithDotsDecFourParts = list(map(lambda x: int(x,2) , customSubnetMaskWithDotsBinary.split(".")))
customSubnetMaskWithDotsDec = ".".join(str(x) for x in customSubnetMaskWithDotsDecFourParts)
print("Custom subnet Mask in Binary: ", customSubnetMaskWithDotsBinary)
print("Custom subnet Mask in Decimal: ", customSubnetMaskWithDotsDec)
def my_function(initialPart):
    # Print the network-to-broadcast address range (in decimal and binary) of the
    # subnet chosen via subnetNumber in the loop below, given the fixed initial bits.
    formation = str("0"+str(subnetBitsCount)+"b")
subnetAddressBits = format(subnetNumber-1, formation)
if len(subnetAddressBits) > subnetBitsCount:
print("You cannot borrow more bits than available")
formation = str("0"+str(hostsBitsCount)+"b")
networkAddressHostBits = format(0, formation)
networkAddressHostSubnet = subnetAddressBits + networkAddressHostBits
networkAddress = initialPart + networkAddressHostSubnet
networkAddressWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(networkAddress))
networkAddressWithDotsBinary = networkAddressWithDotsBinary[1:] #to remove . at start
networkAddressWithDotsDecFourParts = list(map(lambda x: int(x,2) , networkAddressWithDotsBinary.split(".")))
networkAddressWithDotsDec = ".".join(str(x) for x in networkAddressWithDotsDecFourParts)
broadcastAddressHostBits = format(2**(int(hostsBitsCount))-1, formation)
broadcastAddressHostSubnet = subnetAddressBits + broadcastAddressHostBits
broadcastAddress = initialPart + broadcastAddressHostSubnet
broadcastAddressWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(broadcastAddress))
broadcastAddressWithDotsBinary = broadcastAddressWithDotsBinary[1:] #to remove . at start
broadcastAddressWithDotsDecFourParts = list(map(lambda x: int(x,2) , broadcastAddressWithDotsBinary.split(".")))
broadcastAddressWithDotsDec = ".".join(str(x) for x in broadcastAddressWithDotsDecFourParts)
print(networkAddressWithDotsDec ," to ", broadcastAddressWithDotsDec)
print("In binary: ", networkAddressWithDotsBinary," to ", broadcastAddressWithDotsBinary)
#8 Need information specific subnet
while(True):
subnetNumber = int(input("Enter subnet's number you want: "))
formation = str("0"+str(subnetBitsCount)+"b")
subnetAddressBits = format(subnetNumber-1, formation)
if len(subnetAddressBits) > subnetBitsCount:
print("You cannot borrow more bits than available")
continue
print("You required of: ",subnetNumber)
print("-"*80)
print("Network Range: ", end="")
my_function(networkAddressInitialPart)
print("-"*80)
print("Subnet Range: ", end="")
my_function(subnetMaskInitialPart)
|
# Generated by Django 3.0.2 on 2020-06-29 23:19
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('leagues', '0002_auto_20200629_2319'),
('teams', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='YahooMatchup',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('updated_timestamp', models.DateTimeField(auto_now=True, verbose_name='Updated At')),
('remote_id', models.CharField(blank=True, max_length=1024, null=True, unique=True, verbose_name='Remote Object Id')),
('home_win', models.IntegerField(default=0)),
('home_loss', models.IntegerField(default=0)),
('home_draw', models.IntegerField(default=0)),
('visitor_win', models.IntegerField(default=0)),
('visitor_loss', models.IntegerField(default=0)),
('visitor_draw', models.IntegerField(default=0)),
('home_team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matchups_as_home', to='teams.YahooTeam', verbose_name='Yahoo Home Team')),
('league', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matchups', to='leagues.YahooLeague', verbose_name='Yahoo League')),
('visitor_team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matchups_as_visitors', to='teams.YahooTeam', verbose_name='Yahoo Visitor Team')),
('week', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matchups', to='leagues.YahooLeagueWeeks', verbose_name='Yahoo Week')),
],
options={
'abstract': False,
},
),
]
|
# Choose one element from each of the a input lists so that the sum of squares,
# taken modulo b, is as large as possible; itertools.product enumerates every
# possible combination of choices.
from itertools import product
a, b = map(int, input().split())
c = (list(map(int, input().split()))[1:] for _ in range(a))
r = map(lambda x: sum(i**2 for i in x) % b, product(*c))
print(max(r))
|
import logging
import os
import re
from collections import Counter
import numpy as np
from data.data_iterator import ids2seq
def load_file(path):
"""Load file formated with one sentence per line and tokens separated by spaces."""
with open(path, "r", encoding="utf8") as file:
outputs = [line.strip().split() for line in file]
return outputs
def iob2iobes(iob):
"""Converts a list of IOB tags to IOBES scheme."""
iobes = []
tags = iob + ["O"]
for i in range(len(tags) - 1):
tag, next_tag = tags[i: i + 2]
if tag == "O":
iobes.append("O")
else:
if tag[0] == "B":
if next_tag[0] in "OB" or not "-".join(next_tag.split("-")[1:]) == "-".join(tag.split("-")[1:]):
iobes.append("S-" + "-".join(tag.split("-")[1:]))
else:
iobes.append(tag)
elif tag[0] == "I":
if next_tag[0] == "O" or not "-".join(next_tag.split("-")[1:]) == "-".join(tag.split("-")[1:]):
iobes.append("E-" + "-".join(tag.split("-")[1:]))
else:
iobes.append(tag)
return iobes
def iobes2iob(iobes):
"""Converts a list of IOBES tags to IOB scheme."""
dico = {pfx: pfx for pfx in "IOB"}
dico.update({"S": "B", "E": "I"})
return [dico[t[0]] + t[1:] if not t == "O" else "O" for t in iobes]
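# Example (illustrative): iob2iobes(["B-PER", "I-PER", "O", "B-LOC"]) returns
# ["B-PER", "E-PER", "O", "S-LOC"], and iobes2iob() maps it back to the original
# IOB tags.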
def iob_scheme(tags, iobes=True):
""" Transform tag sequence without any scheme into IOB or IOBES scheme """
iob = []
tags = ["O"] + tags + ["O"]
for i in range(1, len(tags) - 1):
prev_tag, tag, next_tag = tags[i - 1: i + 2]
if not tag == "O":
if tag not in [prev_tag, next_tag]:
if iobes:
iob.append("S-" + tag)
else:
iob.append("B-" + tag)
elif not tag == prev_tag:
iob.append("B-" + tag)
elif not tag == next_tag:
if iobes:
iob.append("E-" + tag)
else:
iob.append("I-" + tag)
else:
iob.append("I-" + tag)
else:
iob.append("O")
return iob
def build_word_vocab(data_path, filenames, mincount=1, whole_dataset=False):
"""Computes word vocabulary of several files and save it to "vocab.words.txt".
Args:
data_path (str) : Path of the folder containing the sentence files named "[filename].words.txt"
filenames (list) : list of filenames (ex : ["train"] if file is named "train.words.txt")
mincount (int) : minimal number of occurences to consider (default:1)
Returns:
set of words kept
"""
word_counter = Counter()
for fn in filenames:
vocab = Counter()
with open(data_path + fn + ".words.txt", "r", encoding="utf8") as file:
for line in file:
word_counter.update(line.strip().split())
vocab.update(line.strip().split())
vocab = {w for w, c in vocab.items() if c >= mincount}
with open(data_path + "vocab.{}.words.txt".format(fn), "w", encoding="utf8") as file:
for w in sorted(list(vocab)):
file.write(w + "\n")
word_vocab = {w for w, c in word_counter.items() if c >= mincount}
if whole_dataset:
with open(data_path + "vocab.words.txt", "w", encoding="utf8") as file:
for w in sorted(list(word_vocab)):
file.write(w + "\n")
print('Word vocabulary built. Kept {} out of {}'.format(len(word_vocab), len(word_counter)))
return word_vocab
def build_char_vocab(word_vocab, data_path, save_file="vocab.chars.txt"):
"""Computes char vocabulary given a word vocabulary path and save it to "vocab.chars.txt".
Args:
word_vocab (set) : set of words in the vocabulary
data_path (str) : Path of the folder containing "vocab.words.txt"
Returns:
set of chars
"""
char_vocab = set()
for w in word_vocab:
char_vocab.update(w)
with open(os.path.join(data_path, save_file), "w", encoding="utf8") as file:
for w in sorted(list(char_vocab)):
file.write(w + "\n")
print('Char vocabulary built. Found {} chars'.format(len(char_vocab)))
return char_vocab
def build_tag_vocab(data_path, filenames, iobes=True):
"""Computes tag vocabulary of several files and save it to "vocab.[scheme].txt".
Args:
data_path (str) : Path of the folder containing the tag files named "[filename].[scheme].txt"
filenames (list) : list of filenames (ex : ["train"] if file is named "train.iobes.txt")
iobes (bool) : whether to use IOBES scheme (default:True) else IOB scheme
"""
if iobes:
ext = ".iobes.txt"
else:
ext = ".iob.txt"
tag_vocab = set()
for fn in filenames:
with open(data_path + fn + ext, "r", encoding="utf8") as file:
for line in file:
tag_vocab.update(line.strip().split())
with open(data_path + "vocab" + ext, "w", encoding="utf8") as file:
for t in sorted(list(tag_vocab)):
file.write(t + "\n")
print('Tag vocabulary built. Found {} tags'.format(len(tag_vocab)))
def build_vocab(data_path, filenames=["train", "dev", "test"], mincount=1):
"""Build word, char and tag vocabulary in both IOB and IOBES."""
word_vocab = build_word_vocab(data_path, filenames, mincount, whole_dataset=True)
build_char_vocab(word_vocab, data_path)
train_word_vocab = build_word_vocab(data_path, ["train"], mincount)
build_char_vocab(train_word_vocab, data_path, save_file="vocab.train.chars.txt")
build_tag_vocab(data_path, filenames, iobes=True)
build_tag_vocab(data_path, filenames, iobes=False)
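# Illustrative call (the path is hypothetical): expects files such as
# "./conll2003/train.words.txt" and writes the vocab files next to them.
# build_vocab("./conll2003/", filenames=["train", "dev", "test"], mincount=1)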
def extract_entities(words, iob_tags):
""" Retrieve entities, types and spans given a tokenized sentence and IOB tags
Args:
words (list) : list of word strings
iob_tags (list) : list of IOB tags
Returns:
entities (list) : list of words in the extracted entities
types (list) : list of types of the extracted entities
indices (list) : list of spans (begin idx, end idx) of the entities
"""
entities = []
types = []
indices = []
tmp_ent = None
tmp_indices = None
tmp_type = "O"
for i, (w, t) in enumerate(zip(words, iob_tags)):
if t[0] == "B":
if tmp_ent is not None:
entities.append(tmp_ent)
indices.append(tmp_indices)
types.append(tmp_type)
tmp_ent = w
tmp_type = "-".join(t.split("-")[1:])
tmp_indices = [i]
elif t[0] == "O":
if tmp_ent is not None:
entities.append(tmp_ent)
indices.append(tmp_indices)
types.append(tmp_type)
tmp_ent = None
tmp_type = None
tmp_indices = None
elif t[0] == "I":
if "-".join(t.split("-")[1:]) == tmp_type and i == tmp_indices[-1] + 1:
tmp_ent += " " + w
tmp_indices += [i]
else:
if tmp_ent is not None:
entities.append(tmp_ent)
indices.append(tmp_indices)
types.append(tmp_type)
tmp_ent = w
tmp_type = "-".join(t.split("-")[1:])
tmp_indices = [i]
if tmp_ent is not None:
entities.append(tmp_ent)
indices.append(tmp_indices)
types.append(tmp_type)
return list(zip(entities, types, indices))
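# Example (illustrative):
# extract_entities(["Barack", "Obama", "visited", "Paris"],
#                  ["B-PER", "I-PER", "O", "B-LOC"])
# returns [("Barack Obama", "PER", [0, 1]), ("Paris", "LOC", [3])].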
def extract_iob(iob_tags):
""" Retrieve types and spans given a tokenized sentence and IOB tags
Args:
iob_tags (list) : list of IOB tags
Returns:
types (list) : list of types of the extracted entities
indices (list) : list of spans (begin idx, end idx) of the entities
"""
types = []
indices = []
tmp_indices = None
tmp_type = "O"
for i, t in enumerate(iob_tags):
if t[0] == "B":
if tmp_indices is not None:
indices.append(tmp_indices)
types.append(tmp_type)
tmp_type = "-".join(t.split("-")[1:])
tmp_indices = [i]
elif t[0] == "O":
if tmp_indices is not None:
indices.append(tmp_indices)
types.append(tmp_type)
tmp_type = None
tmp_indices = None
elif t[0] == "I":
if "-".join(t.split("-")[1:]) == tmp_type and i == tmp_indices[-1] + 1:
tmp_indices += [i]
else:
if tmp_indices is not None:
indices.append(tmp_indices)
types.append(tmp_type)
tmp_type = "-".join(t.split("-")[1:])
tmp_indices = [i]
if tmp_indices is not None:
indices.append(tmp_indices)
types.append(tmp_type)
return list(zip(types, indices))
def extract_entities_corpus(data, unique=True):
entities = {}
if "words" in data.keys():
for w, t in zip(data["words"], data["tags"]):
ents = extract_entities(w.split(), iobes2iob(t.split()))
for e, t, s in ents:
if not t in entities.keys():
entities[t] = [e]
else:
entities[t].append(e)
if unique:
for type in entities.keys():
entities[type] = set(entities[type])
else:
for split in data.keys():
if not len(entities):
entities = extract_entities_corpus(data[split])
else:
to_append = extract_entities_corpus(data[split])
assert set(to_append.keys()) == set(entities.keys())
for k in entities.keys():
entities[k].extend(to_append[k])
return entities
def partial_match(ent, entities, stop_words=None):
word_set = set(np.concatenate([ent.split() for ent in set(entities)]))
if stop_words is None:
stop_words = set(". , : ; ? ! ( ) \" ' % - the on 's of a an".split())
else:
stop_words = set(". , : ; ? ! ( ) \" ' % - the on 's of a an".split() + stop_words)
for e in ent.split():
if e in word_set and not e.lower() in stop_words:
return True
return False
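# Example (illustrative): partial_match("New York City", {"New York"}) is True,
# because the token "New" also appears in a known entity and is not a stop word.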
def compute_overlap(words, tags, train_entities, stop_words=None, verbose=False):
overlaps = []
splits = ["EXACT", "PARTIAL", "NEW"]
overlap_count = {ent_type: {split: 0 for split in splits} for ent_type in train_entities.keys()}
for i, (w, t) in enumerate(zip(words, tags)):
t = iobes2iob(t.split())
w = w.split()
entities = extract_entities(w, t)
current_overlap = ["O"] * len(w)
for ent, typ, span in entities:
if ent in train_entities[typ]:
for idx in span:
current_overlap[idx] = "EXACT"
overlap_count[typ]["EXACT"] += 1
elif partial_match(ent, train_entities[typ], stop_words=stop_words):
for idx in span:
current_overlap[idx] = "PARTIAL"
overlap_count[typ]["PARTIAL"] += 1
else:
for idx in span:
current_overlap[idx] = "NEW"
overlap_count[typ]["NEW"] += 1
overlaps.append(current_overlap)
for ent_type in train_entities.keys():
n_mentions = sum([overlap_count[ent_type][split] for split in splits])
if verbose:
logging.info(ent_type)
logging.info("{} mentions".format(n_mentions))
logging.info("{} exact overlapping".format(overlap_count[ent_type]["EXACT"]))
logging.info("{} %".format(100 * overlap_count[ent_type]["EXACT"] / n_mentions))
logging.info("{} partial overlapping".format(overlap_count[ent_type]["PARTIAL"]))
logging.info("{} %".format(100 * overlap_count[ent_type]["PARTIAL"] / n_mentions))
logging.info("{} unseen".format(overlap_count[ent_type]["NEW"]))
logging.info("{} %".format(100 * overlap_count[ent_type]["NEW"] / n_mentions))
logging.info("")
return overlaps, overlap_count
|
import numpy as np
def get_phaselc(t, p, data, v_num):
    """Two-harmonic phase curve: 1 + amp1*cos(2*pi*(t - theta1)/per) + amp2*cos(4*pi*(t - theta2)/per)."""
    return (1. + p.amp1[v_num]*np.cos(2.*np.pi*(t-p.theta1[v_num])/p.per[v_num])
            + p.amp2[v_num]*np.cos(4.*np.pi*(t-p.theta2[v_num])/p.per[v_num]))
|
from .LayoutManager import LayoutManager
|
"""
"""
import numpy as np
from ..axis_ratio_model import monte_carlo_halo_shapes
def _enforce_constraints(b_to_a, c_to_a, e, p):
assert np.all(b_to_a > 0), "All elements of b_to_a must be strictly positive"
assert np.all(c_to_a > 0), "All elements of c_to_a must be strictly positive"
assert np.all(b_to_a <= 1), "No element of b_to_a can exceed unity"
assert np.all(c_to_a <= 1), "No element of c_to_a can exceed unity"
assert np.all(b_to_a >= c_to_a), "No element in c_to_a can exceed the corresponding b_to_a"
assert np.all(e <= 0.5), "No element of ellipticity can exceed 0.5"
assert np.all(p <= 0.5), "No element of prolaticity can exceed 0.5"
assert np.all(e >= 0.), "ellipticity must be non-negative"
    assert np.all(p >= -0.25), "prolaticity cannot be less than -0.25"
def test1():
"""Enforce monte_carlo_halo_shapes doesn't crash when given crazy halo masses
"""
npts = int(1e5)
logmhalo = np.linspace(-10, 20, npts)
b_to_a, c_to_a, e, p = monte_carlo_halo_shapes(logmhalo)
_enforce_constraints(b_to_a, c_to_a, e, p)
def test2():
"""Enforce expected scaling with halo mass
"""
npts = int(1e4)
b_to_a, c_to_a, e, p = monte_carlo_halo_shapes(np.zeros(npts) + 11)
b_to_a2, c_to_a2, e2, p2 = monte_carlo_halo_shapes(np.zeros(npts) + 15)
assert e.mean() < e2.mean(), "Higher-mass halos should be more elliptical"
assert p.mean() < p2.mean(), "Higher-mass halos should be more prolate"
assert b_to_a.mean() > b_to_a2.mean(), "Higher-mass halos should have more elongated axes"
assert c_to_a.mean() > c_to_a2.mean(), "Higher-mass halos should have more elongated axes"
def test3():
"""Enforce reasonable correlation coefficient between
axis ratios and ellipticity and prolaticity
"""
npts = int(1e4)
b_to_a, c_to_a, e, p = monte_carlo_halo_shapes(np.zeros(npts) + 11)
r = np.corrcoef(b_to_a, c_to_a)[0, 1]
assert r > 0.5, "b_to_a and c_to_a should be highly correlated"
r = np.corrcoef(e, p)[0, 1]
assert r > 0.5, "ellipticity and prolaticity should be highly correlated"
|
import os
import json
import numpy as np
import cv2
import matplotlib.pyplot as plt
root_dir = '../FS2K'
root_dir_photo = os.path.join(root_dir, 'photo')
root_dir_sketch = os.path.join(root_dir, 'sketch')
photo_paths = []
for photo_dir in os.listdir(root_dir_photo):
photo_dir = os.path.join(root_dir_photo, photo_dir)
photo_paths += sorted(os.listdir(photo_dir))
json_file_train = os.path.join(root_dir, 'anno_train.json')
json_file_test = os.path.join(root_dir, 'anno_test.json')
with open(json_file_test, 'r') as f:
json_data = json.loads(f.read())
attrs = {}
for attr in json_data[0].keys():
attrs[attr] = []
for idx_fs, fs in enumerate(json_data):
for attr in fs:
attrs[attr].append(fs[attr])
for idx_image_name, image_name in enumerate(attrs['image_name']):
if idx_image_name < 0:
continue
print('{}/{},'.format(idx_image_name+1, len(attrs['image_name'])), image_name)
image_name += '.jpg'
# Attributes
if 'nan' in attrs['lip_color'][idx_image_name]:
skin_color = 'nan'
lip_color = 'nan'
eye_color = 'nan'
hair_color = 'nan'
else:
skin_color = np.array(attrs['skin_color'][idx_image_name]).astype(int).tolist()
lip_color = np.array(attrs['lip_color'][idx_image_name]).astype(np.uint8)
eye_color = np.array(attrs['eye_color'][idx_image_name]).astype(np.uint8)
hair_color = np.array(attrs['hair_color'][idx_image_name]).astype(np.uint8)
hair = int(attrs['hair'][idx_image_name])
gender = int(attrs['gender'][idx_image_name])
earring = int(attrs['earring'][idx_image_name])
smile = int(attrs['smile'][idx_image_name])
frontal_face = int(attrs['frontal_face'][idx_image_name])
style = int(attrs['style'][idx_image_name])
# Processing
photo_path = os.path.join(root_dir, 'photo', image_name)
photo = cv2.imread(photo_path)
sketch_path = os.path.join(root_dir, 'photo', image_name).replace('photo', 'sketch').replace('image', 'sketch')
sketch = cv2.imread(sketch_path)
if sketch is None:
sketch = cv2.imread(sketch_path.replace('.jpg', '.png'))
if sketch is None:
sketch = cv2.imread(sketch_path.replace('.png', '.jpg'))
split_line = np.zeros((photo.shape[0], 10, 3), dtype=photo.dtype)
if 'nan' not in attrs['lip_color'][idx_image_name]:
cv2.rectangle(photo, (skin_color[0] - 10, skin_color[1] - 10), (skin_color[0] + 10, skin_color[1] + 10), (0, 0, 255), 1)
lip_color_region = (np.zeros((photo.shape[0], 50, 3)) + lip_color[::-1]).astype(photo.dtype)
eye_color_region = (np.zeros((photo.shape[0], 50, 3)) + eye_color[::-1]).astype(photo.dtype)
# for i in [photo, split_line, sketch, split_line, lip_color_region, split_line, eye_color_region]:
# print(i.shape)
# comp = cv2.hconcat([photo, split_line, cv2.resize(sketch, photo.shape[:2][::-1], cv2.INTER_LINEAR), split_line, lip_color_region, split_line, eye_color_region])
comp = cv2.hconcat([photo, split_line, sketch, split_line, lip_color_region, split_line, eye_color_region])
plt.figure(figsize=(16, 10))
plt.imshow(cv2.cvtColor(comp, cv2.COLOR_BGR2RGB))
if hair == 0:
hair = 'Yes'
else:
hair = 'No'
if hair_color == 0:
hair_color = 'Brown'
elif hair_color == 1:
hair_color = 'Black'
elif hair_color == 2:
hair_color = 'Red'
elif hair_color == 3:
hair_color = 'No'
elif hair_color == 4:
hair_color = 'Golden'
if gender == 0:
gender = 'Male'
else:
gender = 'Female'
if earring == 0:
earring = 'Yes'
else:
earring = 'No'
if smile == 0:
smile = 'No'
else:
smile = 'Yes'
if frontal_face == 0:
frontal_face = '<=30'
else:
frontal_face = '>30'
style += 1
plt.title('{}\nskin color selection region={},\nlip color={} (1st color bar),\neye color={} (2nd color bar),\nhair={}, hair color={}, gender={}, earring={}, smile={}, frontal_face={}, style={}'.format(
os.path.join('photo', image_name), skin_color, lip_color, eye_color, hair, hair_color, gender, earring, smile, frontal_face, style
))
plt.show()
|
from tkinter import *
class BillCalculator:
def __init__(self):
window = Tk()
window.title("Bill Calculator")
# input fields
Label(window, text = "How much is the bill?").grid(row = 1,column = 1, sticky = W)
Label(window, text = "How many people?").grid(row = 2, column = 1, sticky = W)
Label(window, text = "How much % tip?").grid(row = 3, column = 1, sticky = W)
Label(window, text = "Bill per person:").grid(row = 4, column = 1, sticky = W)
# for taking inputs
self.billVar = StringVar()
Entry(window, textvariable = self.billVar, justify = RIGHT).grid(row = 1, column = 2)
self.peopleVar = StringVar()
Entry(window, textvariable = self.peopleVar, justify = RIGHT).grid(row = 2, column = 2)
self.tipVar = StringVar()
Entry(window, textvariable = self.tipVar, justify = RIGHT).grid(row = 3, column = 2)
self.splitBillVar = StringVar()
lblSplitBill = Label(window, textvariable = self.splitBillVar).grid(row = 4, column = 2, sticky = E)
# calculate button
button_calculate = Button(window, text = "Calculate", command = self.calculateBill).grid(row = 6, column = 2, sticky = E)
window.mainloop() # Create an event loop
# calculate total payment
def calculateBill(self):
splitBill = self.totalSum(float(self.billVar.get()), float(self.tipVar.get()), int(self.peopleVar.get()))
self.splitBillVar.set(splitBill)
def totalSum(self, bill, tip, people):
splitBill = round(((bill + ((tip * bill) / 100)) / people), 2)
return splitBill
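# Example of the split calculation (illustrative): a 100.0 bill with a 15% tip
# shared by 4 people gives (100.0 + 15.0) / 4 = 28.75 per person.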
# run the program (BillCalculator creates and manages its own Tk window)
BillCalculator()
|
import sys
try:
from io import StringIO
from io import BytesIO
except ImportError:
from StringIO import StringIO as StringIO
from StringIO import StringIO as BytesIO
import unittest
import mitogen.core
from mitogen.core import b
import testlib
####
#### see also message_test.py / PickledTest
####
class BlobTest(testlib.TestCase):
klass = mitogen.core.Blob
def make(self):
return self.klass(b('x') * 128)
def test_repr(self):
blob = self.make()
self.assertEqual('[blob: 128 bytes]', repr(blob))
def test_decays_on_constructor(self):
blob = self.make()
self.assertEqual(b('x') * 128, mitogen.core.BytesType(blob))
def test_decays_on_write(self):
blob = self.make()
io = BytesIO()
io.write(blob)
self.assertEqual(128, io.tell())
self.assertEqual(b('x') * 128, io.getvalue())
def test_message_roundtrip(self):
blob = self.make()
msg = mitogen.core.Message.pickled(blob)
blob2 = msg.unpickle()
self.assertEqual(type(blob), type(blob2))
self.assertEqual(repr(blob), repr(blob2))
self.assertEqual(mitogen.core.BytesType(blob),
mitogen.core.BytesType(blob2))
class SecretTest(testlib.TestCase):
klass = mitogen.core.Secret
def make(self):
return self.klass('password')
def test_repr(self):
secret = self.make()
self.assertEqual('[secret]', repr(secret))
def test_decays_on_constructor(self):
secret = self.make()
self.assertEqual('password', mitogen.core.UnicodeType(secret))
def test_decays_on_write(self):
secret = self.make()
io = StringIO()
io.write(secret)
self.assertEqual(8, io.tell())
self.assertEqual('password', io.getvalue())
def test_message_roundtrip(self):
secret = self.make()
msg = mitogen.core.Message.pickled(secret)
secret2 = msg.unpickle()
self.assertEqual(type(secret), type(secret2))
self.assertEqual(repr(secret), repr(secret2))
self.assertEqual(mitogen.core.b(secret),
mitogen.core.b(secret2))
class KwargsTest(testlib.TestCase):
klass = mitogen.core.Kwargs
def test_empty(self):
kw = self.klass({})
self.assertEqual({}, kw)
self.assertEqual('Kwargs({})', repr(kw))
klass, (dct,) = kw.__reduce__()
self.assertTrue(klass is self.klass)
self.assertTrue(type(dct) is dict)
self.assertEqual({}, dct)
@unittest.skipIf(condition=(sys.version_info >= (2, 6)),
reason='py<2.6 only')
def test_bytes_conversion(self):
kw = self.klass({u'key': 123})
self.assertEqual({'key': 123}, kw)
self.assertEqual("Kwargs({'key': 123})", repr(kw))
@unittest.skipIf(condition=not mitogen.core.PY3,
reason='py3 only')
def test_unicode_conversion(self):
kw = self.klass({mitogen.core.b('key'): 123})
self.assertEqual({u'key': 123}, kw)
self.assertEqual("Kwargs({'key': 123})", repr(kw))
klass, (dct,) = kw.__reduce__()
self.assertTrue(klass is self.klass)
self.assertTrue(type(dct) is dict)
self.assertEqual({u'key': 123}, dct)
key, = dct
self.assertTrue(type(key) is mitogen.core.UnicodeType)
class AdornedUnicode(mitogen.core.UnicodeType):
pass
class ToTextTest(testlib.TestCase):
func = staticmethod(mitogen.core.to_text)
def test_bytes(self):
s = self.func(mitogen.core.b('bytes'))
self.assertEqual(mitogen.core.UnicodeType, type(s))
self.assertEqual(s, u'bytes')
def test_unicode(self):
s = self.func(u'text')
self.assertEqual(mitogen.core.UnicodeType, type(s))
self.assertEqual(s, u'text')
def test_adorned_unicode(self):
s = self.func(AdornedUnicode(u'text'))
self.assertEqual(mitogen.core.UnicodeType, type(s))
self.assertEqual(s, u'text')
def test_integer(self):
s = self.func(123)
self.assertEqual(mitogen.core.UnicodeType, type(s))
self.assertEqual(s, u'123')
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the i18n_dashboard module."""
__author__ = [
'johncox@google.com (John Cox)',
]
SITE_SETTINGS_CACHE_TRANSLATIONS = """
If "True", translations are cached. During course development you should turn
this setting to "False" so you can see your change instantaneously. Otherwise,
keep this setting at "True" to maximize performance.
"""
|
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from math import sqrt
import numpy as np
import scipy
from deap import cma, base, creator, tools #@UnresolvedImport
from pyxrd.refinement.refine_method import RefineMethod
from pyxrd.refinement.refine_method_option import RefineMethodOption
from pyxrd.refinement.refine_async_helper import RefineAsyncHelper
from .deap_utils import pyxrd_array, PyXRDParetoFront, FitnessMin, result_func
# Default settings:
NGEN = 100
STAGN_NGEN = 10
STAGN_TOL = 0.001
class Strategy(cma.Strategy):
"""
This evolutionary strategy supports the hybrid PSO-CMA runs using the
rotate_and_bias function (should be called after an update).
"""
def __init__(self, centroid, sigma, ranges, **kwargs):
self.ranges = ranges
super(Strategy, self).__init__(centroid, sigma, **kwargs)
def update(self, population):
"""Update the current covariance matrix strategy from the
*population*.
:param population: A list of individuals from which to update the
parameters.
"""
population.sort(key=lambda ind: ind.fitness, reverse=True)
selected_pop = self._translate_external(
np.array([ind.to_ndarray() for ind in population[0:self.mu]]))
old_centroid = self._translate_external(self.centroid)
centroid = np.dot(self.weights, selected_pop)
c_diff = centroid - old_centroid
# Cumulation : update evolution path
self.ps = (1 - self.cs) * self.ps \
+ sqrt(self.cs * (2 - self.cs) * self.mueff) / self.sigma \
* np.dot(self.B, (1. / self.diagD) \
* np.dot(self.B.T, c_diff))
hsig = float((np.linalg.norm(self.ps) /
sqrt(1. - (1. - self.cs) ** (2. * (self.update_count + 1.))) / self.chiN
< (1.4 + 2. / (self.dim + 1.))))
self.update_count += 1
self.pc = (1 - self.cc) * self.pc + hsig \
* sqrt(self.cc * (2 - self.cc) * self.mueff) / self.sigma \
* c_diff
# Update covariance matrix
artmp = selected_pop - old_centroid
new_C = (1 - self.ccov1 - self.ccovmu + (1 - hsig) \
* self.ccov1 * self.cc * (2 - self.cc)) * self.C \
+ self.ccov1 * np.outer(self.pc, self.pc) \
+ self.ccovmu * np.dot((self.weights * artmp.T), artmp) \
/ self.sigma ** 2
self.sigma *= np.exp((np.linalg.norm(self.ps) / self.chiN - 1.) \
* self.cs / self.damps)
try:
self.diagD, self.B = np.linalg.eigh(new_C)
except np.linalg.LinAlgError:
logger.warning(
"LinAlgError occurred when calculating eigenvalues" \
" and vectors for matrix C!\n%r" % new_C
)
else:
self.C = new_C
indx = np.argsort(self.diagD)
self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]]
self.diagD = self.diagD ** 0.5
self.B = self.B[:, indx]
self.BD = self.B * self.diagD
self.centroid = self._translate_internal(centroid)
def rotate_and_bias(self, global_best, tc=0.1, b=0.5, cp=0.5):
"""
Rotates the covariance matrix and biases the centroid of this
CMA population towards a global mean. Can be used to implement a
PSO-CMA hybrid algorithm.
"""
global_best = self._translate_external(global_best)
centroid = self._translate_external(self.centroid)
# Rotate towards global:
pg = np.array(global_best) - np.array(centroid)
Brot = self.__rotation_matrix(self.B[:, 0], pg) * self.B
Crot = Brot * (self.diagD ** 2) * Brot.T
self.C = cp * self.C + (1.0 - cp) * Crot
# Bias our mean towards global best mean:
npg = np.linalg.norm(pg)
nsigma = np.amax(self.sigma)
if nsigma < npg:
if nsigma / npg <= tc * npg:
bias = b * pg
else:
bias = nsigma / npg * pg
else:
bias = 0
centroid = centroid + bias
self.centroid = self._translate_internal(centroid)
pass
def _translate_internal(self, solutions):
# rule is: anything given as an argument in a public function or
# available as a public property should be within the external boundaries
return self.ranges[:, 0] + (self.ranges[:, 1] - self.ranges[:, 0]) * (1.0 - np.cos(solutions * np.pi)) / 2.0
def _translate_external(self, solutions):
return np.arccos(1 - 2 * (solutions - self.ranges[:, 0]) / (self.ranges[:, 1] - self.ranges[:, 0])) / np.pi
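    # _translate_internal maps an internal coordinate s (roughly in [0, 1]) to the
    # bounded parameter x = lo + (hi - lo) * (1 - cos(pi * s)) / 2, and
    # _translate_external applies the arccos inverse, so for s in [0, 1]
    # _translate_external(_translate_internal(s)) == s. This keeps the CMA
    # sampling confined to the refinement ranges.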
def generate(self, ind_init):
"""Generate a population from the current strategy using the
centroid individual as parent.
:param ind_init: A function object that is able to initialize an
individual from a list.
:returns: an iterator yielding the generated individuals.
"""
centroid = self._translate_external(self.centroid)
arz = np.random.standard_normal((self.lambda_, self.dim)) #@UndefinedVariable
arz = np.array(centroid) + self.sigma * np.dot(arz, self.BD.T) #@UndefinedVariable
arz = self._translate_internal(arz)
for arr in arz:
yield ind_init(arr)
def __rotation_matrix(self, vector, target):
""" Rotation matrix from one vector to another target vector.
        The solution is not unique, as any additional rotation perpendicular to
        the target vector will also yield a solution; however, the output is
        deterministic.
"""
R1 = self.__rotation_to_pole(target)
R2 = self.__rotation_to_pole(vector)
return np.dot(R1.T, R2)
def __rotation_to_pole(self, target):
""" Rotate to 1,0,0... """
n = len(target)
working = target
rm = np.eye(n)
for i in range(1, n):
angle = np.arctan2(working[0], working[i])
rm = np.dot(self.__rotation_matrix_inds(angle, n, 0, i), rm)
working = np.dot(rm, target)
return rm
def __rotation_matrix_inds(self, angle, n, ax1, ax2):
""" 'n'-dimensional rotation matrix 'angle' radians in coordinate plane with
indices 'ax1' and 'ax2' """
s = np.sin(angle)
c = np.cos(angle)
i = np.eye(n)
i[ax1, ax1] = s
i[ax1, ax2] = c
i[ax2, ax1] = c
i[ax2, ax2] = -s
return i
pass #end of class
class Algorithm(RefineAsyncHelper):
"""
This algorithm implements the ask-tell model proposed in
[Colette2010]_, where ask is called `generate` and tell is called `update`.
Modified (Mathijs Dumon) so it checks for stagnation.
"""
@property
def ngen(self):
return self._ngen
@ngen.setter
def ngen(self, value):
self._ngen = value
logger.info("Setting ngen to %d" % value)
_ngen = 100
gen = -1
halloffame = None
refiner = None
toolbox = None
stats = None
stagn_ngen = None
stagn_tol = None
verbose = False
#--------------------------------------------------------------------------
# Initialization
#--------------------------------------------------------------------------
def __init__(self, toolbox, halloffame, stats, ngen=NGEN,
verbose=__debug__, stagn_ngen=STAGN_NGEN,
stagn_tol=STAGN_TOL, refiner=None, stop=None):
"""
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param ngen: The number of generations.
:param halloffame: A :class:`~deap.tools.ParetoFront` object that will
contain the best individuals.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace.
:param verbose: Whether or not to log the statistics.
        :param stagn_ngen: The minimum number of generations to wait before
            checking for stagnation
        :param stagn_tol: The stagnation tolerance. Higher values make the
            stagnation check trigger more easily; values should fall between 0 and 1
:param refiner: PyXRD refiner object
:returns: The best individual and the final population.
The toolbox should contain a reference to the generate and the update method
of the chosen strategy.
Call the run() method when the algorithm should be run.
.. [Colette2010] Collette, Y., N. Hansen, G. Pujol, D. Salazar Aponte and
R. Le Riche (2010). On Object-Oriented Programming of Optimizers -
Examples in Scilab. In P. Breitkopf and R. F. Coelho, eds.:
Multidisciplinary Design Optimization in Computational Mechanics,
Wiley, pp. 527-565;
"""
self.stats = stats
self.toolbox = toolbox
self.ngen = ngen
self.halloffame = halloffame
self.verbose = verbose
self.stagn_ngen = stagn_ngen
self.stagn_tol = stagn_tol
self.refiner = refiner
self.gen = 0
self._stop = stop
#--------------------------------------------------------------------------
# Run method:
#--------------------------------------------------------------------------
def run(self):
"""Will run this algorithm"""
if self.verbose:
column_names = ["gen", "evals", "best"]
if self.stats is not None:
column_names += list(self.stats.functions.keys())
self.logbook = tools.Logbook()
self.logbook.header = column_names
for _ in range(self.ngen):
# Check if the user has cancelled:
if self._user_cancelled():
self.refiner.status.message = "Stopping..."
logger.info("User cancelled execution, stopping ...")
break
#ASK: Generate a new population:
population = self._ask()
#TELL: Update the strategy with the evaluated individuals
self._tell(population)
#RECORD: For stagnation checking & logging:
self._record(population)
#CHECK: whether we are stagnating:
if self._is_stagnating():
logging.info("CMA: stagnation detected!")
break
return self.refiner.history.best_solution, population
#--------------------------------------------------------------------------
# Stagnation calls:
#--------------------------------------------------------------------------
def _is_flat(self, yvals, xvals, slope_tolerance=0.001):
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xvals, yvals) #@UndefinedVariable @UnusedVariable
val = bool(abs(slope) <= slope_tolerance)
return val
def _is_stagnating(self):
self.refiner.status.message = "Checking for stagnation"
if self.gen >= self.stagn_ngen: # 10
std, best = self.logbook.select("std", "best")
std = np.array(std)[:, 0]
yvals1 = std[-(self.stagn_ngen - 1):]
xvals1 = list(range(len(yvals1)))
yvals2 = best[-(self.stagn_ngen - 1):]
xvals2 = list(range(len(yvals2)))
return self._is_flat(yvals1, xvals1, self.stagn_tol) and \
self._is_flat(yvals2, xvals2, self.stagn_tol)
else:
return False
#--------------------------------------------------------------------------
# Ask, tell & record:
#--------------------------------------------------------------------------
def _ask(self):
self.gen += 1
self.refiner.status.message = "Creating generation #%d" % self.gen
def result_f(*args):
self.refiner.update(*args)
result_func(*args)
population = self.do_async_evaluation(
self.toolbox.generate, result_func=result_f
)
if self.halloffame is not None:
self.halloffame.update(population)
return population
def _tell(self, population):
self.refiner.status.message = "Updating strategy"
self.toolbox.update(population)
def _record(self, population):
self.refiner.status.message = "Processing ..."
# Get the best solution so far:
best = self.halloffame.get_best()
best_f = best.fitness.values[0]
pop_size = len(population)
# Calculate stats & print something if needed:
record = self.stats.compile(population)
if self.verbose:
self.logbook.record(gen=self.gen, evals=pop_size, best=best_f, **record)
print(self.logbook.stream)
self.refiner.status.message = "Refiner update ..."
# Update the refiner:
self.refiner.update(best, iteration=self.gen, residual=best_f)
pass #end of class
class RefineCMAESRun(RefineMethod):
"""
The DEAP CMA-ES algorithm implementation with added stagnation thresholds
"""
name = "CMA-ES refinement"
description = "This algorithm uses the CMA-ES refinement strategy as implemented by DEAP"
index = 1
disabled = False
ngen = RefineMethodOption('Maximum # of generations', NGEN, [1, 10000], int)
stagn_ngen = RefineMethodOption('Minimum # of generations', STAGN_NGEN, [1, 10000], int)
stagn_tol = RefineMethodOption('Fitness slope tolerance', STAGN_TOL, [0., 100.], float)
def _individual_creator(self, refiner, bounds):
creator.create(
"Individual", pyxrd_array,
fitness=FitnessMin, # @UndefinedVariable
refiner=refiner,
min_bounds=bounds[:, 0].copy(),
max_bounds=bounds[:, 1].copy(),
)
def create_individual(lst):
arr = np.array(lst).clip(bounds[:, 0], bounds[:, 1]) #@UndefinedVariable
return creator.Individual(arr) # @UndefinedVariable
return create_individual
def _create_stats(self):
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean, axis=0) #@UndefinedVariable
stats.register("std", np.std, axis=0) #@UndefinedVariable
stats.register("min", np.min, axis=0) #@UndefinedVariable
stats.register("max", np.max, axis=0) #@UndefinedVariable
return stats
_has_been_setup = False
def _setup(self, refiner, ngen=NGEN, stagn_ngen=STAGN_NGEN, stagn_tol=STAGN_TOL, **kwargs):
if not self._has_been_setup:
logger.info("Setting up the DEAP CMA-ES refinement algorithm (ngen=%d)" % ngen)
refiner.status.message = "Setting up algorithm..."
# Process some general stuff:
bounds = np.array(refiner.ranges) #@UndefinedVariable
create_individual = self._individual_creator(refiner, bounds)
# Setup strategy:
centroid = create_individual(refiner.history.initial_solution)
strat_kwargs = {}
if "lambda_" in kwargs:
strat_kwargs["lambda_"] = kwargs.pop("lambda_")
strategy = Strategy(
centroid=centroid, sigma=1.0 / 10.0, ranges=bounds,
stop=self._stop, **strat_kwargs
)
# Toolbox setup:
toolbox = base.Toolbox()
toolbox.register("generate", strategy.generate, create_individual)
toolbox.register("update", strategy.update)
# Hall of fame & stats:
logger.info("Creating hall-off-fame and statistics")
halloffame = PyXRDParetoFront(similar=lambda a1, a2: np.all(a1 == a2)) #@UndefinedVariable
stats = self._create_stats()
# Create algorithm
self.algorithm = Algorithm(
toolbox, halloffame, stats, ngen=ngen,
stagn_ngen=stagn_ngen, stagn_tol=stagn_tol, refiner=refiner, stop=self._stop)
self._has_been_setup = True
return self.algorithm
def run(self, refiner, **kwargs):
logger.info("CMA-ES run invoked with %s" % kwargs)
self._has_been_setup = False #clear this for a new refinement
algorithm = self._setup(refiner, **kwargs)
# Get this show on the road:
logger.info("Running the CMA-ES algorithm...")
algorithm.run()
pass # end of class
|
from __future__ import print_function, division, absolute_import
import functools
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import PIL.Image
import PIL.ImageOps
import PIL.ImageEnhance
import PIL.ImageFilter
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import random as iarandom
from imgaug.testutils import reseed
def _test_shape_hw(func):
img = np.arange(20*10).reshape((20, 10)).astype(np.uint8)
observed = func(np.copy(img))
expected = func(
np.tile(np.copy(img)[:, :, np.newaxis], (1, 1, 3)),
)[:, :, 0]
assert observed.dtype.name == "uint8"
assert observed.shape == (20, 10)
assert np.array_equal(observed, expected)
def _test_shape_hw1(func):
img = np.arange(20*10*1).reshape((20, 10, 1)).astype(np.uint8)
observed = func(np.copy(img))
expected = func(
np.tile(np.copy(img), (1, 1, 3)),
)[:, :, 0:1]
assert observed.dtype.name == "uint8"
assert observed.shape == (20, 10, 1)
assert np.array_equal(observed, expected)
class Test_solarize_(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.pillike.solarize_(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.pillike.solarize_(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 5
assert observed == "foo"
def test_image_shape_hw(self):
func = functools.partial(iaa.pillike.solarize_, threshold=5)
_test_shape_hw(func)
def test_image_shape_hw1(self):
func = functools.partial(iaa.pillike.solarize_, threshold=5)
_test_shape_hw1(func)
class Test_solarize(unittest.TestCase):
def test_compare_with_pil(self):
def _solarize_pil(image, threshold):
img = PIL.Image.fromarray(image)
return np.asarray(PIL.ImageOps.solarize(img, threshold))
images = [
np.mod(np.arange(20*20*3), 255).astype(np.uint8)\
.reshape((20, 20, 3)),
iarandom.RNG(0).integers(0, 256, size=(1, 1, 3), dtype="uint8"),
iarandom.RNG(1).integers(0, 256, size=(20, 20, 3), dtype="uint8"),
iarandom.RNG(2).integers(0, 256, size=(40, 40, 3), dtype="uint8"),
iarandom.RNG(0).integers(0, 256, size=(20, 20), dtype="uint8")
]
for image_idx, image in enumerate(images):
for threshold in np.arange(256):
with self.subTest(image_idx=image_idx, threshold=threshold):
image_pil = _solarize_pil(image, threshold)
image_iaa = iaa.pillike.solarize(image, threshold)
assert np.array_equal(image_pil, image_iaa)
def test_image_shape_hw(self):
func = functools.partial(iaa.pillike.solarize, threshold=5)
_test_shape_hw(func)
def test_image_shape_hw1(self):
func = functools.partial(iaa.pillike.solarize, threshold=5)
_test_shape_hw1(func)
class Test_posterize(unittest.TestCase):
def test_by_comparison_with_pil(self):
image = np.arange(64*64*3).reshape((64, 64, 3))
image = np.mod(image, 255).astype(np.uint8)
for nb_bits in [1, 2, 3, 4, 5, 6, 7, 8]:
image_iaa = iaa.pillike.posterize(np.copy(image), nb_bits)
image_pil = np.asarray(
PIL.ImageOps.posterize(
PIL.Image.fromarray(image),
nb_bits
)
)
assert np.array_equal(image_iaa, image_pil)
def test_image_shape_hw(self):
func = functools.partial(iaa.pillike.posterize, bits=2)
_test_shape_hw(func)
def test_image_shape_hw1(self):
func = functools.partial(iaa.pillike.posterize, bits=2)
_test_shape_hw1(func)
class Test_equalize(unittest.TestCase):
def test_by_comparison_with_pil(self):
shapes = [
(1, 1),
(2, 1),
(1, 2),
(2, 2),
(5, 5),
(10, 5),
(5, 10),
(10, 10),
(20, 20),
(100, 100),
(100, 200),
(200, 100),
(200, 200)
]
shapes = shapes + [shape + (3,) for shape in shapes]
rng = iarandom.RNG(0)
images = [rng.integers(0, 255, size=shape).astype(np.uint8)
for shape in shapes]
images = images + [
np.full((10, 10), 0, dtype=np.uint8),
np.full((10, 10), 128, dtype=np.uint8),
np.full((10, 10), 255, dtype=np.uint8)
]
for i, image in enumerate(images):
mask_vals = [False, True] if image.size >= (100*100) else [False]
for use_mask in mask_vals:
with self.subTest(image_idx=i, shape=image.shape,
use_mask=use_mask):
mask_np = None
mask_pil = None
if use_mask:
mask_np = np.zeros(image.shape[0:2], dtype=np.uint8)
mask_np[25:75, 25:75] = 1
mask_pil = PIL.Image.fromarray(mask_np).convert("L")
image_iaa = iaa.pillike.equalize(image, mask=mask_np)
image_pil = np.asarray(
PIL.ImageOps.equalize(
PIL.Image.fromarray(image),
mask=mask_pil
)
)
assert np.array_equal(image_iaa, image_pil)
def test_unusual_channel_numbers(self):
nb_channels_lst = [1, 2, 4, 5, 512, 513]
for nb_channels in nb_channels_lst:
for size in [20, 100]:
with self.subTest(nb_channels=nb_channels,
size=size):
shape = (size, size, nb_channels)
image = iarandom.RNG(0).integers(50, 150, size=shape)
image = image.astype(np.uint8)
image_aug = iaa.pillike.equalize(image)
if size > 1:
channelwise_sums = np.sum(image_aug, axis=(0, 1))
assert np.all(channelwise_sums > 0)
assert np.min(image_aug) < 50
assert np.max(image_aug) > 150
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.pillike.equalize(image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_image_shape_hw(self):
func = functools.partial(iaa.pillike.equalize)
_test_shape_hw(func)
    # already covered by the unusual channel numbers test, but we run this one
    # here anyway for consistency with other tests and because it works a bit
    # differently
def test_image_shape_hw1(self):
func = functools.partial(iaa.pillike.equalize)
_test_shape_hw1(func)
class Test_autocontrast(unittest.TestCase):
def test_by_comparison_with_pil(self):
rng = iarandom.RNG(0)
shapes = [
(1, 1),
(10, 10),
(1, 1, 3),
(1, 2, 3),
(2, 1, 3),
(2, 2, 3),
(5, 3, 3),
(10, 5, 3),
(5, 10, 3),
(10, 10, 3),
(20, 10, 3),
(20, 40, 3),
(50, 60, 3),
(100, 100, 3),
(200, 100, 3)
]
images = [
rng.integers(0, 255, size=shape).astype(np.uint8)
for shape in shapes
]
images = (
images
+ [
np.full((1, 1, 3), 0, dtype=np.uint8),
np.full((1, 1, 3), 255, dtype=np.uint8),
np.full((20, 20, 3), 0, dtype=np.uint8),
np.full((20, 20, 3), 255, dtype=np.uint8)
]
)
cutoffs = [0, 1, 2, 10, 50, 90, 99, 100]
ignores = [None, 0, 1, 100, 255, [0, 1], [5, 10, 50], [99, 100]]
for cutoff in cutoffs:
for ignore in ignores:
for i, image in enumerate(images):
with self.subTest(cutoff=cutoff, ignore=ignore,
image_idx=i, image_shape=image.shape):
result_pil = np.asarray(
PIL.ImageOps.autocontrast(
PIL.Image.fromarray(image),
cutoff=cutoff,
ignore=ignore
)
)
result_iaa = iaa.pillike.autocontrast(image,
cutoff=cutoff,
ignore=ignore)
assert np.array_equal(result_pil, result_iaa)
def test_unusual_channel_numbers(self):
nb_channels_lst = [1, 2, 4, 5, 512, 513]
for nb_channels in nb_channels_lst:
for size in [20]:
for cutoff in [0, 1, 10]:
with self.subTest(nb_channels=nb_channels,
size=size,
cutoff=cutoff):
shape = (size, size, nb_channels)
image = iarandom.RNG(0).integers(50, 150, size=shape)
image = image.astype(np.uint8)
image_aug = iaa.pillike.autocontrast(image,
cutoff=cutoff)
if size > 1:
channelwise_sums = np.sum(image_aug, axis=(0, 1))
assert np.all(channelwise_sums > 0)
assert np.min(image_aug) < 50
assert np.max(image_aug) > 150
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for cutoff in [0, 1, 10]:
for ignore in [None, 0, 1, [0, 1, 10]]:
with self.subTest(shape=shape, cutoff=cutoff,
ignore=ignore):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.pillike.autocontrast(image,
cutoff=cutoff,
ignore=ignore)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_image_shape_hw(self):
func = functools.partial(iaa.pillike.autocontrast)
_test_shape_hw(func)
    # already covered by the unusual channel numbers test, but we run this one
    # here anyway for consistency with other tests and because it works a bit
    # differently
def test_image_shape_hw1(self):
func = functools.partial(iaa.pillike.autocontrast)
_test_shape_hw1(func)
# TODO add test for unusual channel numbers
class _TestEnhanceFunc(unittest.TestCase):
def _test_by_comparison_with_pil(
self, func, cls,
factors=(0.0, 0.01, 0.1, 0.5, 0.95, 0.99, 1.0, 1.05, 1.5, 2.0,
3.0)):
shapes = [(224, 224, 3), (32, 32, 3), (16, 8, 3), (1, 1, 3),
(32, 32, 4)]
seeds = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for seed in seeds:
for shape in shapes:
for factor in factors:
with self.subTest(shape=shape, seed=seed, factor=factor):
image = iarandom.RNG(seed).integers(
0, 256, size=shape, dtype="uint8")
image_iaa = func(image, factor)
image_pil = np.asarray(
cls(
PIL.Image.fromarray(image)
).enhance(factor)
)
assert np.array_equal(image_iaa, image_pil)
def _test_zero_sized_axes(self, func,
factors=(0.0, 0.4, 1.0)):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for factor in factors:
with self.subTest(shape=shape, factor=factor):
image = np.zeros(shape, dtype=np.uint8)
image_aug = func(image, factor=factor)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
@classmethod
    def _test_image_shape_hw(cls, func):
func = functools.partial(func, factor=0.2)
_test_shape_hw(func)
@classmethod
    def _test_image_shape_hw1(cls, func):
func = functools.partial(func, factor=0.2)
_test_shape_hw1(func)
class Test_enhance_color(_TestEnhanceFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.enhance_color,
PIL.ImageEnhance.Color)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.enhance_color)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.enhance_color)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.enhance_color)
class Test_enhance_contrast(_TestEnhanceFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.enhance_contrast,
PIL.ImageEnhance.Contrast)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.enhance_contrast)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.enhance_contrast)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.enhance_contrast)
class Test_enhance_brightness(_TestEnhanceFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.enhance_brightness,
PIL.ImageEnhance.Brightness)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.enhance_brightness)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.enhance_brightness)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.enhance_brightness)
class Test_enhance_sharpness(_TestEnhanceFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.enhance_sharpness,
PIL.ImageEnhance.Sharpness)
def test_zero_sized_axes(self):
        self._test_zero_sized_axes(iaa.pillike.enhance_sharpness,
factors=[0.0, 0.4, 1.0, 1.5, 2.0])
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.enhance_sharpness)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.enhance_sharpness)
class _TestFilterFunc(unittest.TestCase):
def _test_by_comparison_with_pil(self, func, pil_kernel):
shapes = [(224, 224, 3), (32, 32, 3), (16, 8, 3), (1, 1, 3),
(32, 32, 4)]
seeds = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for seed in seeds:
for shape in shapes:
with self.subTest(shape=shape, seed=seed):
image = iarandom.RNG(seed).integers(
0, 256, size=shape, dtype="uint8")
image_iaa = func(image)
image_pil = np.asarray(
PIL.Image.fromarray(image).filter(pil_kernel)
)
assert np.array_equal(image_iaa, image_pil)
def _test_zero_sized_axes(self, func):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = func(image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
@classmethod
    def _test_image_shape_hw(cls, func):
_test_shape_hw(func)
@classmethod
    def _test_image_shape_hw1(cls, func):
_test_shape_hw1(func)
class Test_filter_blur(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_blur,
PIL.ImageFilter.BLUR)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_blur)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_blur)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_blur)
class Test_filter_smooth(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_smooth,
PIL.ImageFilter.SMOOTH)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_smooth)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_smooth)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_smooth)
class Test_filter_smooth_more(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_smooth_more,
PIL.ImageFilter.SMOOTH_MORE)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_smooth_more)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_smooth_more)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_smooth_more)
class Test_filter_edge_enhance(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_edge_enhance,
PIL.ImageFilter.EDGE_ENHANCE)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_edge_enhance)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_edge_enhance)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_edge_enhance)
class Test_filter_edge_enhance_more(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_edge_enhance_more,
PIL.ImageFilter.EDGE_ENHANCE_MORE)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_edge_enhance_more)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_edge_enhance_more)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_edge_enhance_more)
class Test_filter_find_edges(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_find_edges,
PIL.ImageFilter.FIND_EDGES)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_find_edges)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_find_edges)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_find_edges)
class Test_filter_contour(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_contour,
PIL.ImageFilter.CONTOUR)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_contour)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_contour)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_contour)
class Test_filter_emboss(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_emboss,
PIL.ImageFilter.EMBOSS)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_emboss)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_emboss)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_emboss)
class Test_filter_sharpen(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_sharpen,
PIL.ImageFilter.SHARPEN)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_sharpen)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_sharpen)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_sharpen)
class Test_filter_detail(_TestFilterFunc):
def test_by_comparison_with_pil(self):
self._test_by_comparison_with_pil(iaa.pillike.filter_detail,
PIL.ImageFilter.DETAIL)
def test_zero_sized_axes(self):
self._test_zero_sized_axes(iaa.pillike.filter_detail)
def test_image_shape_hw(self):
self._test_image_shape_hw(iaa.pillike.filter_detail)
def test_image_shape_hw1(self):
self._test_image_shape_hw1(iaa.pillike.filter_detail)
class Test_warp_affine(unittest.TestCase):
def _test_aff_by_comparison_with_pil(self, arg_name, arg_values,
matrix_gen):
shapes = [(64, 64, 3), (32, 32, 3), (16, 8, 3), (1, 1, 3),
(32, 32, 4)]
seeds = [1, 2, 3]
fillcolors = [None, 0, 128, (0, 255, 0)]
for shape in shapes:
for seed in seeds:
for fillcolor in fillcolors:
for arg_value in arg_values:
with self.subTest(shape=shape, seed=seed,
fillcolor=fillcolor,
**{arg_name: arg_value}):
image = iarandom.RNG(seed).integers(
0, 256, size=shape, dtype="uint8")
matrix = matrix_gen(arg_value)
image_warped = iaa.pillike.warp_affine(
image,
fillcolor=fillcolor,
center=(0.0, 0.0),
**{arg_name: arg_value})
image_warped_exp = np.asarray(
PIL.Image.fromarray(
image
).transform(shape[0:2][::-1],
PIL.Image.AFFINE,
matrix[:2, :].flat,
fillcolor=fillcolor)
)
assert np.array_equal(image_warped,
image_warped_exp)
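    # Note: PIL's Image.transform expects the inverse mapping (output pixel ->
    # input pixel), so the matrix generators below use 1/scale and -translate
    # to express a forward scale/translation of the image content.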
def test_scale_x_by_comparison_with_pil(self):
def _matrix_gen(scale):
return np.float32([
[1/scale, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
self._test_aff_by_comparison_with_pil(
"scale_x",
[0.01, 0.1, 0.9, 1.0, 1.5, 3.0],
_matrix_gen
)
def test_scale_y_by_comparison_with_pil(self):
def _matrix_gen(scale):
return np.float32([
[1, 0, 0],
[0, 1/scale, 0],
[0, 0, 1]
])
self._test_aff_by_comparison_with_pil(
"scale_y",
[0.01, 0.1, 0.9, 1.0, 1.5, 3.0],
_matrix_gen
)
def test_translate_x_by_comparison_with_pil(self):
def _matrix_gen(translate):
return np.float32([
[1, 0, -translate],
[0, 1, 0],
[0, 0, 1]
])
self._test_aff_by_comparison_with_pil(
"translate_x_px",
[-50, -10, -1, 0, 1, 10, 50],
_matrix_gen
)
def test_translate_y_by_comparison_with_pil(self):
def _matrix_gen(translate):
return np.float32([
[1, 0, 0],
[0, 1, -translate],
[0, 0, 1]
])
self._test_aff_by_comparison_with_pil(
"translate_y_px",
[-50, -10, -1, 0, 1, 10, 50],
_matrix_gen
)
def test_rotate_by_comparison_with_pil(self):
def _matrix_gen(rotate):
r = np.deg2rad(rotate)
return np.float32([
[np.cos(r), np.sin(r), 0],
[-np.sin(r), np.cos(r), 0],
[0, 0, 1]
])
self._test_aff_by_comparison_with_pil(
"rotate_deg",
[-50, -10, -1, 0, 1, 10, 50],
_matrix_gen
)
def test_shear_x_by_comparison_with_pil(self):
def _matrix_gen(shear):
s = (-1) * np.deg2rad(shear)
return np.float32([
[1, np.tanh(s), 0],
[0, 1, 0],
[0, 0, 1]
])
self._test_aff_by_comparison_with_pil(
"shear_x_deg",
[-50, -10, -1, 0, 1, 10, 50],
_matrix_gen
)
def test_shear_y_by_comparison_with_pil(self):
def _matrix_gen(shear):
s = (-1) * np.deg2rad(shear)
return np.float32([
[1, 0, 0],
[np.tanh(s), 1, 0],
[0, 0, 1]
])
self._test_aff_by_comparison_with_pil(
"shear_y_deg",
[-50, -10, -1, 0, 1, 10, 50],
_matrix_gen
)
def test_scale_x(self):
image = np.zeros((100, 100, 3), dtype=np.uint8)
image[50, 60] = 255
image_aug = iaa.pillike.warp_affine(image, scale_x=1.5)
y, x = np.unravel_index(np.argmax(image_aug[..., 0]),
image_aug.shape[0:2])
assert 50 - 1 <= y <= 50 + 1
assert x > 60
def test_scale_y(self):
image = np.zeros((100, 100, 3), dtype=np.uint8)
image[60, 50] = 255
image_aug = iaa.pillike.warp_affine(image, scale_y=1.5)
y, x = np.unravel_index(np.argmax(image_aug[..., 0]),
image_aug.shape[0:2])
assert 50 - 1 <= x <= 50 + 1
assert y > 60
def test_translate_x_px(self):
image = np.zeros((20, 20, 3), dtype=np.uint8)
image[10, 15] = 255
image_aug = iaa.pillike.warp_affine(image, translate_x_px=1)
assert image_aug[10, 15, 0] == 0
assert image_aug[10, 16, 0] == 255
assert np.all(image_aug[0, :] == 0)
def test_translate_y_px(self):
image = np.zeros((20, 20, 3), dtype=np.uint8)
image[15, 10] = 255
image_aug = iaa.pillike.warp_affine(image, translate_y_px=1)
assert image_aug[15, 10, 0] == 0
assert image_aug[16, 10, 0] == 255
assert np.all(image_aug[:, 0] == 0)
def test_rotate(self):
image = np.zeros((20, 20, 3), dtype=np.uint8)
image[0, 10] = 255
image_aug = iaa.pillike.warp_affine(image,
rotate_deg=45,
center=(0.0, 0.0))
assert image_aug[7, 7, 0] == 255
def test_shear_x(self):
image = np.zeros((20, 20, 3), dtype=np.uint8)
image[5, 10] = 255
image_aug = iaa.pillike.warp_affine(image,
shear_x_deg=20,
center=(0.0, 0.0))
y, x = np.unravel_index(np.argmax(image_aug[..., 0]),
image_aug.shape[0:2])
assert y == 5
assert x > 10
def test_shear_y(self):
image = np.zeros((20, 20, 3), dtype=np.uint8)
image[10, 15] = 255
image_aug = iaa.pillike.warp_affine(image,
shear_y_deg=20,
center=(0.0, 0.0))
y, x = np.unravel_index(np.argmax(image_aug[..., 0]),
image_aug.shape[0:2])
assert y > 10
assert x == 15
def test_fillcolor_is_none(self):
image = np.ones((20, 20, 3), dtype=np.uint8)
image_aug = iaa.pillike.warp_affine(image,
translate_x_px=1,
fillcolor=None)
assert np.all(image_aug[:, :1, :] == 0)
assert np.all(image_aug[:, 1:, :] == 1)
def test_fillcolor_is_int(self):
image = np.ones((20, 20, 3), dtype=np.uint8)
image_aug = iaa.pillike.warp_affine(image,
translate_x_px=1,
fillcolor=128)
assert np.all(image_aug[:, :1, 0] == 128)
assert np.all(image_aug[:, :1, 1] == 0)
assert np.all(image_aug[:, :1, 2] == 0)
assert np.all(image_aug[:, 1:, :] == 1)
def test_fillcolor_is_int_grayscale(self):
image = np.ones((20, 20), dtype=np.uint8)
image_aug = iaa.pillike.warp_affine(image,
translate_x_px=1,
fillcolor=128)
assert np.all(image_aug[:, :1] == 128)
assert np.all(image_aug[:, 1:] == 1)
def test_fillcolor_is_tuple(self):
image = np.ones((20, 20, 3), dtype=np.uint8)
image_aug = iaa.pillike.warp_affine(image,
translate_x_px=1,
fillcolor=(2, 3, 4))
assert np.all(image_aug[:, :1, 0] == 2)
assert np.all(image_aug[:, :1, 1] == 3)
assert np.all(image_aug[:, :1, 2] == 4)
assert np.all(image_aug[:, 1:, :] == 1)
def test_fillcolor_is_tuple_more_values_than_channels(self):
image = np.ones((20, 20, 3), dtype=np.uint8)
image_aug = iaa.pillike.warp_affine(image,
translate_x_px=1,
fillcolor=(2, 3, 4, 5))
assert image_aug.shape == (20, 20, 3)
assert np.all(image_aug[:, :1, 0] == 2)
assert np.all(image_aug[:, :1, 1] == 3)
assert np.all(image_aug[:, :1, 2] == 4)
assert np.all(image_aug[:, 1:, :] == 1)
def test_center(self):
image = np.zeros((21, 21, 3), dtype=np.uint8)
image[2, 10] = 255
image_aug = iaa.pillike.warp_affine(image,
rotate_deg=90,
center=(0.5, 0.5))
assert image_aug[10, 18, 0] == 255
def test_image_shape_hw(self):
func = functools.partial(iaa.pillike.warp_affine, rotate_deg=90)
_test_shape_hw(func)
def test_image_shape_hw1(self):
func = functools.partial(iaa.pillike.warp_affine, rotate_deg=90)
_test_shape_hw1(func)
class TestSolarize(unittest.TestCase):
def setUp(self):
reseed()
def test_returns_correct_instance(self):
aug = iaa.pillike.Solarize()
assert isinstance(aug, iaa.Invert)
assert aug.per_channel.value == 0
assert aug.min_value is None
assert aug.max_value is None
assert np.isclose(aug.threshold.value, 128)
assert aug.invert_above_threshold.value == 1
class TestPosterize(unittest.TestCase):
def setUp(self):
reseed()
def test_returns_posterize(self):
aug = iaa.pillike.Posterize()
assert isinstance(aug, iaa.Posterize)
class TestEqualize(unittest.TestCase):
def setUp(self):
reseed()
@mock.patch("imgaug.augmenters.pillike.equalize_")
def test_mocked(self, mock_eq):
image = np.arange(1*1*3).astype(np.uint8).reshape((1, 1, 3))
mock_eq.return_value = np.copy(image)
aug = iaa.pillike.Equalize()
_image_aug = aug(image=image)
assert mock_eq.call_count == 1
assert np.array_equal(mock_eq.call_args_list[0][0][0], image)
def test_integrationtest(self):
rng = iarandom.RNG(0)
for size in [20, 100]:
shape = (size, size, 3)
image = rng.integers(50, 150, size=shape)
image = image.astype(np.uint8)
aug = iaa.pillike.Equalize()
image_aug = aug(image=image)
if size > 1:
channelwise_sums = np.sum(image_aug, axis=(0, 1))
assert np.all(channelwise_sums > 0)
assert np.min(image_aug) < 50
assert np.max(image_aug) > 150
class TestAutocontrast(unittest.TestCase):
def setUp(self):
reseed()
@mock.patch("imgaug.augmenters.pillike.autocontrast")
def test_mocked(self, mock_auto):
image = np.mod(np.arange(10*10*3), 255)
image = image.reshape((10, 10, 3)).astype(np.uint8)
mock_auto.return_value = image
aug = iaa.pillike.Autocontrast(15)
_image_aug = aug(image=image)
assert np.array_equal(mock_auto.call_args_list[0][0][0], image)
assert mock_auto.call_args_list[0][0][1] == 15
@mock.patch("imgaug.augmenters.pillike.autocontrast")
def test_per_channel(self, mock_auto):
image = np.mod(np.arange(10*10*1), 255)
image = image.reshape((10, 10, 1)).astype(np.uint8)
image = np.tile(image, (1, 1, 100))
mock_auto.return_value = image[..., 0]
aug = iaa.pillike.Autocontrast((0, 30), per_channel=True)
_image_aug = aug(image=image)
assert mock_auto.call_count == 100
cutoffs = []
for i in np.arange(100):
assert np.array_equal(mock_auto.call_args_list[i][0][0],
image[..., i])
cutoffs.append(mock_auto.call_args_list[i][0][1])
assert len(set(cutoffs)) > 10
def test_integrationtest(self):
image = iarandom.RNG(0).integers(50, 150, size=(100, 100, 3))
image = image.astype(np.uint8)
aug = iaa.pillike.Autocontrast(10)
image_aug = aug(image=image)
assert np.min(image_aug) < 50
assert np.max(image_aug) > 150
def test_integrationtest_per_channel(self):
image = iarandom.RNG(0).integers(50, 150, size=(100, 100, 50))
image = image.astype(np.uint8)
aug = iaa.pillike.Autocontrast(10, per_channel=True)
image_aug = aug(image=image)
assert np.min(image_aug) < 50
assert np.max(image_aug) > 150
class TestEnhanceColor(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.pillike.EnhanceColor()
assert np.isclose(aug.factor.a.value, 0.0)
assert np.isclose(aug.factor.b.value, 3.0)
def test___init___custom(self):
aug = iaa.pillike.EnhanceColor(0.75)
assert np.isclose(aug.factor.value, 0.75)
@mock.patch("imgaug.augmenters.pillike.enhance_color")
def test_mocked(self, mock_pilcol):
aug = iaa.pillike.EnhanceColor(0.75)
image = np.zeros((1, 1, 3), dtype=np.uint8)
mock_pilcol.return_value = np.full((1, 1, 3), 128, dtype=np.uint8)
image_aug = aug(image=image)
assert mock_pilcol.call_count == 1
assert ia.is_np_array(mock_pilcol.call_args_list[0][0][0])
assert np.isclose(mock_pilcol.call_args_list[0][0][1], 0.75, rtol=0,
atol=1e-4)
assert np.all(image_aug == 128)
def test_simple_image(self):
aug = iaa.pillike.EnhanceColor(0.0)
image = np.zeros((1, 1, 3), dtype=np.uint8)
image[:, :, 0] = 255
image[:, :, 1] = 255
image_aug = aug(image=image)
assert image_aug[:, :, 2] > 200
assert np.all(image_aug[:, :, 0] == image_aug[:, :, 1])
assert np.all(image_aug[:, :, 0] == image_aug[:, :, 2])
def test_batch_contains_no_images(self):
aug = iaa.pillike.EnhanceColor(0.75)
hm_arr = np.ones((3, 3, 1), dtype=np.float32)
hm = ia.HeatmapsOnImage(hm_arr, shape=(3, 3, 3))
hm_aug = aug(heatmaps=hm)
assert np.allclose(hm_aug.get_arr(), hm.get_arr())
def test_get_parameters(self):
aug = iaa.pillike.EnhanceColor(0.75)
params = aug.get_parameters()
assert params[0] is aug.factor
# we don't have to test very much here, because some functions of the base
# class are already tested via EnhanceColor
class TestEnhanceContrast(unittest.TestCase):
def setUp(self):
reseed()
@mock.patch("imgaug.augmenters.pillike.enhance_contrast")
def test_mocked(self, mock_pilco):
aug = iaa.pillike.EnhanceContrast(0.75)
image = np.zeros((1, 1, 3), dtype=np.uint8)
mock_pilco.return_value = np.full((1, 1, 3), 128, dtype=np.uint8)
image_aug = aug(image=image)
assert mock_pilco.call_count == 1
assert ia.is_np_array(mock_pilco.call_args_list[0][0][0])
assert np.isclose(mock_pilco.call_args_list[0][0][1], 0.75, rtol=0,
atol=1e-4)
assert np.all(image_aug == 128)
def test_simple_image(self):
aug = iaa.pillike.EnhanceContrast(0.0)
image = np.full((2, 2, 3), 128, dtype=np.uint8)
image[0, :, :] = 200
image_aug = aug(image=image)
diff_before = np.average(np.abs(image.astype(np.int32)
- np.average(image)))
diff_after = np.average(np.abs(image_aug.astype(np.int32)
- np.average(image_aug)))
assert diff_after < diff_before
def test_batch_contains_no_images(self):
aug = iaa.pillike.EnhanceContrast(0.75)
hm_arr = np.ones((3, 3, 1), dtype=np.float32)
hm = ia.HeatmapsOnImage(hm_arr, shape=(3, 3, 3))
hm_aug = aug(heatmaps=hm)
assert np.allclose(hm_aug.get_arr(), hm.get_arr())
# we don't have to test very much here, because some functions of the base
# class are already tested via EnhanceColor
class TestEnhanceBrightness(unittest.TestCase):
def setUp(self):
reseed()
@mock.patch("imgaug.augmenters.pillike.enhance_brightness")
def test_mocked(self, mock_pilbr):
aug = iaa.pillike.EnhanceBrightness(0.75)
image = np.zeros((1, 1, 3), dtype=np.uint8)
mock_pilbr.return_value = np.full((1, 1, 3), 128, dtype=np.uint8)
image_aug = aug(image=image)
assert mock_pilbr.call_count == 1
assert ia.is_np_array(mock_pilbr.call_args_list[0][0][0])
assert np.isclose(mock_pilbr.call_args_list[0][0][1], 0.75, rtol=0,
atol=1e-4)
assert np.all(image_aug == 128)
def test_simple_image(self):
aug = iaa.pillike.EnhanceBrightness(0.0)
image = np.full((2, 2, 3), 255, dtype=np.uint8)
image_aug = aug(image=image)
assert np.all(image_aug < 255)
def test_batch_contains_no_images(self):
aug = iaa.pillike.EnhanceBrightness(0.75)
hm_arr = np.ones((3, 3, 1), dtype=np.float32)
hm = ia.HeatmapsOnImage(hm_arr, shape=(3, 3, 3))
hm_aug = aug(heatmaps=hm)
assert np.allclose(hm_aug.get_arr(), hm.get_arr())
# we don't have to test very much here, because some functions of the base
# class are already tested via EnhanceColor
class TestEnhanceSharpness(unittest.TestCase):
def setUp(self):
reseed()
@mock.patch("imgaug.augmenters.pillike.enhance_sharpness")
def test_mocked(self, mock_pilsh):
aug = iaa.pillike.EnhanceSharpness(0.75)
image = np.zeros((3, 3, 3), dtype=np.uint8)
mock_pilsh.return_value = np.full((3, 3, 3), 128, dtype=np.uint8)
image_aug = aug(image=image)
assert mock_pilsh.call_count == 1
assert ia.is_np_array(mock_pilsh.call_args_list[0][0][0])
assert np.isclose(mock_pilsh.call_args_list[0][0][1], 0.75, rtol=0,
atol=1e-4)
assert np.all(image_aug == 128)
def test_simple_image(self):
aug = iaa.pillike.EnhanceSharpness(2.0)
image = np.full((3, 3, 3), 64, dtype=np.uint8)
image[1, 1, :] = 128
image_aug = aug(image=image)
assert np.all(image_aug[1, 1, :] > 128)
def test_batch_contains_no_images(self):
aug = iaa.pillike.EnhanceSharpness(0.75)
hm_arr = np.ones((3, 3, 1), dtype=np.float32)
hm = ia.HeatmapsOnImage(hm_arr, shape=(3, 3, 3))
hm_aug = aug(heatmaps=hm)
assert np.allclose(hm_aug.get_arr(), hm.get_arr())
class _TestFilter(unittest.TestCase):
def _test___init__(self, cls, func):
aug = cls()
assert aug.func is func
def _test_image(self, cls, pil_kernel):
image = ia.quokka(0.25)
image_aug = cls()(image=image)
image_aug_pil = PIL.Image.fromarray(image).filter(pil_kernel)
assert np.array_equal(image_aug, image_aug_pil)
class FilterBlur(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterBlur,
iaa.pillike.filter_blur)
def test_image(self):
self._test_image(iaa.pillike.FilterBlur,
PIL.ImageFilter.BLUR)
class FilterSmooth(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterSmooth,
iaa.pillike.filter_smooth)
def test_image(self):
self._test_image(iaa.pillike.FilterSmooth,
PIL.ImageFilter.SMOOTH)
class FilterSmoothMore(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterSmoothMore,
iaa.pillike.filter_smooth_more)
def test_image(self):
self._test_image(iaa.pillike.FilterSmoothMore,
PIL.ImageFilter.SMOOTH_MORE)
class FilterEdgeEnhance(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterEdgeEnhance,
iaa.pillike.filter_edge_enhance)
def test_image(self):
self._test_image(iaa.pillike.FilterEdgeEnhance,
PIL.ImageFilter.EDGE_ENHANCE)
class FilterEdgeEnhanceMore(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterEdgeEnhanceMore,
iaa.pillike.filter_edge_enhance_more)
def test_image(self):
self._test_image(iaa.pillike.FilterEdgeEnhanceMore,
PIL.ImageFilter.EDGE_ENHANCE_MORE)
class FilterFindEdges(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterFindEdges,
iaa.pillike.filter_find_edges)
def test_image(self):
self._test_image(iaa.pillike.FilterFindEdges,
PIL.ImageFilter.FIND_EDGES)
class FilterContour(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterContour,
iaa.pillike.filter_contour)
def test_image(self):
self._test_image(iaa.pillike.FilterContour,
PIL.ImageFilter.CONTOUR)
class FilterEmboss(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterEmboss,
iaa.pillike.filter_emboss)
def test_image(self):
self._test_image(iaa.pillike.FilterEmboss,
PIL.ImageFilter.EMBOSS)
class FilterSharpen(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterSharpen,
iaa.pillike.filter_sharpen)
def test_image(self):
self._test_image(iaa.pillike.FilterSharpen,
PIL.ImageFilter.SHARPEN)
class FilterDetail(_TestFilter):
def test___init__(self):
self._test___init__(iaa.pillike.FilterDetail,
iaa.pillike.filter_detail)
def test_image(self):
self._test_image(iaa.pillike.FilterDetail,
PIL.ImageFilter.DETAIL)
class TestAffine(unittest.TestCase):
def setUp(self):
reseed()
@mock.patch("imgaug.augmenters.pillike.warp_affine")
def test_mocked(self, mock_pilaff):
aug = iaa.pillike.Affine(
scale={"x": 1.25, "y": 1.5},
translate_px={"x": 10, "y": 20},
rotate=30,
shear={"x": 40, "y": 50},
fillcolor=100,
center=(0.1, 0.2)
)
image = np.zeros((3, 3, 3), dtype=np.uint8)
mock_pilaff.return_value = np.full((3, 3, 3), 128, dtype=np.uint8)
image_aug = aug(image=image)
assert mock_pilaff.call_count == 1
args = mock_pilaff.call_args_list[0][0]
assert np.all(args[0] == 128) # due to in-place change
kwargs = mock_pilaff.call_args_list[0][1]
assert np.isclose(kwargs["scale_x"], 1.25)
assert np.isclose(kwargs["scale_y"], 1.5)
assert np.isclose(kwargs["translate_x_px"], 10)
assert np.isclose(kwargs["translate_y_px"], 20)
assert np.isclose(kwargs["rotate_deg"], 30)
assert np.isclose(kwargs["shear_x_deg"], 40)
assert np.isclose(kwargs["shear_y_deg"], 50)
assert np.isclose(kwargs["fillcolor"][0], 100)
assert np.isclose(kwargs["fillcolor"][1], 100)
assert np.isclose(kwargs["fillcolor"][2], 100)
assert np.isclose(kwargs["center"][0], 0.1)
assert np.isclose(kwargs["center"][1], 0.2)
assert np.all(image_aug == 128)
@mock.patch("imgaug.augmenters.pillike.warp_affine")
def test_mocked_translate_percent(self, mock_pilaff):
aug = iaa.pillike.Affine(
translate_percent={"x": 1.2, "y": 1.5}
)
image = np.zeros((20, 50, 3), dtype=np.uint8)
mock_pilaff.return_value = np.full((20, 50, 3), 128, dtype=np.uint8)
image_aug = aug(image=image)
assert mock_pilaff.call_count == 1
args = mock_pilaff.call_args_list[0][0]
assert np.all(args[0] == 128) # due to in-place change
kwargs = mock_pilaff.call_args_list[0][1]
assert np.isclose(kwargs["scale_x"], 1.0)
assert np.isclose(kwargs["scale_y"], 1.0)
assert np.isclose(kwargs["translate_x_px"], 50*1.2)
assert np.isclose(kwargs["translate_y_px"], 20*1.5)
assert np.isclose(kwargs["rotate_deg"], 0)
assert np.isclose(kwargs["shear_x_deg"], 0)
assert np.isclose(kwargs["shear_y_deg"], 0)
assert np.isclose(kwargs["fillcolor"][0], 0)
assert np.isclose(kwargs["fillcolor"][1], 0)
assert np.isclose(kwargs["fillcolor"][2], 0)
assert np.isclose(kwargs["center"][0], 0.5)
assert np.isclose(kwargs["center"][1], 0.5)
assert np.all(image_aug == 128)
def test_parameters_affect_images(self):
params = [
("scale", {"x": 1.3}),
("scale", {"y": 1.5}),
("translate_px", {"x": 5}),
("translate_px", {"y": 10}),
("translate_percent", {"x": 0.3}),
("translate_percent", {"y": 0.4}),
("rotate", 10),
("shear", {"x": 20}),
("shear", {"y": 20})
]
image = ia.quokka_square((64, 64))
images_aug = []
for param_name, param_val in params:
kwargs = {param_name: param_val}
aug = iaa.pillike.Affine(**kwargs)
image_aug = aug(image=image)
images_aug.append(image_aug)
for i, image_aug in enumerate(images_aug):
assert not np.array_equal(image_aug, image)
for j, other_image_aug in enumerate(images_aug):
if i != j:
assert not np.array_equal(image_aug, other_image_aug)
def test_batch_contains_no_images(self):
aug = iaa.pillike.Affine(translate_px={"x": 10})
hm_arr = np.ones((3, 3, 1), dtype=np.float32)
hm = ia.HeatmapsOnImage(hm_arr, shape=(3, 3, 3))
with self.assertRaises(AssertionError):
_hm_aug = aug(heatmaps=hm)
def test_get_parameters(self):
aug = iaa.pillike.Affine(
scale={"x": 1.25, "y": 1.5},
translate_px={"x": 10, "y": 20},
rotate=30,
shear={"x": 40, "y": 50},
fillcolor=100,
center=(0.1, 0.2)
)
params = aug.get_parameters()
assert params[0] is aug.scale
assert params[1] is aug.translate
assert params[2] is aug.rotate
assert params[3] is aug.shear
assert params[4] is aug.cval
assert params[5] is aug.center
|
from random import randint
from forex_python.converter import CurrencyRates
import datetime
print('\nCONVERSOR "UNIVERSAL"\n')
op = int(input('Escolha o que deseja fazer:\n'
'1. TEXTO\n'
'2. MEDIDAS\n'
'3. MOEDAS\n'
'4. FÓRMULAS\n'
'5. NÚMEROS ALEATÓRIOS\n'
'6. SAIR DO PROGRAMA\n'
'Digite uma opção: '))
if op == 1:
print('*' * 20)
print('\nCONVERSOR DE TEXTO')
texto = str(input('Digite o texto para converter: '))
print(f'Seu texto é {texto}\n')
optexto = int(input('Digite o que fazer com seu texto:\n'
'1. MAIÚSCULO\n'
'2. MINÚSCULO\n'
'3. CAPITALIZAR (Primeira letra da frase MAIÚSCULA)\n'
'4. TÍTULO (Primeira letra de cada palavra MAIÚSCULA)\n'
'5. SAIR (Sair do programa)\n'
'Digite uma opção: '))
if optexto == 1:
maiusculo = texto.upper()
print(f'Seu texto em maiúsculo é: {maiusculo}\n')
elif optexto == 2:
minusculo = texto.lower()
print(f'Seu texto em minúsculo é: {minusculo}\n')
elif optexto == 3:
capit = texto.capitalize()
print(f'Seu texto capitalizado é: {capit}')
elif optexto == 4:
title = texto.title()
print(f'Seu texto com iniciais maiúsculas fica: {title}')
elif optexto == 5:
print('Finalizando...')
if op == 2:
opmed = int(input('\nEscolha o que fazer:\n'
'1. TEMPERATURA\n'
'2. DISTÂNCIA\n'
'3. TEMPO\n'
'4. VOLUME\n'
'5. SAIR\n'
'Escolha uma opção: '))
if opmed == 1:
optemp = int(input('\nEscolha a temperatura inicial:\n'
'1. CELSIUS\n'
'2. FAHRENHEIT\n'
'3. KELVIN\n'
'Digite uma opção: '))
temp = float(input('\nDigite a temperatura (°C / °F / K): '))
cf = (temp * 9 / 5) + 32
ck = temp + 273.15
kc = temp - 273.15
kf = (temp - 273.15) * 9/5 + 32
fc = (temp - 32) * 5/9
fk = (temp - 32) * 5/9 + 273.15
if optemp == 1:
print(f'A temperatura {temp}°C convertida é\nKelvin = {ck} K\nFahrenheit = {cf}°F')
if optemp == 2:
print(f'A temperatura {temp}°F convertida é\nCelsius = {fc}°C\nKelvin = {fk} K')
if optemp == 3:
print(f'A temperatura {temp} K convertida é\nCelsius = {kc}°C\nFahrenheit = {kf}°F')
if opmed == 2:
m = float(input('Digite a distância em metros: '))
dm = m * 10
cm = m * 100
mm = m * 1000
dc = m / 10
hm = m / 100
km = m / 1000
print(f'O valor de {m}m equivale a \n'
f'{dm} Decímetros\n'
f'{cm} Centímetros\n'
f'{mm} Milímetros\n'
f'{dc} Decâmetros\n'
f'{hm} Hectômetros\n'
              f'{km} Quilômetros\n')
if opmed == 3:
optempo1 = int(input('\nDigite a unidade que irá converter: \n'
'1. SEGUNDOS\n'
'2. MINUTOS\n'
'3. HORAS\n'
'4. DIA\n'
'Escolha uma opção: '))
        optempo2 = int(input('\nDigite no que quer converter: \n'
                             '1. MINUTOS\n'
                             '2. HORAS\n'
                             '3. DIA\n'
                             '4. SEMANA\n'
                             '5. MÊS\n'
                             '6. ANO\n'
                             'Escolha uma opção: '))
# SEGUNDOS
# Segundos para Minuto
if optempo1 == 1 and optempo2 == 1:
segundos = float(input('Digite a quantia em SEGUNDOS: '))
min = segundos / 60
print(f'\n{segundos} segundos = {min:.2f} minutos')
# Segundos para Hora
if optempo1 == 1 and optempo2 == 2:
segundos = float(input('Digite a quantia em SEGUNDOS: '))
hrs = segundos / 3600
print(f'\n{segundos} segundos = {hrs:.2f} horas')
# Segundos para Dia
if optempo1 == 1 and optempo2 == 3:
segundos = float(input('Digite a quantia em SEGUNDOS: '))
d = segundos / 86400
print(f'\n{segundos} segundos = {d:.2f} dias')
# Minutos
# Minutos para Hora
if optempo1 == 2 and optempo2 == 2:
minutos = float(input('Digite a quantia em MINUTOS: '))
hrs = minutos / 60
print(f'\n{minutos} minutos = {hrs:.2f} hora(s)')
# Minutos para Dia
if optempo1 == 2 and optempo2 == 3:
minutos = float(input('Digite a quantia em MINUTOS: '))
dia = minutos / 1440
print(f'\n{minutos} minutos = {dia:.2f} dia(s)')
# Minutos para Semana
if optempo1 == 2 and optempo2 == 4:
minutos = float(input('Digite a quantia em MINUTOS: '))
semana = minutos / 10080
print(f'\n{minutos} minutos = {semana:.2f} semana(s)')
# Minutos para Mês
if optempo1 == 2 and optempo2 == 5:
minutos = float(input('Digite a quantia em MINUTOS: '))
mes = minutos / 43800
print(f'\n{minutos} minutos = {mes:.2f} mês(es)')
# Minutos para Ano
if optempo1 == 2 and optempo2 == 6:
minutos = float(input('Digite a quantia em MINUTOS: '))
ano = minutos / 525600
print(f'\n{minutos} minutos = {ano:.2f} ano(s)')
# Horas
# Horas para Minuto
if optempo1 == 3 and optempo2 == 1:
horas = float(input('Digite a quantia em HORAS: '))
            minutos = horas * 60
print(f'\n{horas} hora(s) = {minutos:.2f} minutos')
# Horas para Hora
if optempo1 == 3 and optempo2 == 2:
horas = float(input('Digite a quantia em HORAS: '))
horas = horas / 1
print(f'\n{horas} hora(s) = {horas:.2f} hora(s)')
# Horas para Dia
if optempo1 == 3 and optempo2 == 3:
horas = float(input('Digite a quantia em HORAS: '))
dia = horas / 24
print(f'\n{horas} horas = {dia:.2f} dia(s)')
# Dias
        # Dias em Minutos
        if optempo1 == 4 and optempo2 == 1:
            dia = float(input('Digite a quantia em DIAS: '))
            minutos = dia * 1440
            print(f'\n{dia} dias = {minutos:.2f} minutos')
        # Dias em Horas
        if optempo1 == 4 and optempo2 == 2:
            dia = float(input('Digite a quantia em DIAS: '))
            horas = dia * 24
            print(f'\n{dia} dias = {horas:.2f} horas')
        # Dias em Dias
        if optempo1 == 4 and optempo2 == 3:
            dia = float(input('Digite a quantia em DIAS: '))
            dias = dia / 1
            print(f'\n{dia} dias = {dias:.2f} dias')
if op == 3:
mes = datetime.datetime.today().month
ano = datetime.datetime.today().year
    print(f'NOTA: as cotações são feitas em tempo real\nPortanto, o ano é {ano} e o mês de número {mes}')
m = CurrencyRates()
print('Conversor de moedas em tempo-real'
'\nLista de moedas:'
'\nBRL'
'\nUSD...')
moeda1 = input('Digite a primeira moeda: ').upper().strip()
moeda2 = input('Em qual moeda ela será convertida? R:').upper().strip()
quantia = int(input("Digite a quantia: "))
print(moeda1, 'Para', moeda2, quantia)
result = m.convert(moeda1, moeda2, quantia)
print(result)
if op == 5:
inicio = int(input('Em que número começar? R: '))
final = int(input('E até onde vai? R: '))
rand = randint(inicio, final)
print(f'O intervalo vai de {inicio} até {final}.\nE o número aleatório foi {rand}.')
if op == 6:
print('\nFinalizando...')
|
import pytest
from zeroae.rocksdb.c import db as rdb
from zeroae.rocksdb.c import pinnableslice
@pytest.fixture
def db(rocksdb_db, rocksdb_writeoptions):
rdb.put(rocksdb_db, rocksdb_writeoptions, "key", "value")
yield rocksdb_db
@pytest.fixture
def slice(db, rocksdb_readoptions):
rv = rdb.get_pinned(db, rocksdb_readoptions, "key")
yield rv
pinnableslice.destroy(rv)
def test_fixture(slice):
assert slice is not None
def test_value(slice):
rv = pinnableslice.value(slice)
assert rv == "value"
|
"""Stream type classes for tap-smartsheet."""
from typing import Any, Dict, Optional, Union, List, Iterable
from singer_sdk import typing as th # JSON Schema typing helpers
from singer_sdk.plugin_base import PluginBase as TapBaseClass
from singer.schema import Schema
from singer_sdk.typing import JSONTypeHelper
from re import sub
from tap_smartsheet.client import SmartsheetStream
def snake_case(s):
"""Converts a string to snake case.
https://www.w3resource.com/python-exercises/string/python-data-type-string-exercise-97.php
"""
return "_".join(
sub(
"([A-Z][a-z]+)", r" \1", sub("([A-Z]+)", r" \1", s.replace("-", " "))
).split()
).lower()
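# A couple of illustrative examples (not from the original module) of what
# snake_case produces for typical Smartsheet sheet names:
#   snake_case("My Sheet-Name")   -> "my_sheet_name"
#   snake_case("SalesQ3 Report")  -> "sales_q3_report"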
class SheetStream(SmartsheetStream):
"""Define custom stream."""
def __init__(
self,
tap: TapBaseClass,
sheet: Dict,
name: Optional[str] = None,
schema: Optional[Union[Dict[str, Any], Schema]] = None,
path: Optional[str] = None,
) -> None:
self.sheet_id = sheet["id"]
name = "sheet_" + snake_case(sheet["name"])
columns = tap.smartsheet_client.Sheets.get_columns(
self.sheet_id, include_all=True
).to_dict()["data"]
schema = th.PropertiesList()
self.column_id_name_mapping = {}
for column in columns:
self.column_id_name_mapping[column["id"]] = column["title"]
schema.append(
th.Property(column["title"], self.column_type_mapping(column["type"]))
)
super().__init__(tap, schema.to_dict(), name)
def column_type_mapping(self, s) -> JSONTypeHelper:
"""Maps for Smartsheet data types to SDK data types.
https://smartsheet-platform.github.io/api-docs/#column-types"""
return {
"DATE": th.DateType,
"DATETIME": th.DateTimeType,
"ABSTRACT_DATETIME": th.DateTimeType,
}.get(s, th.StringType)
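    # For instance (illustrative), column_type_mapping("DATE") resolves to
    # th.DateType, while an unlisted type such as "TEXT_NUMBER" falls back
    # to th.StringType.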
def get_records(self, context: Optional[dict]) -> Iterable[dict]:
"""Retrieve sheets from the API and yield a record for each row."""
sheet_rows = (
self._tap.smartsheet_client.Sheets.get_sheet(self.sheet_id)
.to_dict()
.get("rows", [])
)
for row in sheet_rows:
cells = row["cells"]
yield {
self.column_id_name_mapping[cell["columnId"]]: str(cell.get("value"))
for cell in cells
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
import views
urlpatterns = patterns('',
url(r'^viewall_contest/(?P<group_id>\d+)/$', views.get_running_contest, name='viewall_contest'),
url(r'^viewall_archive/(?P<group_id>\d+)/$', views.get_ended_contest, name='viewall_archive'),
url(r'^viewall_announce/(?P<group_id>\d+)/$', views.get_all_announce, name='viewall_announce'),
url(r'^list/$', views.list, name='list'),
url(r'^my_list/$', views.my_list, name='my_list'),
url(r'^detail/(?P<group_id>\d+)/$', views.detail, name='detail'),
#Group Add/Delete/Edit Part
url(r'^new/$', views.new, name='new'),
url(r'^delete/(?P<group_id>\d+)/$', views.delete, name='delete'),
url(r'^edit/(?P<group_id>\d+)/$', views.edit, name='edit'),
#Announce Part
url(r'^add_announce/(?P<group_id>\d+)/$', views.add_announce, name='add_announce'),
url(r'^delete_announce/(?P<announce_id>\d+)/(?P<group_id>\d+)/(?P<redirect_page>\w+)/$', views.delete_announce, name='delete_announce'),
url(r'^edit_announce/(?P<announce_id>\d+)/(?P<group_id>\d+)/(?P<redirect_page>\w+)/$', views.edit_announce, name='edit_announce'),
#Member Part
#url(r'^delete_member/(?P<group_id>\d+)/(?P<student_name>\w+)/$', views.delete_member, name='delete_member'),
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from src.libraries.myMathLib import point_to_segment_distance, euclidean_distance_3d, lineseg_dist, distance_numpy
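# Each block below feeds the same (point, segment) pair to three
# point-to-segment distance implementations and prints them next to the
# expected value for a quick visual comparison.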
def main():
expected = 5
a0 = np.array([3, 1, -1])
a1 = np.array([5, 2, 1])
c = np.array([0, 2, 3])
print(lineseg_dist(c, a0, a1), point_to_segment_distance(c, a0, a1), distance_numpy(c, a0, a1), expected)
expected = 1
a0 = np.array([3, 2, 1])
a1 = np.array([4, 2, 1])
c = np.array([0, 2, 0])
print(lineseg_dist(c, a0, a1), point_to_segment_distance(c, a0, a1), distance_numpy(c, a0, a1), expected)
if __name__ == '__main__':
main()
|
from rest_framework import serializers
from core_model.models import Appointment
class AppointmentSerializer(serializers.ModelSerializer):
class Meta:
model = Appointment
fields = '__all__'
read_only_fields = (
'id',
'end_date'
)
|
from contextlib import AsyncExitStack, ExitStack
from types import TracebackType
from typing import Any, Dict, Optional, Type, Union
from di._utils.scope_map import ScopeMap
from di._utils.types import CacheKey, FusedContextManager
from di.api.scopes import Scope
class ContainerState:
__slots__ = ("cached_values", "stacks")
def __init__(
self,
cached_values: Optional[ScopeMap[CacheKey, Any]] = None,
stacks: Optional[Dict[Scope, Union[AsyncExitStack, ExitStack]]] = None,
) -> None:
self.cached_values = cached_values or ScopeMap()
self.stacks = stacks or {}
def enter_scope(self, scope: Scope) -> "FusedContextManager[ContainerState]":
"""Enter a scope and get back a new ContainerState object that you can use to execute dependencies."""
new = ContainerState(
cached_values=ScopeMap(self.cached_values.copy()),
stacks=self.stacks.copy(),
)
return ScopeContext(new, scope)
class ScopeContext(FusedContextManager[ContainerState]):
__slots__ = ("state", "scope", "stack")
stack: Union[AsyncExitStack, ExitStack]
def __init__(self, state: ContainerState, scope: Scope) -> None:
self.state = state
self.scope = scope
def __enter__(self) -> ContainerState:
self.state.stacks[self.scope] = self.stack = ExitStack()
self.state.cached_values.add_scope(self.scope)
return self.state
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Union[None, bool]:
return self.stack.__exit__(exc_type, exc_value, traceback) # type: ignore[union-attr,no-any-return]
async def __aenter__(self) -> ContainerState:
self.state.stacks[self.scope] = self.stack = AsyncExitStack()
self.state.cached_values.add_scope(self.scope)
return self.state
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Union[None, bool]:
return await self.stack.__aexit__(exc_type, exc_value, traceback) # type: ignore[union-attr,no-any-return]
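# Illustrative usage (a sketch; the "request" scope name and the dependency
# execution step are assumptions, not part of this module):
#
#   state = ContainerState()
#   with state.enter_scope("request") as request_state:
#       ...  # resolve dependencies against request_state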
|
#!/usr/bin/env python
# Copyright (c) 2018 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import subprocess
import time
def main():
parser = argparse.ArgumentParser(description="Intelligently reduce test case")
parser.add_argument(
"binary", type=str, help="Path to the test binary to run.")
parser.add_argument(
"input_test", type=str, help="Path to test to reduce.")
parser.add_argument(
"output_test", type=str, help="Path for reduced test.")
parser.add_argument(
"--which_test", type=str, help="Which test to run (equivalent to --input_which_test).", default=None)
parser.add_argument(
"--criteria", type=str, help="String to search for in valid reduction outputs.",
default=None)
parser.add_argument(
"--search", action="store_true", help="Allow initial test to not satisfy criteria (search for test).",
default=None)
parser.add_argument(
"--timeout", type=int, help="After this amount of time (in seconds), give up on reduction.",
default=1200)
class TimeoutException(Exception):
pass
args = parser.parse_args()
deepstate = args.binary
test = args.input_test
out = args.output_test
checkString = args.criteria
whichTest = args.which_test
start = time.time()
def runCandidate(candidate):
if (time.time() - start) > args.timeout:
raise TimeoutException
with open(".reducer.out", 'w') as outf:
cmd = [deepstate + " --input_test_file " +
candidate + " --verbose_reads"]
if whichTest is not None:
cmd += ["--input_which_test", whichTest]
subprocess.call(cmd, shell=True, stdout=outf, stderr=outf)
result = []
with open(".reducer.out", 'r') as inf:
for line in inf:
result.append(line)
return result
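    # checks() scans the captured run output for the reduction criteria:
    # either the user-supplied --criteria string or, by default, a DeepState
    # failure or crash marker.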
def checks(result):
for line in result:
if checkString:
if checkString in line:
return True
else:
if "ERROR: Failed:" in line:
return True
if "ERROR: Crashed" in line:
return True
return False
def writeAndRunCandidate(test):
with open(".candidate.test", 'wb') as outf:
outf.write(test)
r = runCandidate(".candidate.test")
return r
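    # structure() parses the --verbose_reads output to recover, for each
    # OneOf(...) call, the (first, last) byte offsets it consumed, plus the
    # index of the last byte read overall; these ranges drive the OneOf
    # removal pass below.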
def structure(result):
OneOfs = []
currentOneOf = []
for line in result:
if "STARTING OneOf CALL" in line:
currentOneOf.append(-1)
elif "Reading byte at" in line:
lastRead = int(line.split()[-1])
if currentOneOf[-1] == -1:
currentOneOf[-1] = lastRead
elif "FINISHED OneOf CALL" in line:
OneOfs.append((currentOneOf[-1], lastRead))
currentOneOf = currentOneOf[:-1]
return (OneOfs, lastRead)
initial = runCandidate(test)
if (not args.search) and (not checks(initial)):
print("STARTING TEST DOES NOT SATISFY REDUCTION CRITERIA")
return 1
with open(test, 'rb') as test:
currentTest = bytearray(test.read())
print("ORIGINAL TEST HAS", len(currentTest), "BYTES")
s = structure(initial)
if (s[1]+1) < len(currentTest):
print("LAST BYTE READ IS", s[1])
print("SHRINKING TO IGNORE UNREAD BYTES")
currentTest = currentTest[:s[1]+1]
changed = True
try:
while changed:
changed = False
cuts = s[0]
for c in cuts:
newTest = currentTest[:c[0]] + currentTest[c[1]+1:]
r = writeAndRunCandidate(newTest)
if checks(r):
print("ONEOF REMOVAL REDUCED TEST TO", len(newTest), "BYTES")
changed = True
break
if not changed:
for b in range(0, len(currentTest)):
for v in range(b+1, len(currentTest)):
newTest = currentTest[:b] + currentTest[v:]
r = writeAndRunCandidate(newTest)
if checks(r):
print("BYTE RANGE REMOVAL REDUCED TEST TO", len(newTest), "BYTES")
changed = True
break
if changed:
break
if not changed:
for b in range(0, len(currentTest)):
for v in range(0, currentTest[b]):
newTest = bytearray(currentTest)
newTest[b] = v
r = writeAndRunCandidate(newTest)
if checks(r):
print("BYTE REDUCTION: BYTE", b, "FROM", currentTest[b], "TO", v)
changed = True
break
if changed:
break
if not changed:
for b in range(0, len(currentTest)):
if currentTest[b] == 0:
continue
newTest = bytearray(currentTest)
newTest[b] = currentTest[b]-1
newTest = newTest[:b+1] + newTest[b+2:]
r = writeAndRunCandidate(newTest)
if checks(r):
print("BYTE REDUCE AND DELETE AT BYTE", b)
changed = True
break
if not changed:
for b1 in range(0, len(currentTest)-4):
for b2 in range(b1+2, len(currentTest)-4):
v1 = (currentTest[b1], currentTest[b1+1])
v2 = (currentTest[b2], currentTest[b2+1])
if (v1 == v2):
ba = bytearray(v1)
part1 = currentTest[:b1]
part2 = currentTest[b1+2:b2]
part3 = currentTest[b2+2:]
banews = []
banews.append(ba[0:1])
banews.append(ba[1:2])
if ba[0] > 0:
for v in range(0, ba[0]):
banews.append(bytearray([v, ba[1]]))
banews.append(bytearray([ba[0]-1]))
if ba[1] > 0:
for v in range(0, ba[1]):
banews.append(bytearray([ba[0], v]))
for banew in banews:
newTest = part1 + banew + part2 + banew + part3
r = writeAndRunCandidate(newTest)
if checks(r):
print("BYTE PATTERN", tuple(ba), "AT", b1, "AND", b2, "CHANGED TO", tuple(banew))
changed = True
break
if changed:
break
if changed:
break
if changed:
currentTest = newTest
s = structure(r)
else:
print("NO (MORE) REDUCTIONS FOUND")
except TimeoutException:
print("REDUCTION TIMED OUT AFTER", args.timeout, "SECONDS")
if (s[1] + 1) > len(currentTest):
print("PADDING TEST WITH", (s[1] + 1) - len(currentTest), "ZEROS")
padding = bytearray('\x00' * ((s[1] + 1) - len(currentTest)))
currentTest = currentTest + padding
print()
print("WRITING REDUCED TEST WITH", len(currentTest), "BYTES TO", out)
with open(out, 'wb') as outf:
outf.write(currentTest)
return 0
if "__main__" == __name__:
exit(main())
|
"""Test cases for the location module."""
from pytest_mock import MockFixture
from rdflib import Graph
from rdflib.compare import graph_diff, isomorphic
from skolemizer.testutils import skolemization
from datacatalogtordf import Location
def test_to_graph_should_return_identifier_set_at_constructor() -> None:
"""It returns a centroid graph isomorphic to spec."""
location = Location("http://example.com/locations/1")
location.centroid = "POINT(4.88412 52.37509)"
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix locn: <http://www.w3.org/ns/locn#> .
@prefix geosparql: <http://www.opengis.net/ont/geosparql#> .
<http://example.com/locations/1> a dct:Location ;
dcat:centroid "POINT(4.88412 52.37509)"^^geosparql:asWKT ;
.
"""
g1 = Graph().parse(data=location.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic
def test_to_graph_should_return_location_skolemized(mocker: MockFixture) -> None:
"""It returns a title graph isomorphic to spec."""
location = Location()
location.geometry = """POLYGON ((
4.8842353 52.375108 , 4.884276 52.375153 ,
4.8842567 52.375159 , 4.883981 52.375254 ,
4.8838502 52.375109 , 4.883819 52.375075 ,
4.8841037 52.374979 , 4.884143 52.374965 ,
4.8842069 52.375035 , 4.884263 52.375016 ,
4.8843200 52.374996 , 4.884255 52.374926 ,
4.8843289 52.374901 , 4.884451 52.375034 ,
4.8842353 52.375108
))"""
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix locn: <http://www.w3.org/ns/locn#> .
@prefix geosparql: <http://www.opengis.net/ont/geosparql#> .
<http://wwww.digdir.no/.well-known/skolem/284db4d2-80c2-11eb-82c3-83e80baa2f94>
a dct:Location ;
locn:geometry \"\"\"POLYGON ((
4.8842353 52.375108 , 4.884276 52.375153 ,
4.8842567 52.375159 , 4.883981 52.375254 ,
4.8838502 52.375109 , 4.883819 52.375075 ,
4.8841037 52.374979 , 4.884143 52.374965 ,
4.8842069 52.375035 , 4.884263 52.375016 ,
4.8843200 52.374996 , 4.884255 52.374926 ,
4.8843289 52.374901 , 4.884451 52.375034 ,
4.8842353 52.375108
))\"\"\"^^geosparql:asWKT ;
.
"""
mocker.patch(
"skolemizer.Skolemizer.add_skolemization",
return_value=skolemization,
)
g1 = Graph().parse(data=location.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic
def test_to_graph_should_return_geometry_as_graph() -> None:
"""It returns a title graph isomorphic to spec."""
location = Location()
location.identifier = "http://example.com/locations/1"
location.geometry = """POLYGON ((
4.8842353 52.375108 , 4.884276 52.375153 ,
4.8842567 52.375159 , 4.883981 52.375254 ,
4.8838502 52.375109 , 4.883819 52.375075 ,
4.8841037 52.374979 , 4.884143 52.374965 ,
4.8842069 52.375035 , 4.884263 52.375016 ,
4.8843200 52.374996 , 4.884255 52.374926 ,
4.8843289 52.374901 , 4.884451 52.375034 ,
4.8842353 52.375108
))"""
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix locn: <http://www.w3.org/ns/locn#> .
@prefix geosparql: <http://www.opengis.net/ont/geosparql#> .
<http://example.com/locations/1> a dct:Location ;
locn:geometry \"\"\"POLYGON ((
4.8842353 52.375108 , 4.884276 52.375153 ,
4.8842567 52.375159 , 4.883981 52.375254 ,
4.8838502 52.375109 , 4.883819 52.375075 ,
4.8841037 52.374979 , 4.884143 52.374965 ,
4.8842069 52.375035 , 4.884263 52.375016 ,
4.8843200 52.374996 , 4.884255 52.374926 ,
4.8843289 52.374901 , 4.884451 52.375034 ,
4.8842353 52.375108
))\"\"\"^^geosparql:asWKT ;
.
"""
g1 = Graph().parse(data=location.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic
def test_to_graph_should_return_bounding_box_as_graph() -> None:
"""It returns a bounding box graph isomorphic to spec."""
location = Location()
location.identifier = "http://example.com/locations/1"
location.bounding_box = """POLYGON ((
3.053 47.975 , 7.24 47.975 ,
7.24 53.504 , 3.053 53.504 ,
3.053 47.975
))"""
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix locn: <http://www.w3.org/ns/locn#> .
@prefix geosparql: <http://www.opengis.net/ont/geosparql#> .
<http://example.com/locations/1> a dct:Location ;
dcat:bbox \"\"\"POLYGON ((
3.053 47.975 , 7.24 47.975 ,
7.24 53.504 , 3.053 53.504 ,
3.053 47.975
))\"\"\"^^geosparql:asWKT ;
.
"""
g1 = Graph().parse(data=location.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic
def test_to_graph_should_return_centroid_as_graph() -> None:
"""It returns a centroid graph isomorphic to spec."""
location = Location()
location.identifier = "http://example.com/locations/1"
location.centroid = "POINT(4.88412 52.37509)"
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix locn: <http://www.w3.org/ns/locn#> .
@prefix geosparql: <http://www.opengis.net/ont/geosparql#> .
<http://example.com/locations/1> a dct:Location ;
dcat:centroid \"POINT(4.88412 52.37509)\"^^geosparql:asWKT ;
.
"""
g1 = Graph().parse(data=location.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic
# ---------------------------------------------------------------------- #
# Utils for displaying debug information
def _dump_diff(g1: Graph, g2: Graph) -> None:
in_both, in_first, in_second = graph_diff(g1, g2)
print("\nin both:")
_dump_turtle(in_both)
print("\nin first:")
_dump_turtle(in_first)
print("\nin second:")
_dump_turtle(in_second)
def _dump_turtle(g: Graph) -> None:
for _l in g.serialize(format="turtle").splitlines():
if _l:
print(_l)
|
# -*- coding: utf8 -*-
# Copyright 2019 JSALT2019 Distant Supervision Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from distsup import utils
from distsup.logger import DefaultTensorLogger
from distsup.modules import convolutional, wavenet
from distsup.models.adversarial import Adversarial
from distsup.models.base import Model
from distsup.models.streamtokenizer import StreamTokenizerNet
from distsup.modules.aux_modules import attach_auxiliary
from distsup.modules.bottlenecks import VQBottleneck
from distsup.modules.predictors import FramewisePredictor, GlobalPredictor
from distsup.modules.conditioning import CondNorm2d
logger = DefaultTensorLogger()
def vqvae_nanxin(input_height=28, codebook_size=50, **kwargs):
encoder = [
dict(in_channels=1,
out_channels=256,
padding=1,
kernel_size=3,
stride=2,
bias=False),
dict(in_channels=256,
out_channels=256,
kernel_size=3,
padding=1,
stride=2,
bias=False),
dict(in_channels=256, out_channels=64, padding=2, kernel_size=5, bias=True),
]
decoder = [
dict(in_channels=64, out_channels=256, padding=2, kernel_size=5, bias=False),
dict(in_channels=256,
out_channels=256,
kernel_size=3,
padding=1,
stride=2,
bias=False),
dict(in_channels=256,
out_channels=1,
kernel_size=3,
stride=2,
output_padding=1,
bias=True),
]
return VQVAE(encoder, decoder, input_height=input_height, aggreg_stride=1,
codebook_size=codebook_size, **kwargs)
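# Rough usage sketch (illustrative only; the remaining StreamTokenizerNet
# keyword arguments, e.g. the dataset wiring, come from the distsup
# experiment configuration):
#   model = vqvae_nanxin(input_height=28, codebook_size=50)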
class VQVAE(StreamTokenizerNet):
"""
An image encoder-decoder model with a quantized bottleneck.
Args:
encoder (list of dicts): specification of convolutions for the
encoder. BNs and ReLUs are added automatically except for the
            last layer.
decoder (list of dicts): specification of convolutions for the
decoder. BNs and ReLUs are added automatically except for the
            last layer. ConvTranspose2d layers are used.
aggreg (int): how many spatial cells to aggregate as input for the
quantization layer. Right now this has to be a multiple of the
height of the latent variable since we have temporal models in
mind, but this could be relaxed.
codebook_size (int): how many quantized codes are learnable
aggreg_stride (int or None): stride when aggregating cells in the
width dimension. If set to None, they will be aggregated in non
overlapping windows.
"""
def __init__(self,
encoder,
decoder,
codebook_size,
input_height,
aggreg_stride=1,
adversarial_size=0,
adv_penalty=1e-9,
with_framewise_probe=False,
adv_class_embedding_size=128,
**kwargs):
super(VQVAE, self).__init__(**kwargs)
self.stride = aggreg_stride
self.adv_penalty = adv_penalty
self.indices = None
self.encoder = []
for conv in encoder[:-1]:
self.encoder += [
nn.Conv2d(**conv),
nn.BatchNorm2d(conv['out_channels']),
nn.ReLU(inplace=True)
]
self.encoder.append(nn.Conv2d(**encoder[-1]))
self.encoder = nn.Sequential(*self.encoder)
self._compute_align_params()
assert input_height % self.align_upsampling == 0, (
"The height of the input image ({}) must be divisible by {}"
).format(input_height, self.align_upsampling)
hidden_height = input_height // self.align_upsampling
d = self.encoder[-1].out_channels * aggreg_stride * hidden_height
self.vq = VQBottleneck(d, d, codebook_size, dim=1)
self.char_pred = None
if with_framewise_probe:
self.char_pred = attach_auxiliary(
self.vq,
FramewisePredictor(d, len(self.dataset.alphabet), aggreg=2),
bp_to_main=False)
self.adversarial = None
if adversarial_size != 0:
self.adversarial = Adversarial(
GlobalPredictor(
self.encoder[-1].out_channels * hidden_height,
adversarial_size, time_reduce='max',
aggreg=5), mode='reverse')
self.align_upsampling *= aggreg_stride
self.embs = nn.Embedding(len(self.dataset.alphabet),
adv_class_embedding_size)
self.decoder = []
for conv in decoder[:-1]:
self.decoder += [
nn.ConvTranspose2d(**conv),
CondNorm2d(conv['out_channels'], adv_class_embedding_size),
nn.ReLU(inplace=True)
]
self.decoder.append(nn.ConvTranspose2d(**decoder[-1]))
self.decoder.append(nn.Sigmoid())
self.decoder = nn.ModuleList(self.decoder)
self.add_probes()
self.apply(utils.conv_weights_xavier_init)
def _compute_align_params(self):
self.align_upsampling = 1
self.align_offset = 0
for module in self.encoder.children():
if isinstance(module, nn.Conv2d):
self.align_offset += (module.kernel_size[-1] - 1) // 2 - module.padding[1]
self.align_upsampling *= module.stride[1]
elif isinstance(module, (nn.BatchNorm2d, nn.ReLU)):
logging.debug(f'Layer of type {module.__class__.__name__} '
f'is assumed not to change the data rate.')
else:
raise NotImplementedError(f'The rate handling of module {module.__class__.__name__} '
f'has not been implemented.'
f'If this module affects the data rate, '
f'handle the way in which the alignment changes.'
f'If not, add it to the previous case of this if statement.')
return
def encode(self, x):
return self.encoder(x)
def decode(self, z, y):
y_emb = self.embs(y)
for m in self.decoder:
if isinstance(m, CondNorm2d):
z = m(z, y_emb)
else:
z = m(z)
return z
def forward(self, x, y):
z = self.encode(x)
aggreg_width = self.stride
b, zc, zh, zw = z.shape
z = F.unfold(z, (zh, aggreg_width),
stride=(1, self.stride or aggreg_width))
if self.adversarial is not None:
self.adversarial(z)
zq, _, details = self.vq(z)
self.indices = details['indices']
zq = F.fold(zq,
output_size=(zh, zw),
kernel_size=(zh, aggreg_width),
stride=(1, self.stride or aggreg_width))
x2 = self.decode(zq, y)
return x2, details['indices']
def align_tokens_to_features(self, batch, tokens):
token2 = tokens.repeat_interleave(self.align_upsampling, dim=1)
token2 = token2[:, :batch['features'].shape[1]]
return token2
def minibatch_loss_and_tokens(self, batch):
x = batch['features']
x = x.permute(0, 3, 2, 1)
mask = utils.get_mask2d(batch['features_len'], x)
if x.shape[3] % self.align_upsampling != 0:
os = torch.zeros(
*x.shape[0:3],
self.align_upsampling - x.shape[3] % self.align_upsampling,
device=x.device)
x = torch.cat([x, os], dim=3)
mask = torch.cat([mask, os], dim=3)
if 'adversarial' in batch:
x2, indices = self(x * 2 - 1, batch['adversarial'])
else:
x2, indices = self(x * 2 - 1, torch.zeros(x.shape[0],
device=x.device).long())
logger.log_images('orig', x[:3])
logger.log_images('recs', x2[:3])
main_loss = self.loss(x, x2, mask)
details = {
'recon_loss': main_loss,
}
if self.char_pred is not None and 'alignment' in batch:
char_loss, char_details = self.char_pred.loss(x, batch['alignment'])
details['char_loss'] = char_loss
details['char_acc'] = char_details['acc']
main_loss += char_loss
if self.adversarial is not None and 'adversarial' in batch:
friend_loss, advloss, adv_details = self.adversarial.loss(batch['adversarial'])
main_loss = main_loss + friend_loss + self.adv_penalty * advloss
details['adversarial_loss'] = advloss
details['adversarial_friendly_loss'] = friend_loss
details['adversarial_acc'] = adv_details['acc']
return main_loss, details, indices
def loss(self, x, x2, mask):
recon = F.l1_loss(x2 * mask, x)
return recon
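# Illustrative sketch (not part of the original model): how F.unfold / F.fold are
# used in forward() above to group latent cells column-wise before vector
# quantization and to scatter the quantized vectors back. The toy shapes below
# (batch 2, 64 channels, height 4, width 8, aggreg_stride 1) are assumptions that
# mirror the vqvae_nanxin defaults, not values from the source.
if __name__ == '__main__':
    z = torch.randn(2, 64, 4, 8)
    # one column of the latent map becomes one vector fed to the bottleneck
    cols = F.unfold(z, (4, 1), stride=(1, 1))          # shape (2, 64 * 4, 8)
    z_back = F.fold(cols, output_size=(4, 8), kernel_size=(4, 1), stride=(1, 1))
    # with a kernel spanning the full height and non-overlapping width windows,
    # fold exactly inverts unfold
    assert torch.allclose(z, z_back)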
|
from bench import bench
print(bench(10, '', '''
s = []
for i in range(100000): s.append(i)
for _ in range(100000): s.pop()
'''))
|
import os
import numpy as np
from skimage.io import imread
from skimage.exposure import adjust_gamma
class DataGenerator():
"""Reads dataset files (images & annotations) and prepare training & validation batches"""
def __init__(self, batch_size, validation_size, directory, train_test='train'):
"""Load dataset into RAM & prepare train/validation split
        Reads train_%d.bmp & train_%d_anno.bmp files in directory and randomly sets aside
validation_size images from the dataset for validation.
"""
self.batch_size = batch_size
self.validation_size = validation_size
self.directory = directory
nPerSet = {'train': 85, 'testA': 60, 'testB': 20}
self.image_files = [os.path.join(self.directory, f'{train_test}_{i}.bmp') for i in range(1, nPerSet[train_test]+1)]
self.annotation_files = [os.path.join(self.directory, f'{train_test}_{i}_anno.bmp') for i in range(1, nPerSet[train_test]+1)]
# Pre-load all images in RAM
self.full_images = [imread(f)/255 for f in self.image_files]
self.full_annotations = [imread(f) for f in self.annotation_files]
# Train/Validation split
self.idxs = np.arange(len(self.image_files))
np.random.seed(1)
np.random.shuffle(self.idxs)
self.val_idxs = self.idxs[:self.validation_size]
self.train_idxs = self.idxs[self.validation_size:]
@staticmethod
def _augment(batch_x, batch_y):
"""Basic data augmentation:
Horizontal/Vertical symmetry
Random noise
Gamma correction"""
# Vertical symmetry
if( np.random.random()<0.5 ):
batch_x = batch_x[:,::-1,:,:]
batch_y = batch_y[:,::-1,:]
# Horizontal symmetry
if( np.random.random()<0.5 ):
batch_x = batch_x[:,:,::-1,:]
batch_y = batch_y[:,:,::-1]
# Gamma (before random noise because input values must be between [0,1])
gamma = (np.random.random()-0.5)*2
if gamma < 0:
gamma=1/(1-gamma)
else:
gamma=1+gamma
batch_x_ = batch_x.copy()
for i in range(len(batch_x)):
batch_x_[i] = adjust_gamma(batch_x[i], gamma=gamma)
# Random noise
batch_x_ += np.random.normal(0, 0.02, size=batch_x.shape)
return batch_x_,batch_y
|
'''
------------------------------------------------------------------------
Household functions for taxes in the steady state and along the
transition path.
This file calls the following files:
tax.py
------------------------------------------------------------------------
'''
# Packages
import numpy as np
from ogusa import tax, utils
'''
------------------------------------------------------------------------
Functions
------------------------------------------------------------------------
'''
def marg_ut_cons(c, sigma):
'''
Computation of marginal utility of consumption.
Inputs:
c = [T,S,J] array, household consumption
sigma = scalar, coefficient of relative risk aversion
Functions called: None
Objects in function:
output = [T,S,J] array, marginal utility of consumption
Returns: output
'''
if np.ndim(c) == 0:
c = np.array([c])
epsilon = 0.003
cvec_cnstr = c < epsilon
MU_c = np.zeros(c.shape)
MU_c[~cvec_cnstr] = c[~cvec_cnstr] ** (-sigma)
b2 = (-sigma * (epsilon ** (-sigma - 1))) / 2
b1 = (epsilon ** (-sigma)) - 2 * b2 * epsilon
MU_c[cvec_cnstr] = 2 * b2 * c[cvec_cnstr] + b1
output = MU_c
output = np.squeeze(output)
return output
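# Worked example (illustrative values, not from the source): with sigma = 2 the
# unconstrained branch gives marg_ut_cons(1.0, 2) = 1.0 ** (-2) = 1.0 and
# marg_ut_cons(2.0, 2) = 2.0 ** (-2) = 0.25; only consumption below
# epsilon = 0.003 falls back to the quadratic extrapolation 2 * b2 * c + b1.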
def marg_ut_labor(n, chi_n, p):
'''
Computation of marginal disutility of labor.
Inputs:
n = [T,S,J] array, household labor supply
params = length 4 tuple (b_ellipse, upsilon, ltilde, chi_n)
b_ellipse = scalar, scaling parameter in elliptical utility function
upsilon = curvature parameter in elliptical utility function
ltilde = scalar, upper bound of household labor supply
chi_n = [S,] vector, utility weights on disutility of labor
Functions called: None
Objects in function:
output = [T,S,J] array, marginal disutility of labor supply
Returns: output
'''
nvec = n
if np.ndim(nvec) == 0:
nvec = np.array([nvec])
eps_low = 0.000001
eps_high = p.ltilde - 0.000001
nvec_low = nvec < eps_low
nvec_high = nvec > eps_high
nvec_uncstr = np.logical_and(~nvec_low, ~nvec_high)
MDU_n = np.zeros(nvec.shape)
MDU_n[nvec_uncstr] = (
(p.b_ellipse / p.ltilde) *
((nvec[nvec_uncstr] / p.ltilde) ** (p.upsilon - 1)) *
((1 - ((nvec[nvec_uncstr] / p.ltilde) ** p.upsilon)) **
((1 - p.upsilon) / p.upsilon)))
b2 = (0.5 * p.b_ellipse * (p.ltilde ** (-p.upsilon)) * (p.upsilon - 1) *
(eps_low ** (p.upsilon - 2)) *
((1 - ((eps_low / p.ltilde) ** p.upsilon)) **
((1 - p.upsilon) / p.upsilon)) *
(1 + ((eps_low / p.ltilde) ** p.upsilon) *
((1 - ((eps_low / p.ltilde) ** p.upsilon)) ** (-1))))
b1 = ((p.b_ellipse / p.ltilde) * ((eps_low / p.ltilde) **
(p.upsilon - 1)) *
((1 - ((eps_low / p.ltilde) ** p.upsilon)) **
((1 - p.upsilon) / p.upsilon)) - (2 * b2 * eps_low))
MDU_n[nvec_low] = 2 * b2 * nvec[nvec_low] + b1
d2 = (0.5 * p.b_ellipse * (p.ltilde ** (-p.upsilon)) * (p.upsilon - 1) *
(eps_high ** (p.upsilon - 2)) *
((1 - ((eps_high / p.ltilde) ** p.upsilon)) **
((1 - p.upsilon) / p.upsilon)) *
(1 + ((eps_high / p.ltilde) ** p.upsilon) *
((1 - ((eps_high / p.ltilde) ** p.upsilon)) ** (-1))))
d1 = ((p.b_ellipse / p.ltilde) * ((eps_high / p.ltilde) **
(p.upsilon - 1)) * ((1 - ((eps_high / p.ltilde) ** p.upsilon)) **
((1 - p.upsilon) / p.upsilon)) - (2 * d2 * eps_high))
MDU_n[nvec_high] = 2 * d2 * nvec[nvec_high] + d1
output = MDU_n * np.squeeze(chi_n)
output = np.squeeze(output)
return output
def get_bq(BQ, j, p, method):
'''
Calculation of bequests to each lifetime income group.
Inputs:
r = [T,] vector, interest rates
b_splus1 = [T,S,J] array, distribution of wealth/capital
holdings one period ahead
params = length 5 tuple, (omega, lambdas, rho, g_n, method)
omega = [S,T] array, population weights
lambdas = [J,] vector, fraction in each lifetime income group
rho = [S,] vector, mortality rates
g_n = scalar, population growth rate
method = string, 'SS' or 'TPI'
Functions called: None
Objects in function:
BQ_presum = [T,S,J] array, weighted distribution of
wealth/capital holdings one period ahead
BQ = [T,J] array, aggregate bequests by lifetime income group
Returns: BQ
'''
if p.use_zeta:
if j is not None:
if method == 'SS':
bq = (p.zeta[:, j] * BQ) / (p.lambdas[j] * p.omega_SS)
else:
len_T = BQ.shape[0]
bq = ((np.reshape(p.zeta[:, j], (1, p.S)) *
BQ.reshape((len_T, 1))) /
(p.lambdas[j] * p.omega[:len_T, :]))
else:
if method == 'SS':
bq = ((p.zeta * BQ) / (p.lambdas.reshape((1, p.J)) *
p.omega_SS.reshape((p.S, 1))))
else:
len_T = BQ.shape[0]
bq = ((np.reshape(p.zeta, (1, p.S, p.J)) *
utils.to_timepath_shape(BQ, p)) /
(p.lambdas.reshape((1, 1, p.J)) *
p.omega[:len_T, :].reshape((len_T, p.S, 1))))
else:
if j is not None:
if method == 'SS':
bq = np.tile(BQ[j], p.S) / p.lambdas[j]
if method == 'TPI':
len_T = BQ.shape[0]
bq = np.tile(np.reshape(BQ[:, j] / p.lambdas[j],
(len_T, 1)), (1, p.S))
else:
if method == 'SS':
BQ_per = BQ / np.squeeze(p.lambdas)
bq = np.tile(np.reshape(BQ_per, (1, p.J)), (p.S, 1))
if method == 'TPI':
len_T = BQ.shape[0]
BQ_per = BQ / p.lambdas.reshape(1, p.J)
bq = np.tile(np.reshape(BQ_per, (len_T, 1, p.J)),
(1, p.S, 1))
return bq
def get_cons(r, w, b, b_splus1, n, bq, net_tax, e, tau_c, p):
'''
Calculation of household consumption.
Inputs:
r = [T,] vector, interest rates
w = [T,] vector, wage rates
b = [T,S,J] array, distribution of wealth/capital
b_splus1 = [T,S,J] array, distribution of wealth/capital,
one period ahead
n = [T,S,J] array, distribution of labor supply
BQ = [T,J] array, bequests by lifetime income group
net_tax = [T,S,J] array, distribution of net taxes
params = length 3 tuple (e, lambdas, g_y)
e = [S,J] array, effective labor units by age and
lifetime income group
lambdas = [S,] vector, fraction of population in each lifetime
income group
g_y = scalar, exogenous labor augmenting technological growth
Functions called: None
Objects in function:
cons = [T,S,J] array, household consumption
Returns: cons
'''
cons = ((1 + r) * b + w * e * n + bq - b_splus1 * np.exp(p.g_y) -
net_tax) / (1 + tau_c)
return cons
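# Worked example (illustrative values, not from the source): with r = 0.05, w = 1,
# b = 1, b_splus1 = 1, n = 0.5, e = 1, bq = 0, net_tax = 0, g_y = 0 and tau_c = 0,
# the budget constraint gives cons = (1.05 + 0.5 + 0 - 1 - 0) / (1 + 0) = 0.55.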
def FOC_savings(r, w, b, b_splus1, n, bq, factor, T_H, theta, e, rho,
tau_c, etr_params, mtry_params, t, j, p, method):
'''
Computes Euler errors for the FOC for savings in the steady state.
This function is usually looped through over J, so it does one
lifetime income group at a time.
Inputs:
r = scalar, interest rate
w = scalar, wage rate
b = [S,J] array, distribution of wealth/capital
b_splus1 = [S,J] array, distribution of wealth/capital,
one period ahead
b_splus2 = [S,J] array, distribution of wealth/capital, two
periods ahead
n = [S,J] array, distribution of labor supply
BQ = [J,] vector, aggregate bequests by lifetime income
group
factor = scalar, scaling factor to convert model income to
dollars
T_H = scalar, lump sum transfer
params = length 18 tuple (e, sigma, beta, g_y, chi_b,
theta, tau_bq, rho, lambdas, J,
S, etr_params, mtry_params,
h_wealth, p_wealth, m_wealth,
tau_payroll, tau_bq)
e = [S,J] array, effective labor units
sigma = scalar, coefficient of relative risk aversion
beta = scalar, discount factor
g_y = scalar, exogenous labor augmenting technological
growth
chi_b = [J,] vector, utility weight on bequests for each
lifetime income group
theta = [J,] vector, replacement rate for each lifetime
income group
tau_bq = scalar, bequest tax rate (scalar)
rho = [S,] vector, mortality rates
lambdas = [J,] vector, ability weights
J = integer, number of lifetime income groups
S = integer, number of economically active periods in
lifetime
etr_params = [S,12] array, parameters of effective income tax
rate function
mtry_params = [S,12] array, parameters of marginal tax rate on
capital income function
h_wealth = scalar, parameter in wealth tax function
p_wealth = scalar, parameter in wealth tax function
m_wealth = scalar, parameter in wealth tax function
tau_payroll = scalar, payroll tax rate
tau_bq = scalar, bequest tax rate
Functions called:
get_cons
marg_ut_cons
tax.total_taxes
tax.MTR_income
Objects in function:
tax1 = [S,J] array, net taxes in the current period
tax2 = [S,J] array, net taxes one period ahead
cons1 = [S,J] array, consumption in the current period
cons2 = [S,J] array, consumption one period ahead
deriv = [S,J] array, after-tax return on capital
savings_ut = [S,J] array, marginal utility from savings
euler = [S,J] array, Euler error from FOC for savings
Returns: euler
'''
if j is not None:
chi_b = p.chi_b[j]
else:
chi_b = p.chi_b
if method == 'TPI':
r = utils.to_timepath_shape(r, p)
w = utils.to_timepath_shape(w, p)
T_H = utils.to_timepath_shape(T_H, p)
taxes = tax.total_taxes(r, w, b, n, bq, factor, T_H, theta, t, j,
False, method, e, etr_params, p)
cons = get_cons(r, w, b, b_splus1, n, bq, taxes, e, tau_c, p)
deriv = ((1 + r) - r * (tax.MTR_income(r, w, b, n, factor, True, e,
etr_params, mtry_params, p)))
savings_ut = (rho * np.exp(-p.sigma * p.g_y) * chi_b *
b_splus1 ** (-p.sigma))
euler_error = np.zeros_like(n)
if n.shape[0] > 1:
euler_error[:-1] = (marg_ut_cons(cons[:-1], p.sigma) *
(1 / (1 + tau_c[:-1])) - p.beta *
(1 - rho[:-1]) * deriv[1:] *
marg_ut_cons(cons[1:], p.sigma) *
(1 / (1 + tau_c[1:])) * np.exp(-p.sigma * p.g_y)
- savings_ut[:-1])
euler_error[-1] = (marg_ut_cons(cons[-1], p.sigma) *
(1 / (1 + tau_c[-1])) - savings_ut[-1])
else:
euler_error[-1] = (marg_ut_cons(cons[-1], p.sigma) *
(1 / (1 + tau_c[-1])) - savings_ut[-1])
return euler_error
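# Informal restatement of the condition checked above (derived from the code, not
# quoted from the source docs): for each period s < S the error is
#   u'(c_s)/(1 + tau_c_s)
#     - beta*(1 - rho_s)*[(1 + r) - r*MTR_capital]*u'(c_{s+1})/(1 + tau_c_{s+1})*exp(-sigma*g_y)
#     - rho_s*exp(-sigma*g_y)*chi_b*b_{s+1}**(-sigma)
# and in the final period it reduces to
#   u'(c_S)/(1 + tau_c_S) - rho_S*exp(-sigma*g_y)*chi_b*b_{S+1}**(-sigma).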
def FOC_labor(r, w, b, b_splus1, n, bq, factor, T_H, theta, chi_n, e,
tau_c, etr_params, mtrx_params, t, j, p, method):
'''
Computes Euler errors for the FOC for labor supply in the steady
state. This function is usually looped through over J, so it does
one lifetime income group at a time.
Inputs:
r = scalar, interest rate
w = scalar, wage rate
b = [S,J] array, distribution of wealth/capital
holdings
b_splus1 = [S,J] array, distribution of wealth/capital
holdings one period ahead
n = [S,J] array, distribution of labor supply
BQ = [J,] vector, aggregate bequests by lifetime
income group
factor = scalar, scaling factor to convert model income to
dollars
T_H = scalar, lump sum transfer
params = length 19 tuple (e, sigma, g_y, theta, b_ellipse,
upsilon, ltilde, chi_n, tau_bq,
lambdas, J, S, etr_params,
mtrx_params, h_wealth, p_wealth,
m_wealth, tau_payroll, tau_bq)
e = [S,J] array, effective labor units
sigma = scalar, coefficient of relative risk aversion
g_y = scalar, exogenous labor augmenting technological
growth
theta = [J,] vector, replacement rate for each lifetime
income group
b_ellipse = scalar, scaling parameter in elliptical utility
function
upsilon = curvature parameter in elliptical utility function
chi_n = [S,] vector, utility weights on disutility of labor
ltilde = scalar, upper bound of household labor supply
tau_bq = scalar, bequest tax rate (scalar)
lambdas = [J,] vector, ability weights
J = integer, number of lifetime income groups
S = integer, number of economically active periods in
lifetime
etr_params = [S,10] array, parameters of effective income tax
rate function
mtrx_params = [S,10] array, parameters of marginal tax rate on
labor income function
h_wealth = scalar, parameter in wealth tax function
p_wealth = scalar, parameter in wealth tax function
m_wealth = scalar, parameter in wealth tax function
tau_payroll = scalar, payroll tax rate
tau_bq = scalar, bequest tax rate
Functions called:
get_cons
marg_ut_cons
marg_ut_labor
tax.total_taxes
tax.MTR_income
Objects in function:
tax = [S,J] array, net taxes in the current period
cons = [S,J] array, consumption in the current period
deriv = [S,J] array, net of tax share of labor income
euler = [S,J] array, Euler error from FOC for labor supply
Returns: euler
if j is not None:
chi_b = p.chi_b[j]
if method == 'TPI':
r = r.reshape(r.shape[0], 1)
w = w.reshape(w.shape[0], 1)
T_H = T_H.reshape(T_H.shape[0], 1)
else:
chi_b = p.chi_b
if method == 'TPI':
r = utils.to_timepath_shape(r, p)
w = utils.to_timepath_shape(w, p)
T_H = utils.to_timepath_shape(T_H, p)
'''
if method == 'SS':
tau_payroll = p.tau_payroll[-1]
    elif method == 'TPI_scalar':  # for 1st donut ring only
tau_payroll = p.tau_payroll[0]
else:
length = r.shape[0]
tau_payroll = p.tau_payroll[t:t + length]
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
w = w.reshape(w.shape[0], 1)
T_H = T_H.reshape(T_H.shape[0], 1)
tau_payroll = tau_payroll.reshape(tau_payroll.shape[0], 1)
elif b.ndim == 3:
r = utils.to_timepath_shape(r, p)
w = utils.to_timepath_shape(w, p)
T_H = utils.to_timepath_shape(T_H, p)
tau_payroll = utils.to_timepath_shape(tau_payroll, p)
taxes = tax.total_taxes(r, w, b, n, bq, factor, T_H, theta, t, j,
False, method, e, etr_params, p)
cons = get_cons(r, w, b, b_splus1, n, bq, taxes, e, tau_c, p)
deriv = (1 - tau_payroll - tax.MTR_income(r, w, b, n, factor,
False, e, etr_params,
mtrx_params, p))
FOC_error = (marg_ut_cons(cons, p.sigma) * (1 / (1 + tau_c)) * w *
deriv * e - marg_ut_labor(n, chi_n, p))
return FOC_error
def constraint_checker_SS(bssmat, nssmat, cssmat, ltilde):
'''
Checks constraints on consumption, savings, and labor supply in the
steady state.
Inputs:
bssmat = [S,J] array, steady state distribution of capital
nssmat = [S,J] array, steady state distribution of labor
cssmat = [S,J] array, steady state distribution of consumption
ltilde = scalar, upper bound of household labor supply
Functions called: None
Objects in function:
flag2 = boolean, indicates if labor supply constraints violated
(=False if not)
Returns:
# Prints warnings for violations of capital, labor, and
consumption constraints.
'''
print('Checking constraints on capital, labor, and consumption.')
if (bssmat < 0).any():
print('\tWARNING: There is negative capital stock')
flag2 = False
if (nssmat < 0).any():
print('\tWARNING: Labor supply violates nonnegativity ',
'constraints.')
flag2 = True
if (nssmat > ltilde).any():
        print('\tWARNING: Labor supply violates the ltilde constraint.')
flag2 = True
if flag2 is False:
print('\tThere were no violations of the constraints on labor',
' supply.')
if (cssmat < 0).any():
print('\tWARNING: Consumption violates nonnegativity',
' constraints.')
else:
print('\tThere were no violations of the constraints on',
' consumption.')
def constraint_checker_TPI(b_dist, n_dist, c_dist, t, ltilde):
'''
Checks constraints on consumption, savings, and labor supply along
the transition path. Does this for each period t separately.
Inputs:
b_dist = [S,J] array, distribution of capital
n_dist = [S,J] array, distribution of labor
c_dist = [S,J] array, distribution of consumption
t = integer, time period
ltilde = scalar, upper bound of household labor supply
Functions called: None
Objects in function: None
Returns:
# Prints warnings for violations of capital, labor, and
consumption constraints.
'''
if (b_dist <= 0).any():
print('\tWARNING: Aggregate capital is less than or equal to ',
'zero in period %.f.' % t)
if (n_dist < 0).any():
print('\tWARNING: Labor supply violates nonnegativity',
' constraints in period %.f.' % t)
if (n_dist > ltilde).any():
        print('\tWARNING: Labor supply violates the ltilde constraint',
' in period %.f.' % t)
if (c_dist < 0).any():
print('\tWARNING: Consumption violates nonnegativity',
' constraints in period %.f.' % t)
|
from .extension import *
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import sortedm2m.fields
class Migration(migrations.Migration):
dependencies = [
('photologue', '0006_auto_20141028_2005'),
]
operations = [
migrations.AlterField(
model_name='gallery',
name='photos',
field=sortedm2m.fields.SortedManyToManyField(help_text=None, related_name='galleries', verbose_name='photos', to='photologue.Photo', blank=True),
),
migrations.AlterField(
model_name='gallery',
name='sites',
field=models.ManyToManyField(to='sites.Site', verbose_name='sites', blank=True),
),
migrations.AlterField(
model_name='photo',
name='sites',
field=models.ManyToManyField(to='sites.Site', verbose_name='sites', blank=True),
),
]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from typing import *
import os
import sys
import traceback
import re
import click
from click_anno import click_app
from click_anno.types import flag
from fsoopify import NodeInfo, NodeType, FileInfo, DirectoryInfo, SerializeError
from alive_progress import alive_bar
EXTENSION_NAME = '.hash'
ACCEPT_HASH_TYPES = ('sha1', 'md5', 'crc32', 'sha256')
class IHashAccessor:
def can_read(self, f: FileInfo) -> bool:
raise NotImplementedError
def read(self, f: FileInfo) -> Optional[Dict[str, str]]:
raise NotImplementedError
def write(self, f: FileInfo, h: Dict[str, str]):
raise NotImplementedError
class HashFileHashAccessor(IHashAccessor):
@staticmethod
def _get_checksum_file(f: FileInfo):
return FileInfo(f.path + EXTENSION_NAME)
def can_read(self, f: FileInfo) -> bool:
return self._get_checksum_file(f).is_file()
def read(self, f: FileInfo) -> Optional[Dict[str, str]]:
hash_file = self._get_checksum_file(f)
try:
data = hash_file.load('json')
except (SerializeError, IOError):
return None
else:
if isinstance(data, dict):
return data
return None
def write(self, f: FileInfo, h: Dict[str, str]):
hash_file = self._get_checksum_file(f)
hash_file.dump(h, 'json')
class Crc32SuffixHashAccessor(IHashAccessor):
REGEX = re.compile(r'\((?P<crc32>[0-9a-f]{8})\)$', re.I)
@classmethod
def _try_read_crc32(cls, f: FileInfo) -> Optional[str]:
pure_name = str(f.path.name.pure_name)
match = cls.REGEX.search(pure_name)
if match:
return match.group('crc32')
def can_read(self, f: FileInfo) -> bool:
        return self._try_read_crc32(f) is not None
def read(self, f: FileInfo) -> Optional[Dict[str, str]]:
crc32 = self._try_read_crc32(f)
return dict(crc32=crc32)
def write(self, f: FileInfo, h: Dict[str, str]):
raise NotImplementedError
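# Filename convention assumed by Crc32SuffixHashAccessor (hypothetical example,
# assuming pure_name is the file name without its extension): a file called
# "episode 01 (89abcdef).mkv" has pure_name "episode 01 (89abcdef)", so the regex
# extracts crc32 == "89abcdef".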
def _get_hash_value(f: FileInfo, hash_names: List[str]) -> Dict[str, str]:
r = {}
with f.get_hasher(*hash_names) as hasher:
with alive_bar(manual=True) as bar:
while hasher.read_block():
bar(hasher.progress)
for name, val in zip(hash_names, hasher.result):
r[name] = val
return r
def _norm_hashvalue(val):
if isinstance(val, str):
return val.lower()
return None
def verify_file(f: FileInfo, accessor: Optional[IHashAccessor]):
if accessor is None:
def iter_accessors():
yield HashFileHashAccessor()
yield Crc32SuffixHashAccessor()
accessors = iter_accessors()
else:
accessors = (accessor, )
data = None
for accessor in accessors:
data = accessor.read(f)
if data is not None:
break
if data is None:
if f.path.name.ext != EXTENSION_NAME:
            click.echo('Ignoring {}: no checksum found.'.format(
click.style(str(f.path), fg='blue')
))
return
# find hash type:
hash_names = []
saved_hash_value = {}
for hash_name in ACCEPT_HASH_TYPES:
if hash_name in data:
hash_names.append(hash_name)
saved_hash_value[hash_name] = _norm_hashvalue(data[hash_name])
if not hash_names:
        click.echo('Ignoring {}: no known hash algorithm found.'.format(
click.style(str(f.path), fg='blue')
))
return
    click.echo('Verifying {}... '.format(
click.style(str(f.path), fg='blue')
))
actual_hash_value = _get_hash_value(f, hash_names)
click.echo('Result : ', nl=False)
if actual_hash_value == saved_hash_value:
click.echo(click.style("Ok", fg="green") + '.')
else:
click.echo(click.style("Failed", fg="red") + '!')
def create_checksum_file(f: FileInfo, skip_exists: bool, accessor: IHashAccessor):
if skip_exists and accessor.can_read(f):
        click.echo('Skipped {}: checksum already exists.'.format(
click.style(str(f.path), fg='bright_blue')
), nl=True)
return
hash_name = ACCEPT_HASH_TYPES[0]
click.echo('Computing checksum for {}...'.format(
click.style(str(f.path), fg='bright_blue')
), nl=True)
hash_values = _get_hash_value(f, [hash_name])
data = {}
data[hash_name] = hash_values[hash_name]
accessor.write(f, data)
def _collect_files(paths: list, skip_hash_file: bool) -> List[FileInfo]:
'''
    Collect a list of files from the given paths.
'''
collected_files: List[FileInfo] = []
def collect_from_dir(d: DirectoryInfo):
for item in d.list_items():
if item.node_type == NodeType.file:
collected_files.append(item)
elif item.node_type == NodeType.dir:
collect_from_dir(item)
if paths:
for path in paths:
node = NodeInfo.from_path(path)
if node is not None:
if node.node_type == NodeType.file:
collected_files.append(node)
elif node.node_type == NodeType.dir:
collect_from_dir(node)
else:
                click.echo(f'Ignoring {path}: not a file or directory')
# ignore *.hash file
if skip_hash_file:
collected_files = [f for f in collected_files if f.path.name.ext != EXTENSION_NAME]
if collected_files:
click.echo('Found {} files.'.format(
click.style(str(len(collected_files)), fg='bright_blue')
))
else:
click.echo(click.style("Path is required", fg="yellow"))
else:
click.echo(click.style("Path is required", fg="red"))
return collected_files
def make_hash(*paths, skip_exists: flag=True, skip_hash_file: flag=True):
'create *.hash files'
collected_files = _collect_files(paths, skip_hash_file)
accessor = HashFileHashAccessor()
if collected_files:
for f in collected_files:
create_checksum_file(f, skip_exists=skip_exists, accessor=accessor)
def verify_hash(*paths, skip_hash_file: flag=True):
'verify with *.hash files'
collected_files = _collect_files(paths, skip_hash_file)
accessor = None # HashFileHashAccessor()
if collected_files:
for f in collected_files:
verify_file(f, accessor=accessor)
@click_app
class App:
def make(self, *paths, skip_exists: flag=True, skip_hash_file: flag=True):
'create *.hash files'
        make_hash(*paths, skip_exists=skip_exists, skip_hash_file=skip_hash_file)
def verify(self, *paths, skip_hash_file: flag=True):
'verify with *.hash files'
        verify_hash(*paths, skip_hash_file=skip_hash_file)
def main(argv=None):
if argv is None:
argv = sys.argv
try:
App()(argv[1:])
except Exception: # pylint: disable=W0703
traceback.print_exc()
if __name__ == '__main__':
main()
|
from yacs.config import CfgNode as CN
_C = CN()
_C.DEVICE = 'cuda'
_C.MODEL = CN()
_C.MODEL.SCALE_FACTOR = 1
_C.MODEL.DETECTOR_TYPE = 'u-net16' # 'PSPNet'
_C.MODEL.SR = 'DBPN'
_C.MODEL.UP_SAMPLE_METHOD = "deconv" # "pixel_shuffle"
_C.MODEL.DETECTOR_DBPN_NUM_STAGES = 4
_C.MODEL.OPTIMIZER = 'Adam' # SGD
_C.MODEL.NUM_CLASSES = 1
_C.MODEL.NUM_STAGES = 6
_C.MODEL.SR_SEG_INV = False
_C.MODEL.JOINT_LEARNING = True
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 100000
_C.SOLVER.TRAIN_DATASET_RATIO = 0.95
_C.SOLVER.SR_PRETRAIN_ITER = 0
_C.SOLVER.SEG_PRETRAIN_ITER = 0
_C.SOLVER.BATCH_SIZE = 8 # default 16
_C.SOLVER.TASK_LOSS_WEIGHT = 0.5
_C.SOLVER.SEG_LOSS_FUNC = "Dice" # Dice or BCE or WeightedBCE or Boundary or WBCE&Dice or GDC_Boundary
_C.SOLVER.BOUNDARY_DEC_RATIO = 1.0
_C.SOLVER.SR_LOSS_FUNC = "L1" # L1 or Boundary
_C.SOLVER.WB_AND_D_WEIGHT = [6, 1] # [WBCE ratio, Dice ratio]
_C.SOLVER.BCELOSS_WEIGHT = [20, 1] # [True ratio, False ratio]
_C.SOLVER.ALPHA_MIN = 0.01
_C.SOLVER.DECREASE_RATIO = 1.0
_C.SOLVER.SYNC_BATCHNORM = True
_C.SOLVER.NORM_SR_OUTPUT = "all"
_C.SOLVER.LR = 1e-3
_C.SOLVER.LR_STEPS = []
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.WARMUP_FACTOR = 1.0/6
_C.SOLVER.WARMUP_ITERS = 5000
_C.INPUT = CN()
_C.INPUT.IMAGE_SIZE = [448, 448] # H x W
_C.INPUT.MEAN = [0.4741, 0.4937, 0.5048]
_C.INPUT.STD = [0.1621, 0.1532, 0.1523]
_C.DATASET = CN()
_C.DATASET.TRAIN_IMAGE_DIR = 'datasets/crack_segmentation_dataset/train/images'
_C.DATASET.TRAIN_MASK_DIR = 'datasets/crack_segmentation_dataset/train/masks'
_C.DATASET.TEST_IMAGE_DIR = 'datasets/crack_segmentation_dataset/test/images'
_C.DATASET.TEST_MASK_DIR = 'datasets/crack_segmentation_dataset/test/masks'
_C.OUTPUT_DIR = 'output/CSSR_SR-SS'
_C.SEED = 123
_C.BASE_NET = 'weights/vgg16_reducedfc.pth'
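# Typical yacs usage of this node (illustrative sketch; the YAML file name below is
# hypothetical):
#
#   cfg = _C.clone()
#   cfg.merge_from_file('configs/experiment.yaml')
#   cfg.freeze()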
|
import streamlit as st
from streamlit_option_menu import option_menu
from prediction import show_predict_page
from explore import show_explore_page
from PIL import Image
st.set_page_config(
page_title="Predict Heart Disease",
)
with st.sidebar:
page = option_menu("", ["Facts and figures", 'Predict'],
icons=['card-image', 'eyeglasses'], menu_icon="cast", default_index=1)
if page == "Predict":
show_predict_page()
elif page == "Facts and figures":
show_explore_page()
else:
st.write("UNKNOWN ERROR OCCURRED !!")
# image = Image.open('heart.png')
# st.image(image, caption='heart')
|
from config.settings.base import *
# install gdal in virtualenv:
VIRTUAL_ENV = os.environ["VIRTUAL_ENV"]
OSGEO_VENV = os.path.join(VIRTUAL_ENV, "Lib/site-packages/osgeo")
GEOS_LIBRARY_PATH = os.path.join(OSGEO_VENV, "geos_c.dll")
GDAL_LIBRARY_PATH = os.path.join(OSGEO_VENV, "gdal302.dll")
PROJ_LIB = os.path.join(VIRTUAL_ENV, "Lib/site-packages/osgeo/data/proj")
os.environ["GDAL_DATA"] = os.path.join(VIRTUAL_ENV, "Lib/site-packages/osgeo/data/gdal")
os.environ["PROJ_LIB"] = PROJ_LIB
os.environ["PATH"] += os.pathsep + str(OSGEO_VENV)
if not os.path.exists(OSGEO_VENV):
print("Unable to find OSGEO_VENV at {}".format(OSGEO_VENV))
if not os.path.exists(GEOS_LIBRARY_PATH):
print("Unable to find GEOS_LIBRARY_PATH at {}".format(GEOS_LIBRARY_PATH))
if not os.path.exists(GDAL_LIBRARY_PATH):
print("Unable to find GDAL_LIBRARY_PATH at {}".format(GDAL_LIBRARY_PATH))
if not os.path.exists(PROJ_LIB):
print("Unable to find PROJ_LIB at {}".format(PROJ_LIB))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
INTERNAL_IPS = ("127.0.0.1",)
MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"] + MIDDLEWARE
ALLOWED_HOSTS = ["localhost", "127.0.0.1", "192.168.1.167"]
# ALLOWED_HOSTS = ['*']
INSTALLED_APPS += ["debug_toolbar", "django_extensions"]
SECRET_KEY = os.environ.get("SECRET_KEY", "secret")
DATABASES = {
"default": {
"ENGINE": "django.contrib.gis.db.backends.postgis",
"NAME": "fsdviz",
"USER": get_env_variable("PGUSER"),
"PASSWORD": get_env_variable("PGPASSWORD"),
"HOST": "localhost",
}
}
# CORS_ORIGIN_WHITELIST += [
# "localhost:3000",
# ]
|
#!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test program for tiny_par_unittest.py."""
import sys
def main():
if len(sys.argv) > 2:
print(' '.join(sys.argv[2:]))
if len(sys.argv) > 1:
sys.exit(int(sys.argv[1]))
if __name__ == '__main__':
main()
|
from django.db.models.signals import pre_save,post_save
from django.dispatch import receiver
import geoLocApp.models
import geoLocApp.distance
@receiver(pre_save,sender=geoLocApp.models.Coordonnee,dispatch_uid="only_before_registered")
def setDistance(sender, **kwargs):
coordonnee = kwargs["instance"]
position = coordonnee.position
coordonnee.distance = geoLocApp.distance.distance(coordonnee.latitude,position.latitude,coordonnee.longitude,position.longitude)
@receiver(post_save,sender=geoLocApp.models.Position,dispatch_uid="new_position_added")
def new_position(sender,**kwargs):
    if kwargs['created']:
        return kwargs['instance']
else:
return 0
|
import numpy as np
from torch import nn
from torch.nn import functional as F
from model.utils.bbox_tools import generate_anchor_base
from model.utils.creator_tool import ProposalCreator
def normal_init(m, mean, stddev):
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
# Region Proposal Network structure
class RegionProposalNetwork(nn.Module):
def __init__(self, in_channels=512, mid_channels=512, ratios=[0.5, 1, 2],
anchor_scales=[8, 16, 32], feat_stride=16, proposal_creator_params=dict()):
super(RegionProposalNetwork, self).__init__()
self.anchor_base = generate_anchor_base(anchor_scales=anchor_scales, ratios=ratios)
self.feat_stride = feat_stride
self.proposal_layer = ProposalCreator(self, **proposal_creator_params)
n_anchor = self.anchor_base.shape[0]
self.conv1 = nn.Conv2d(in_channels, mid_channels, 3, 1, 1)
self.score = nn.Conv2d(mid_channels, n_anchor*2, 1, 1, 0)
self.loc = nn.Conv2d(mid_channels, n_anchor*4, 1, 1, 0)
normal_init(self.conv1, 0, 0.01)
normal_init(self.score, 0, 0.01)
normal_init(self.loc, 0, 0.01)
def forward(self, x, img_size, scale=1.):
n, _, height, width = x.shape
rois = list()
roi_indices = list()
anchor = _enumerate_shifted_anchor(np.array(self.anchor_base), self.feat_stride, height, width)
n_anchor = anchor.shape[0]//(height*width)
x = self.conv1(x)
h = F.leaky_relu(x)
rpn_locs = self.loc(h)
rpn_scores = self.score(h)
rpn_locs = rpn_locs.permute(0, 2, 3, 1).contiguous().view(n, -1, 4)
rpn_scores = rpn_scores.permute(0, 2, 3, 1).contiguous().view(n, -1, 2)
rpn_softmax_scores = F.softmax(rpn_scores.view(n, height, width, n_anchor, 2), dim=4)
rpn_fg_scores = rpn_softmax_scores[:, :, :, :, 1].contiguous().view(n, -1)
for i in range(n):
roi = self.proposal_layer(rpn_locs[i].cpu().data.numpy(), rpn_fg_scores[i].cpu().data.numpy(), anchor, img_size, scale=scale)
batch_index = i * np.ones((len(roi),), dtype=np.int32)
rois.append(roi)
roi_indices.append(batch_index)
rois = np.concatenate(rois, axis=0)
roi_indices = np.concatenate(roi_indices, axis=0)
return rpn_locs, rpn_scores, rois, roi_indices, anchor
def _enumerate_shifted_anchor(anchor_base, feat_stride, height, width):
shift_x = np.arange(0, width*feat_stride, feat_stride)
shift_y = np.arange(0, height*feat_stride, feat_stride)
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shift = np.stack((shift_y.ravel(), shift_x.ravel(), shift_y.ravel(), shift_x.ravel()), axis=1)
# Get shifted anchors
A = anchor_base.shape[0]
K = shift.shape[0]
anchor = anchor_base.reshape((1, A, 4)) + shift.reshape((1, K, 4)).transpose((1, 0, 2))
anchor = anchor.reshape((K * A, 4)).astype(np.float32)
return anchor
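# Shape check (illustrative): with the defaults above (3 ratios x 3 scales = 9 base
# anchors, feat_stride = 16), a 38 x 50 feature map gives
# _enumerate_shifted_anchor(...).shape == (38 * 50 * 9, 4) == (17100, 4).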
|
import numpy as np
from patsy import dmatrix
from pywt import wavedec, waverec, dwt_max_level
from ..blocks import block_apply
from ...channel_map import ChannelMap
from ...devices.electrode_pinouts import get_electrode_map
def poly_residual(field: np.ndarray, channel_map: ChannelMap, mu: bool=False, order: int=2):
"""
Subtract a polynomial spatial trend from an array recording.
Parameters
----------
field:
channel_map:
mu:
order:
Returns
-------
resid: np.ndarray
Residual (field minus trend)
"""
ii, jj = channel_map.to_mat()
u = np.ones_like(ii)
X = u[:, np.newaxis]
if order > 0:
X = np.c_[u, ii, jj]
if order > 1:
X = np.c_[X, ii * jj, ii ** 2, jj ** 2]
beta = np.linalg.lstsq(X, field, rcond=None)[0]
if mu:
return np.dot(X, beta)
return field - np.dot(X, beta)
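# Design-matrix sketch (informal restatement of the code): for order 2, each channel
# value at grid position (i, j) is regressed on the columns [1, i, j, i*j, i**2, j**2],
# and the returned residual is the field minus that least-squares fit; despeckle_fields
# below can then remove pin-group offsets without also removing the smooth spatial trend.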
def despeckle_fields(field: np.ndarray, channel_map: ChannelMap, pin_code: np.ndarray, trend_order: int=2,
board_code: np.ndarray=None, return_design: bool=False):
"""
Subtract offsets common to pinout groups that can appear as "speckling" in array images (depending on how the
pins map to electrode sites). The pin groups may also be nested in a second level of grouping that
corresponds to multiple acquisition boards.
Parameters
----------
field :
channel_map :
pin_code :
trend_order :
board_code :
return_design :
Returns
-------
"""
pin_code = np.asarray(pin_code)
factors = dict(pins=pin_code > 0)
if board_code is not None:
board_code = np.asarray(board_code)
factors['board'] = board_code
dm = dmatrix('~0 + C(pins):C(board)', factors)
else:
dm = dmatrix('~0 + C(pins)', factors)
if return_design:
return dm
p_resid = poly_residual(field, channel_map, order=trend_order)
poly = field - p_resid
rho = np.linalg.lstsq(dm, p_resid, rcond=None)[0]
p_resid -= np.dot(dm, rho)
return poly + p_resid
def despeckle_recording(field: np.ndarray, channel_map: ChannelMap,
map_lookup: tuple=(), pin_info: tuple=(),
wavelet: str='haar', block_size: int=10000, trend_order: int=2):
# method 1) lookup the pinout codes from channel map name and other info
if map_lookup:
map_name, map_mask, map_connectors = map_lookup
pin_code, board_code = get_electrode_map(map_name, connectors=map_connectors, pin_codes=True)[-2:]
if map_mask is not None:
pin_code = np.asarray(pin_code)[map_mask]
if board_code is not None:
board_code = np.asarray(board_code)[map_mask]
elif pin_info:
pin_code, board_code = pin_info
else:
raise ValueError('Need either map info or pin info to determine pin coding')
def despeckle_block(block):
n = block.shape[1]
if wavelet:
levels = dwt_max_level(n, wavelet)
block_coefs = wavedec(block, wavelet, level=levels - 3)
else:
block_coefs = [block]
corrected = list()
for b in block_coefs:
c = despeckle_fields(b, channel_map, pin_code, board_code=board_code, trend_order=trend_order)
corrected.append(c)
if wavelet:
corrected = waverec(corrected, wavelet)
else:
corrected = corrected[0]
return corrected
return block_apply(despeckle_block, block_size, (field,))
|
"""Clean processed folder.
Usage: clean [--dir=<directory>]
"""
import argparse
import pathlib
import shutil
import os
def main(source_dir):
if source_dir.exists():
shutil.rmtree(source_dir)
zip_path = source_dir.parent / "manuscript.zip"
if zip_path.exists():
os.remove(zip_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Clean processed folder.')
parser.add_argument(
'--dir',
        type=pathlib.Path,
default=pathlib.Path(__file__).parent.parent / 'condensed'
)
args = parser.parse_args()
main(args.dir)
|
# This filter procedurally generates 4 structures within the selection box within defined limits
# This filter: mcgreentn@gmail.com (mikecgreen.com)
import time # for timing
from math import sqrt, tan, sin, cos, pi, ceil, floor, acos, atan, asin, degrees, radians, log, atan2, acos, asin
from random import *
from numpy import *
from pymclevel import alphaMaterials, MCSchematic, MCLevel, BoundingBox
from mcplatform import *
import utilityFunctions as utilityFunctions
#inputs are taken from the user. Here I've just showing labels, as well as letting the user define
# what the main creation material for the structures is
inputs = (
("Cellular Automata SG Example", "label"),
("Material", alphaMaterials.Cobblestone), # the material we want to use to build the mass of the structures
("Creator: Michael Green", "label"),
)
# MAIN SECTION #
# Every agent must have a "perform" function, which has three parameters
# 1: the level (aka the minecraft world). 2: the selected box from mcedit. 3: User defined inputs from mcedit
def perform(level, box, options):
yards = binaryPartition(box)
# for each quadrant
for yard in yards:
buildFence(level, yard)
buildStructure(level, yard, options)
#splits the given box into 4 unequal areas
def binaryPartition(box):
partitions = []
# create a queue which holds the next areas to be partitioned
queue = []
queue.append(box)
# for as long as the queue still has boxes to partition...
count = 0
while len(queue) > 0:
count += 1
splitMe = queue.pop(0)
(width, height, depth) = utilityFunctions.getBoxSize(splitMe)
# print "Current partition width,depth",width,depth
centre = 0
# this bool lets me know which dimension I will be splitting on. It matters when we create the new outer bound size
isWidth = False
# find the larger dimension and divide in half
# if the larger dimension is < 10, then block this from being partitioned
minSize = 12
if width > depth:
            # roll a random die, 1% chance we stop anyway
chance = random.randint(100)
if depth < minSize or chance == 1:
partitions.append(splitMe)
continue
isWidth = True
centre = width / 2
else:
chance = random.randint(10)
if width < minSize or chance == 1:
partitions.append(splitMe)
continue
centre = depth / 2
# a random modifier for binary splitting which is somewhere between 0 and 1/16 the total box side length
randomPartition = random.randint(0, (centre / 8) + 1)
# creating the new bound
newBound = centre + randomPartition
#creating the outer edge bounds
        outsideNewBound = 0
if isWidth:
outsideNewBound = width - newBound - 1
else:
outsideNewBound = depth - newBound - 1
# creating the bounding boxes
# NOTE: BoundingBoxes are objects contained within pymclevel and can be instantiated as follows
# BoundingBox((x,y,z), (sizex, sizey, sizez))
        # in this instance, you specify which corner to start at, and then the size of the box dimensions
# this is an if statement to separate out binary partitions by dimension (x and z)
if isWidth:
queue.append(BoundingBox((splitMe.minx, splitMe.miny, splitMe.minz), (newBound-1, 256, depth)))
queue.append(BoundingBox((splitMe.minx + newBound + 1, splitMe.miny, splitMe.minz), (outsideNewBound - 1, 256, depth)))
else:
queue.append(BoundingBox((splitMe.minx, splitMe.miny, splitMe.minz), (width, 256, newBound - 1)))
queue.append(BoundingBox((splitMe.minx, splitMe.miny, splitMe.minz + newBound + 1), (width, 256, outsideNewBound - 1)))
return partitions
# builds a wooden fence around the perimeter of this box, like this photo
# Top - zmax
# ----------------
# | |
# | |
# | |
# Left | | Right
# xmin | | xmax
# | |
# | |
# ----------------
# Bottom - zmin
def buildFence(level, box):
# side by side, go row/column by row/column, and move down the pillar in the y axis starting from the top
# look for the first non-air tile (id != 0). The tile above this will be a fence tile
# add top fence blocks
for x in range(box.minx, box.maxx):
for y in xrange(box.maxy, box.miny, -1):
# get this block
tempBlock = level.blockAt(x, y, box.maxz)
if tempBlock != 0:
newValue = 0
utilityFunctions.setBlock(level, (85, newValue), x, y+1, box.maxz)
break;
# add bottom fence blocks (don't double count corner)
for x in range(box.minx, box.maxx):
for y in xrange(box.maxy, box.miny, -1):
# get this block
tempBlock = level.blockAt(x, y, box.minz)
if tempBlock != 0:
newValue = 0
utilityFunctions.setBlock(level, (85, newValue), x, y+1, box.minz)
break;
# add left fence blocks (don't double count corner)
for z in range(box.minz+1, box.maxz):
for y in xrange(box.maxy, box.miny, -1):
# get this block
tempBlock = level.blockAt(box.minx, y, z)
if tempBlock != 0:
newValue = 0
utilityFunctions.setBlock(level, (85, newValue), box.minx, y+1, z)
break;
# add right fence blocks
for z in range(box.minz, box.maxz+1):
for y in xrange(box.maxy, box.miny, -1):
# get this block
tempBlock = level.blockAt(box.maxx, y, z)
if tempBlock != 0:
newValue = 0
utilityFunctions.setBlock(level, (85, newValue), box.maxx, y+1, z)
break;
# builds a structure (the material of which is specified by user in inputs) within the given box
# 4 steps:
# 1. decide the floor plan, map out the foundations of the building, build floor
# 2. create corner pillars
# 3. between each pair of pillars, use Cellular Automata to build a wall
# 4. create the ceiling at the computed building height
def buildStructure(level, box, options):
floor = makeFloorPlan(level, box)
buildingHeightInfo = createPillars(level, floor, options)
generateWalls(level, floor, buildingHeightInfo, options)
generateCeiling(level, floor, buildingHeightInfo, options)
def makeFloorPlan(level, box):
# we have to first figure out where in the box this is going to be
# find the box dimensions
(width, height, depth) = utilityFunctions.getBoxSize(box)
# get sixths
fractionWidth = width / 6
fractionDepth = depth / 6
# create the box boundaries
randFracx = random.randint(0, fractionWidth+1)
randFracz = random.randint(0, fractionDepth+1)
xstart = box.minx + randFracx + 2
zstart = box.minz + randFracz + 2
xsize = width * 0.6 - randFracx
zsize = depth * 0.6 - randFracz
floorplan = BoundingBox((xstart, box.miny, zstart), (xsize, box.maxy, zsize))
return floorplan
# we need to create the corners for the walls.
#Every building needs corners for stability...unless you are inventive... :)
def createPillars(level, floor, options):
cornerBlockStarts = []
ycoords = []
# similarly to fences, we need to countdown on each of the four corners and find the block where the ground starts, then start building pillars above that height
midpointFloorHeight = 0
for y in xrange(floor.maxy, floor.miny, -1):
# get this block
tempBlock = level.blockAt(floor.minx, y, floor.minz)
if tempBlock != 0:
cornerBlockStarts.append((floor.minx, y+1, floor.minz))
break;
for y in xrange(floor.maxy, floor.miny, -1):
# get this block
tempBlock = level.blockAt(floor.minx, y, floor.maxz)
if tempBlock != 0:
cornerBlockStarts.append((floor.minx, y+1, floor.maxz))
break;
for y in xrange(floor.maxy, floor.miny, -1):
# get this block
tempBlock = level.blockAt(floor.maxx, y, floor.minz)
if tempBlock != 0:
cornerBlockStarts.append((floor.maxx, y+1, floor.minz))
break;
for y in xrange(floor.maxy, floor.miny, -1):
# get this block
tempBlock = level.blockAt(floor.maxx, y, floor.maxz)
if tempBlock != 0:
cornerBlockStarts.append((floor.maxx, y+1, floor.maxz))
break;
# now we have all four corners. for each, pick a random y value between 5 and 45, and build up using stone
ystartCoordMax = -10000
for cornerstone in cornerBlockStarts:
midpointFloorHeight += cornerstone[1]
if(cornerstone[1] > ystartCoordMax):
ystartCoordMax = cornerstone[1]
pillarheight = random.randint(5, 45)
for y in range(0, pillarheight):
utilityFunctions.setBlock(level, (options["Material"].ID,0), cornerstone[0], cornerstone[1]+y, cornerstone[2])
if(y==pillarheight-1):
# add y to our y coords, which will be used to determine building height for the roof
ycoords.append(y)
allYs = 0
for ycoord in ycoords:
allYs += ycoord
yavg = allYs / 4
midpointFloorHeight = midpointFloorHeight / 4
# print("Average pillar height: ", yavg)
return (yavg, ystartCoordMax, midpointFloorHeight)
# the walls of the building are each generated using an independent cellular automaton. We look at the immediate neighborhood and take action
# cellular automata is done in 3 easy steps
# 1. intitialize with random block placement in the space
# 2. evaluate each cell, checking its neighbors to gauge changes
# 3. repeat 2 until satisfied
def generateWalls(level, floor, buildingHeightInfo, options):
print "Generating walls"
# actual automata is going to be simulated in a matrix (it's much faster than rendering it in minecraft)
# first we should define the matrix properties (i.e. width and height)
(width, boxheight, depth) = utilityFunctions.getBoxSize(floor)
height = buildingHeightInfo[0]
print "X walls"
for k in range(2):
# we have our matrix for CA, now lets do CA
matrix = [[0 for x in range(width)] for y in range(height)]
matrixnew = randomlyAssign(matrix, width, height)
# do 3 generations
for gen in range(0,2):
# print "Generation ", gen
matrixnew = cellularAutomataGeneration(matrixnew, width, height)
#after generation is over, place the walls according to the wall matrix, starting at the floor
for y in range(height):
for x in range(1,width):
if k==1:
# print "boom 1"
if matrixnew[y][x] == 1:
utilityFunctions.setBlock(level, (options["Material"].ID, 0), floor.minx+x, buildingHeightInfo[2] + y, floor.minz)
else:
utilityFunctions.setBlock(level, (20, 0), floor.minx+x, buildingHeightInfo[2] + y, floor.minz)
else:
# print "boom 2"
if matrixnew[y][x] == 1:
utilityFunctions.setBlock(level, (options["Material"].ID, 0), floor.minx+x, buildingHeightInfo[2] + y, floor.maxz)
else:
utilityFunctions.setBlock(level, (20, 0), floor.minx+x, buildingHeightInfo[2] + y, floor.maxz)
print "Z Walls"
for k in range(2):
# we have our matrix for CA, now lets do CA
matrix = [[0 for x in range(depth)] for y in range(height)]
matrixnew = randomlyAssign(matrix, depth, height)
# do 25 generations
for gen in range(0,25):
print "Generation ", gen
matrixnew = cellularAutomataGeneration(matrixnew, depth, height)
#after generation is over, place the walls according to the wall matrix, starting at the floor
for y in range(height):
for z in range(1,depth):
if k==1:
# print "boom 3"
if matrixnew[y][z] == 1:
utilityFunctions.setBlock(level, (options["Material"].ID, 0), floor.minx, buildingHeightInfo[2] + y, floor.minz+z)
else:
utilityFunctions.setBlock(level, (20, 0), floor.minx, buildingHeightInfo[2] + y, floor.minz+z)
else:
# print "boom 4"
if matrixnew[y][z] == 1:
utilityFunctions.setBlock(level, (options["Material"].ID, 0), floor.maxx, buildingHeightInfo[2] + y, floor.minz+z)
else:
utilityFunctions.setBlock(level, (20, 0), floor.maxx, buildingHeightInfo[2] + y, floor.minz+z)
def randomlyAssign(matrix, width, height):
print 'randomly assigning to matrix'
for j in range(height):
for i in range(width):
# print j,i
matrix[j][i] = random.randint(0,2)
return matrix
def cellularAutomataGeneration(matrix, width, height):
for j in range(height):
for i in range(width):
# print j,i
if j == 0 : #special case for bottom
matrix[j][i] = decideCell(1, matrix[j+1][i])
elif j == height-1 : #special case for top
matrix[j][i] = decideCell(matrix[j-1][i], 1)
else:
matrix[j][i] = decideCell(matrix[j-1][i], matrix[j+1][i])
return matrix
# the rules for cellular automata are as follows:
# look above and below me.
# If one of my neighbors is 0, I have a 50% chance to be 0
# If both of my neighbors are 0, I am a 1
# If both of my neighbors are 1, I am a 0
def decideCell(top, bottom):
if top + bottom == 1:
chance = random.randint(0, 100)
if chance < 50:
return 0
else:
return 1
elif top + bottom == 0:
return 1
elif top + bottom == 2:
return 0
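# Truth table for the rules above: decideCell(0, 0) -> 1, decideCell(1, 1) -> 0, and
# decideCell(0, 1) / decideCell(1, 0) -> 0 or 1 with equal probability.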
# puts a cap on the building in question
# uses the floor to determine the celing size, and the buildingHeightInfo tuple
# to place it at the right level
def generateCeiling(level, floor, buildingHeightInfo, options):
print "generating ceiling"
for x in range(floor.minx, floor.maxx+1):
for z in range(floor.minz, floor.maxz+1):
utilityFunctions.setBlock(level, (options["Material"].ID, 0), x, buildingHeightInfo[2] + buildingHeightInfo[0], z)
# scan all the blocks above me and make them air (all the way to maxy)
for y in range(buildingHeightInfo[2] + buildingHeightInfo[0] + 1, 256):
utilityFunctions.setBlock(level, (0, 0), x, y, z)
|
"""Basic toolkit for asynchronous task communication / management using friendly threaded queues."""
# USE EXAMPLES & TESTING TO BE COMPLETED.
from threading import Thread
from multiprocessing import Pool, Process
from multiprocessing.context import TimeoutError as TimesUpPencilsDown
from multiprocessing import Queue as MultiQueue
from queue import Queue
from queue import Empty as EmptyQueue
from constants import KILL, DONE, CZEC
def clear_and_close_queues(*queues):
"""
Close an arbitrary-length tuple of multiprocessing and queue lib queue(s).
:Parameters:
:param queues: queue.Queues and/or multiprocessing.Queues to be cleared and closed.
:rtype: None
:return: None
"""
for queue in queues:
clear_queues(queue)
try:
queue.close()
except AttributeError:
with queue.mutex:
queue.queue.clear()
def clear_queues(*queues):
"""
Remove remaining items in an arbitrary-length tuple of multiprocessing and queue lib queue(s).
:Parameters:
:param queues: queue.Queues and/or multiprocessing.Queues to be cleared.
:rtype: None
:return: None
"""
for queue in queues:
try:
while not queue.empty():
_ = queue.get()
except OSError:
pass
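# Illustrative usage (queue names are hypothetical): draining and closing both kinds
# of queues at shutdown.
#
#   work_q = MultiQueue()
#   ui_q = Queue()
#   ...
#   clear_and_close_queues(work_q, ui_q)
#
# Multiprocessing queues get .close(); queue.Queue has no .close(), so the
# AttributeError fallback clears it under its mutex instead.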
class ProcessHost(object):
"""
Multiprocessing/threading object which can be accessed directly within libraries like Tkinter.
Send a target function to an instance of this class during __init__ or make_single_process_handler, and it will
ensure the process completes, and pass any queue return messages to the function provided in message_callback
during __init__.
"""
def __init__(self, root, message_callback, process_target=None, *process_args,
message_check_delay=1000,
running_check_delay=10000,
run_process=True,
host_to_process_signals=None,
finished_signal=DONE,
kill_signal=KILL,
check_signal=CZEC,
**process_kwarg_dict):
"""Create private inter-process communication for a potentially newly started process.
:Parameters:
:param Tkinter.Tk root: root / object with .after(delay, callback) used for scheduling.
:param function message_callback: function / method used to process a message if received.
:param function process_target: function / method to be run asynchronously.
:param process_args: positional arguments to be passed to process_target.
:param int message_check_delay: how often message checks are scheduled using root.
:param int running_check_delay: how often checks on whether the subprocess is running are run.
:param bool run_process: determines whether to run the process_target immediately after __init__.
:param set host_to_process_signals: messages for the asynchronous process which may be sent to the handler.
:param str finished_signal: message to be used to indicate that the asynchronous process finished.
:param str kill_signal: message to be used to finish the asynchronous process early.
:param str check_signal: message to be used to check if the asynchronous process is still alive.
:param process_kwarg_dict: dictionary to be passed to process_target.
:rtype: None
:return: None
"""
self.root = root
self.message_check_rate = message_check_delay
self.running_check_delay = running_check_delay
self.message_callback = message_callback
self._to_host_queue = Queue() # Please respect the privacy of these attributes. Altering them without
self._to_handler_queue = MultiQueue() # consideration for processes relying on their private state can have
self.kill_signal = kill_signal # unintended process opening / closing, especially with reuse.
self.finished_signal = finished_signal
self.check_signal = check_signal
self.is_running = False
self._continue_running = run_process
self._current_processor = None
assert (self.kill_signal
!= self.finished_signal
!= self.check_signal
!= self.kill_signal), "Use unique built-in queue signals."
self.process_end_signals = {self.kill_signal, self.finished_signal, self.check_signal}
if process_target is not None and run_process:
self.make_single_process_handler(process_target,
*process_args,
host_to_process_signals=host_to_process_signals,
**process_kwarg_dict)
def make_single_process_handler(self, process_target, *process_args,
host_to_process_signals=None,
**process_kwarg_dict):
"""
Create a process handler and Tkinter threads for process callbacks.
:Parameters:
:param function process_target: function / method to be run asynchronously.
:param process_args: positional arguments to be passed to process_target.
:param set host_to_process_signals: messages for the asynchronous process which may be sent to the handler.
:param process_kwarg_dict: dictionary to be passed to process_target.
:rtype: None
:return: None
"""
assert not self.is_running, ("Please create a new SingleProcessHandler to start another process while this one "
"is still running.")
_handler_to_process_queue = MultiQueue() if host_to_process_signals else None
self._continue_running = True
self.is_running = True
self._current_processor = SingleProcessHandler(process_target,
self._to_handler_queue,
self._to_host_queue, *process_args,
handler_to_process_queue=_handler_to_process_queue,
finished_signal=self.finished_signal,
kill_signal=self.kill_signal,
check_signal=self.check_signal,
host_to_process_signals=host_to_process_signals,
**process_kwarg_dict)
self._current_processor.start()
self.root.after(self.message_check_rate, self.check_message)
self.root.after(self.running_check_delay, self.check_running)
def send_signal(self, signal):
"""
Send signal to other process.
:Parameters:
:param signal: pickle-able object sent to subprocess.
:rtype: None
:return: None
"""
self._to_handler_queue.put(signal)
def check_message(self, *, message_callback=None):
"""
Initiate callbacks from inter-process communication.
:Parameters:
:param function message_callback: function / method used to process a message if received. Currently
determines if check_message is subsequently called as well. If intending to check for a message
independent of the auto-check (and not intending to start another check_message thread chain)
pass self.message_callback or any other function as a parameter to prevent the check_message chain.
:rtype: None
:return: None
"""
if message_callback is None:
say_check_one_more_time = self._continue_running
message_callback = self.message_callback
else:
say_check_one_more_time = False
try:
if not self._to_host_queue.empty():
try:
msg = self._to_host_queue.get_nowait()
except EmptyQueue:
pass
else:
if isinstance(msg, str):
# print("{} for host.".format(msg))
if msg in self.process_end_signals:
say_check_one_more_time = False
self.kill_process(need_to_signal=False)
message_callback(msg)
finally:
if say_check_one_more_time:
self.root.after(self.message_check_rate, self.check_message)
elif say_check_one_more_time:
self.root.after(self.message_check_rate, self.check_message)
except AttributeError:
self.kill_process()
def kill_process(self, *, need_to_signal=True):
"""
End current process / clear queues.
:Parameters:
:param bool need_to_signal: determines if a signal is sent to the process handler to end. Needs to be
True unless a signal has already been sent to the process handler.
:rtype: None
:return: None
"""
self._continue_running = False
if (self._current_processor is not None
and self._current_processor.is_alive()):
if need_to_signal:
self._to_handler_queue.put(self.kill_signal)
clear_queues(self._to_host_queue)
self._current_processor.join()
clear_queues(self._to_host_queue, self._to_handler_queue)
self._current_processor = None
self.is_running = False
def check_running(self):
"""
Maintain communication with subprocess to ensure it's running.
:rtype: None
:return: None
"""
if (self._continue_running
and self._current_processor is not None
and self._current_processor.is_alive()):
self._to_handler_queue.put(self.check_signal)
self.root.after(self.running_check_delay, self.check_running)
class GreedyProcessHost(ProcessHost):
"""
Multiprocessing/threading object which can be accessed directly within libraries like Tkinter.
Send a target function to an instance of this class during __init__ or make_single_process_handler, and it will
    ensure the process completes, and pass only the most recent queued return message to the function provided in
    message_callback during __init__ - while keeping the rest to itself. (Rude.)
"""
def __init__(self, *args, **kwargs):
"""Create private inter-process communication for a potentially newly started process.
:Parameters:
:param Tkinter.Tk root: root / object with .after(delay, callback) used for scheduling.
:param function message_callback: function / method used to process the most recent message if received.
:param function process_target: function / method to be run asynchronously.
:param process_args: positional arguments to be passed to process_target.
:param int message_check_delay: how often message checks are scheduled using root.
:param int running_check_delay: how often checks on whether the subprocess is running are run.
:param bool run_process: determines whether to run the process_target immediately after __init__.
:param set host_to_process_signals: messages for the asynchronous process which may be sent to the handler.
:param str finished_signal: message to be used to indicate that the asynchronous process finished.
:param str kill_signal: message to be used to finish the asynchronous process early.
:param str check_signal: message to be used to check if the asynchronous process is still alive.
:param process_kwarg_dict: dictionary to be passed to process_target.
:rtype: None
:return: None
"""
super(GreedyProcessHost, self).__init__(*args, **kwargs)
def check_message(self, *, message_callback=None):
"""
        Initiate callbacks from inter-process communication. Overrides the original check_message method in order
        to pull only the most recent queue item.
:Parameters:
:param function message_callback: function / method used to process a message if received. Currently
determines if check_message is subsequently called as well. If intending to check for a message
independent of the auto-check (and not intending to start another check_message thread chain)
pass self.message_callback or any other function as a parameter to prevent the check_message chain.
:rtype: None
:return: None
"""
if message_callback is None:
say_check_one_more_time = self._continue_running
message_callback = self.message_callback
else:
say_check_one_more_time = False
try:
if not self._to_host_queue.empty():
msg = None
try:
while not self._to_host_queue.empty():
msg = self._to_host_queue.get_nowait()
except EmptyQueue:
pass
else:
if isinstance(msg, str):
# print("{} for greedy host.".format(msg))
if msg in self.process_end_signals:
say_check_one_more_time = False
self.kill_process(need_to_signal=False)
message_callback(msg)
finally:
if say_check_one_more_time:
self.root.after(self.message_check_rate, self.check_message)
elif say_check_one_more_time:
self.root.after(self.message_check_rate, self.check_message)
except AttributeError:
self.kill_process()
class SingleProcessHandler(Thread):
"""Manages single asynchronous processes - nothing in this object should be interacted with directly."""
def __init__(self, process_target, to_handler_queue, handler_to_host_queue, *process_args,
handler_to_process_queue=None,
finished_signal=DONE,
kill_signal=KILL,
check_signal=CZEC,
host_to_process_signals=None,
**process_kwarg_dict):
"""
Set runtime attributes for multi-process communication / management.
:Parameters:
:param function process_target: function / method to be run asynchronously.
:param multiprocessing.Queue to_handler_queue: queue for all communications sent to this class instance.
:param queue.Queue handler_to_host_queue: queue for communications to the host process from this instance.
:param process_args: positional arguments to be passed to process_target.
:param multiprocessing.Queue handler_to_process_queue: queue for communications from the host process
to the running asynchronous process_target.
:param str finished_signal: message to be used to indicate that the asynchronous process finished.
:param str kill_signal: message to be used to finish the asynchronous process early.
:param str check_signal: message to be used to check if the asynchronous process is still alive.
:param set host_to_process_signals: messages for the asynchronous process which may be sent to the handler.
:param process_kwarg_dict: dictionary to be passed to process_target.
:rtype: None
:return: None
"""
Thread.__init__(self)
self.host_to_process_signals = host_to_process_signals if host_to_process_signals else {}
self.kill_signal = kill_signal
self.finished_signal = finished_signal
self.check_signal = check_signal
assert (self.kill_signal
!= self.finished_signal
!= self.check_signal
!= self.kill_signal), "Use unique built-in queue signals."
self.end_sigs = {self.kill_signal, self.finished_signal}
self.handler_to_host_queue = handler_to_host_queue
self.handler_to_process_queue = handler_to_process_queue
self.to_handler_queue = to_handler_queue
self.process_target = process_target
self.process_args = None
self._import_process_args(process_args, process_kwarg_dict)
self.handled_process = None
def _import_process_args(self, process_args=None, process_kwarg_dict=None):
"""
Create the tuple of process args needed for multiprocessing.Process.
:Parameters:
:param process_args: positional arguments to be passed to process_target.
:param process_kwarg_dict: dictionary to be passed to process_target.
:rtype: None
:return: None
"""
if process_args and process_kwarg_dict:
self.process_args = process_args + (process_kwarg_dict,)
elif process_args:
self.process_args = process_args
elif process_kwarg_dict:
self.process_args = (process_kwarg_dict,)
if self.handler_to_process_queue:
if self.process_args:
self.process_args = (self.to_handler_queue, self.handler_to_process_queue) + self.process_args
else:
self.process_args = (self.to_handler_queue, self.handler_to_process_queue)
elif self.process_args:
self.process_args = (self.to_handler_queue,) + self.process_args
else:
self.process_args = (self.to_handler_queue,)
def run(self):
"""
Start / maintain process communication.
:rtype: None
:return: None
"""
self.handled_process = Process(target=self.process_target,
args=self.process_args)
self.handled_process.start()
should_run = True
while should_run:
should_run = self._process_queues()
def _process_queues(self):
"""
Transmit / interpret signals between processes.
:rtype: bool
:return bool should_run: determine whether the run() loop should continue.
"""
should_run = True
msg = self.to_handler_queue.get()
if isinstance(msg, str):
# print("{} for handler.".format(msg))
if msg in self.end_sigs:
self._kill_process()
self.handler_to_host_queue.put(msg)
should_run = False
elif msg == self.check_signal:
if not self.handled_process.is_alive():
self.handler_to_host_queue.put(msg)
should_run = False
elif msg in self.host_to_process_signals:
self.handler_to_process_queue.put(msg)
else:
self.handler_to_host_queue.put(msg)
else:
self.handler_to_host_queue.put(msg)
return should_run
def _kill_process(self):
"""
Handle queue / process cleanup for end-process signals.
:rtype: None
:return: None
"""
if self.handled_process is not None:
if self.handler_to_process_queue:
self._okay_maybe_some_tears_but_be_quick()
else:
self._shh_no_more_tears(self.handled_process, self.to_handler_queue)
self.handled_process = None
def _okay_maybe_some_tears_but_be_quick(self):
"""
Close process while allowing for one to-process-queue signal for cleanup.
:rtype: None
:return: None
"""
self.handler_to_process_queue.put(self.kill_signal)
self.handler_to_process_queue = None
while True:
msg = self.to_handler_queue.get()
if isinstance(msg, self.finished_signal.__class__):
if msg == self.finished_signal:
break
self._shh_no_more_tears(self.handled_process, self.to_handler_queue)
@classmethod
def _shh_no_more_tears(cls, process, queue_process_populates):
"""
Close process without queue signal for cleanup.
:rtype: None
:return: None
"""
if process.is_alive():
process.terminate()
clear_and_close_queues(queue_process_populates)
process.join()
class PoolProcessHandler(Thread):
"""Manages pool'd asynchronous processes."""
def __init__(self, run_target, return_queue, pool_args, *, pool_size=4, time_limit=15):
"""
Set runtime attributes for a pooled multiprocessing application.
:Parameters:
:param function run_target: function / method to be run asynchronously - called once per pool_arg.
:param queue.Queue return_queue: queue to return the results of run_target(s).
:param list pool_args: list of objects to be mapped to run_target instances.
:param int or None pool_size: number of sub-processes to be mapped to run_target.
:param int or None time_limit: amount of time to await the results of run_target.
:rtype: None
:return: None
"""
Thread.__init__(self)
self.run_target = run_target
self.return_queue = return_queue
self.pool_args = pool_args
self.time_limit = time_limit
self.pool_size = pool_size
def run(self):
"""
Start pool'd process and return results using queue from __init__.
:rtype: None
:return: None
"""
with Pool(self.pool_size) as pool:
result = pool.map_async(self.run_target, self.pool_args)
try:
results_list = result.get(timeout=self.time_limit)
except TimesUpPencilsDown:
results_list = None
self.return_queue.put(results_list)
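# A minimal usage sketch for PoolProcessHandler (not part of the original
# module): it assumes a picklable top-level function (the name
# some_picklable_function below is a placeholder) and a queue.Queue for the
# results, matching the __init__ docstring above.
#
#     from queue import Queue
#
#     results = Queue()
#     handler = PoolProcessHandler(some_picklable_function, results,
#                                  [1, 2, 3, 4], pool_size=2, time_limit=30)
#     handler.start()
#     ...                           # keep the GUI / main loop responsive here
#     results_list = results.get()  # None if the time limit was reached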
|
import numpy as np
import pandas as pd
import pytest
from pyabc.storage.dataframe_bytes_storage import df_from_bytes, df_to_bytes
@pytest.fixture(
params=[
"empty",
"int",
"float",
"non_numeric_str",
"numeric_str",
"int-float-numeric_str",
"int-float-non_numeric_str-str_ind",
"int-float-numeric_str-str_ind",
]
)
def df(request):
par = request.param
if par == "empty":
return pd.DataFrame()
if par == "int":
return pd.DataFrame(
{
"a": np.random.randint(-20, 20, 100),
"b": np.random.randint(-20, 20, 100),
}
)
if par == "float":
return pd.DataFrame(
{"a": np.random.randn(100), "b": np.random.randn(100)}
)
if par == "non_numeric_str":
return pd.DataFrame({"a": ["foo", "bar"], "b": ["bar", "foo"]})
if par == "numeric_str":
return pd.DataFrame(
{
"a": list(map(str, np.random.randn(100))),
"b": list(map(str, np.random.randint(-20, 20, 100))),
}
)
if par == "int-float-numeric_str":
return pd.DataFrame(
{
"a": np.random.randint(-20, 20, 100),
"b": np.random.randn(100),
"c": list(map(str, np.random.randint(-20, 20, 100))),
}
)
if par == "int-float-non_numeric_str-str_ind":
return pd.DataFrame(
{"a": [1, 2], "b": [1.1, 2.2], "c": ["foo", "bar"]},
index=["first", "second"],
)
if par == "int-float-numeric_str-str_ind":
return pd.DataFrame(
{"a": [1, 2], "b": [1.1, 2.2], "c": ["1", "2"]},
index=["first", "second"],
)
raise Exception("Invalid Test DataFrame Type")
def test_serialize(df):
serial = df_to_bytes(df)
assert isinstance(serial, bytes)
rebuilt = df_from_bytes(serial)
assert (df == rebuilt).all().all()
|
""" Provide a sphinx extension to generate widgets from code: ada.
A role :code-config: must be seen before parsing the first code: block,
for instance
:code-config:`run_button=True;prove_button=False;accumulate_code=True`
See doc in the function codeconfig.
Code accumulation: cancel it with an empty :code-config: directive
This plugin interprets the following parameters to the code:: directive:
* no_button - removes all buttons
* <X>_button - forces the existence of a button for mode X.
Modes are defined in the MODES variable in editors.js.
these override the code-config setting.
The code inside code:: directives is extracted into a list of files.
The files are extracted the following way:
- for valid Ada code, 'gnatchop' is run on the entirety of the
snippet
   - for C code, the files should be named explicitly, with a marker of
the form
!<basename>
placed at the beginning of each file in the snippet. This mechanism
is also activated if the argument manual_chop is passed to the
code:: directive. For instance:
.. code:: prove_button manual_chop
!main.c
int main(void);
!t.ads
package T is
end T;
"""
import codecs
import os
import re
import shutil
import subprocess
import tempfile
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from xml.sax.saxutils import escape
WIDGETS_SERVER_URL = os.environ.get(
"CODE_SERVER_URL",
"https://cloudchecker-staging.r53.adacore.com")
template = u"""
<div class="widget_editor"
example_server="{server_url}"
{extra_attribs}
inline="true">
{files_divs}
{shadow_files_divs}
</div>
"""
NAME_REGEX = re.compile(r'(lab|project)=(\S+)')
LAB_IO_START_REGEX = re.compile("-- START LAB IO BLOCK")
LAB_IO_END_REGEX = re.compile("-- END LAB IO BLOCK")
LABIO_FILENAME = "lab_io.txt"
codeconfig_found = False
# A safeguard against documents not defining code-config. Is it needed?
# These are configured via the "code-config" role, see doc in the
# function codeconfig below.
class Config(object):
buttons = set()
# The list of active buttons. Strings of the form 'xxx_button'.
accumulate_code = False
reset_accumulator = False
config = Config()
accumulated_files = {}
# The accumulated files. Key: basename, value: latest content seen
def c_chop(lines):
"""Chops the text, counting on filenames being given in the form
!<filename>
as the first line of each file.
Returns a list of tuples of the form (basename, contents)
"""
results = []
current_filename = None
current_contents = []
for j in lines:
if j.startswith('!'):
if current_filename:
results.append((current_filename, '\n'.join(current_contents)))
current_contents = []
current_filename = j[1:]
else:
current_contents.append(j)
if current_filename:
results.append((current_filename, '\n'.join(current_contents)))
return results
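# A hedged illustration of c_chop (not part of the original file): given
#     ["!main.c", "int main(void);", "!t.ads", "package T is", "end T;"]
# it returns
#     [("main.c", "int main(void);"), ("t.ads", "package T is\nend T;")],
# i.e. each "!<basename>" marker starts a new file and collects the lines
# that follow it.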
def cheapo_gnatchop(lines):
"""Performs a cheapo gnatchop on the given text.
lines is a list of strings
Returns a list of tuples of the form (basename, contents)
"""
results = []
current_basename = 'invalid.ads'
current_contents = []
body = re.compile("^(procedure|package body) ([^ ]+)")
spec = re.compile("^(package) ([^ ]+)")
end = re.compile("^end")
text_found = False
def to_base_filename(g):
return g.lower().replace('.', '-')
for j in lines:
# Append the lines to the current contents except if it's a blank
# line before anything is found
if not j and not text_found:
continue
text_found = True
current_contents.append(j)
match = body.match(j)
if match:
current_basename = to_base_filename(match.group(2)) + ".adb"
else:
match = spec.match(j)
if match:
current_basename = to_base_filename(match.group(2)) + ".ads"
else:
if end.match(j):
results.append((current_basename,
'\n'.join(current_contents)))
current_contents = []
text_found = False
if current_contents:
results.append((current_basename,
'\n'.join(current_contents)))
return results
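# A hedged illustration of cheapo_gnatchop (not part of the original file):
#     cheapo_gnatchop(["procedure Hello is", "begin", "   null;", "end Hello;"])
# returns [("hello.adb", "procedure Hello is\nbegin\n   null;\nend Hello;")],
# since the first line matches the body regex (giving hello.adb) and the
# "end" line closes the compilation unit.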
def real_gnatchop(lines):
"""Same API as cheapo_gnatchop, but launch a real gnatchop"""
wd = tempfile.mkdtemp()
try:
gnatchop_file = os.path.join(wd, 'internal_gnatchop.txt')
with codecs.open(gnatchop_file, 'wb', encoding='utf-8') as f:
f.write('\n'.join(lines))
cmd = ['gnatchop', gnatchop_file]
output = subprocess.check_output(cmd, cwd=wd)
files = [os.path.join(wd, f.decode("utf-8").strip()) for f in output.splitlines()
if not f.startswith(b'splitting ')]
os.remove(gnatchop_file)
results = []
for file in files:
with codecs.open(file, 'rb', encoding='utf-8') as f:
results.append((os.path.basename(file), f.read().strip()))
return results
finally:
shutil.rmtree(wd)
class WidgetCodeDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
'class': directives.class_option,
'name': directives.unchanged,
}
def run(self):
shadow_files_divs = ""
extra_attribs = ""
argument_list = []
force_no_buttons = False
is_lab = False
def get_shadow_div(basename, content):
return (u'<div class="shadow_file"'
'style="display:none" basename="{}">'
'{}</div>').format(basename, escape(content))
if self.arguments:
argument_list = self.arguments[0].split(' ')
if 'no_button' in argument_list or (
'class' in self.options and (
'ada-nocheck' in self.options['class'] or
'ada-syntax-only' in self.options['class'])):
force_no_buttons = True
# look for (lab|project)=my_name
name_matches = [NAME_REGEX.match(line) for line in argument_list if NAME_REGEX.match(line)]
if len(name_matches) == 1:
if name_matches[0].group(1) == "lab":
extra_attribs += ' lab="True"'
is_lab = True
extra_attribs += ' name={}'.format(name_matches[0].group(2))
elif len(name_matches) > 1:
raise self.error("malformed widget directive")
# Make sure code-config exists in the document
if not codeconfig_found:
print (self.lineno, dir(self))
raise self.error("you need to add a :code-config: role")
if is_lab:
# look for lab io start block
io_start_matches = [i for i, line in enumerate(self.content) if LAB_IO_START_REGEX.match(line)]
# look for lab io end block
io_end_matches = [i for i, line in enumerate(self.content) if LAB_IO_END_REGEX.match(line)]
# check for correct formation of lab io block
if len(io_start_matches) == 1 and len(io_end_matches) == 1 and io_start_matches[0] < io_end_matches[0]:
io_content = self.content[io_start_matches[0] + 1 : io_end_matches[0]]
# create shadow file from io blocks
new_file = "\n".join(io_content)
shadow_files_divs += get_shadow_div(LABIO_FILENAME, new_file)
# remove io block lines from self.content
# The following does not work for some odd reason so we will have to copy the list
# del self.content[io_start_matches[0] : (io_end_matches[0] + 1)]
chop_contents = self.content[:io_start_matches[0]] + self.content[io_end_matches[0] + 1:]
else:
raise self.error("malformed lab io block: io_start={} io_end={}".format(io_start_matches, io_end_matches))
else:
chop_contents = self.content
# chop contents into files
try:
# chop source files
if 'manual_chop' in argument_list:
files = c_chop(chop_contents)
elif 'c' in argument_list:
files = c_chop(chop_contents)
else:
files = real_gnatchop(chop_contents)
except subprocess.CalledProcessError:
raise self.error("could not gnatchop example")
if config.accumulate_code:
# We are accumulating code: store the new code in the
# accumulated_files
global accumulated_files
for f in files:
accumulated_files[f[0]] = f[1]
try:
if config.accumulate_code:
editor_files = set([f[0] for f in files])
for k, v in accumulated_files.items():
if k not in editor_files:
shadow_files_divs += get_shadow_div(k, v)
divs = "\n".join(
[u'<div class="file" basename="{}">{}</div>'.format(
f[0], escape(f[1])) for f in files]
)
nodes_latex = []
            # Attempting to detect HTML or LaTeX output by checking for 'html' in tags
if 'html' not in self.state.state_machine.document.settings.env.app.tags.tags:
for f in files:
# Based on sphinx/directives/code.py
container_node = nodes.container(
'', literal_block=True,
classes=['literal-block-wrapper'])
literal = nodes.literal_block('',
f[1],
format='latex')
literal['language'] = self.arguments[0].split(' ')[0]
literal['linenos'] = 'linenos' in self.options or \
'lineno-start' in self.options
literal['source'] = f[0]
caption = nodes.caption('', f[0])
caption.source = literal.source
caption.line = literal.line
# container_node += caption
container_node += literal
nodes_latex.append(container_node)
except Exception:
# If we have an exception here, it's probably a codec error
print (files)
raise
if not force_no_buttons:
for x in (config.buttons |
set(filter(lambda y: y.endswith('_button'),
argument_list))):
extra_attribs += ' {}="True"'.format(x)
return [
nodes.raw('',
template.format(server_url=WIDGETS_SERVER_URL,
files_divs=divs,
shadow_files_divs=shadow_files_divs,
extra_attribs=extra_attribs),
format='html')
] + nodes_latex
def codeconfig(typ, rawtext, text, lineno, inliner, options={}, content=[]):
"""Support the code-config role.
This role contains a set of directives separated by ";". See below
for the list of directives.
"""
global codeconfig_found, config, accumulated_files
codeconfig_found = True
# When we encounter this directive, empty the accumulated_files
directives = text.split(';')
for d in directives:
key, value = d.strip().split('=')
if key.endswith('_button'):
if value.lower() == "true":
config.buttons.add(key)
else:
if key in config.buttons:
config.buttons.remove(key)
else:
if not hasattr(config, key):
raise inliner.error(
"wrong key for code-config: {}".format(key))
setattr(config, key, value.lower() == "true")
if config.reset_accumulator:
accumulated_files = {}
config.reset_accumulator = False
return [], []
def on_builder_inited(app):
# Connect to the "code" directive
app.add_directive('code', WidgetCodeDirective, override=True)
def setup(app):
app.add_config_value('insert_widgets', True, 'html')
app.add_role('code-config', codeconfig)
app.connect('builder-inited', on_builder_inited)
return {'version': '0.1'}
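# A minimal sketch of enabling this extension from a Sphinx conf.py (the
# module name "widget_extension" below is a placeholder -- use whatever this
# file is saved as on the Sphinx path):
#
#     extensions = ['widget_extension']
#     insert_widgets = True   # config value registered in setup() above
#
# The CODE_SERVER_URL environment variable can also be set to point the
# generated widgets at a different example server.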
|
r"""
Wrapper class for abelian groups
This class is intended as a template for anything in Sage that needs the
functionality of abelian groups. One can create an AdditiveAbelianGroupWrapper
object from any given set of elements in some given parent, as long as an
``_add_`` method has been defined.
EXAMPLES:
We create a toy example based on the Mordell-Weil group of an elliptic curve over `\QQ`::
sage: E = EllipticCurve('30a2')
sage: pts = [E(4,-7,1), E(7/4, -11/8, 1), E(3, -2, 1)]
sage: M = AdditiveAbelianGroupWrapper(pts[0].parent(), pts, [3, 2, 2])
sage: M
Additive abelian group isomorphic to Z/3 + Z/2 + Z/2 embedded in Abelian
group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 - 19*x + 26
over Rational Field
sage: M.gens()
((4 : -7 : 1), (7/4 : -11/8 : 1), (3 : -2 : 1))
sage: 3*M.0
(0 : 1 : 0)
sage: 3000000000000001 * M.0
(4 : -7 : 1)
sage: M == loads(dumps(M)) # known bug, see https://trac.sagemath.org/sage_trac/ticket/11599#comment:7
True
TESTS:
We check that ridiculous operations are being avoided::
sage: from sage.misc.verbose import set_verbose
sage: set_verbose(2, 'additive_abelian_wrapper.py')
sage: 300001 * M.0
verbose 1 (...: additive_abelian_wrapper.py, discrete_exp) Calling discrete exp on (1, 0, 0)
(4 : -7 : 1)
sage: set_verbose(0, 'additive_abelian_wrapper.py')
.. TODO::
- Think about subgroups and quotients, which probably won't work
in the current implementation -- some fiddly adjustments will be
needed in order to be able to pass extra arguments to the
subquotient's init method.
AUTHORS:
- David Loeffler (2010)
- Lorenz Panny (2017): :meth:`AdditiveAbelianGroupWrapper.discrete_log`
"""
# ****************************************************************************
# Copyright (C) 2010 David Loeffler
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from . import additive_abelian_group as addgp
from sage.rings.integer_ring import ZZ
from sage.categories.morphism import Morphism
from sage.structure.element import parent
from sage.modules.free_module_element import vector
from sage.misc.superseded import deprecated_function_alias
class UnwrappingMorphism(Morphism):
r"""
The embedding into the ambient group. Used by the coercion framework.
"""
def __init__(self, domain):
r"""
EXAMPLES::
sage: G = AdditiveAbelianGroupWrapper(QQbar, [sqrt(QQbar(2)), sqrt(QQbar(3))], [0, 0])
sage: F = QQbar.coerce_map_from(G); F
Generic morphism:
From: Additive abelian group isomorphic to Z + Z embedded in Algebraic Field
To: Algebraic Field
sage: type(F)
<class 'sage.groups.additive_abelian.additive_abelian_wrapper.UnwrappingMorphism'>
"""
Morphism.__init__(self, domain.Hom(domain.universe()))
def _call_(self, x):
r"""
TESTS::
sage: E = EllipticCurve("65a1")
sage: G = E.torsion_subgroup()
sage: isinstance(G, sage.groups.additive_abelian.additive_abelian_wrapper.AdditiveAbelianGroupWrapper)
True
sage: P1 = E([1,-1,1])
sage: P2 = E([0,1,0])
sage: P1 in G # indirect doctest
False
sage: P2 in G
True
sage: (G(P2) + P1) in G
False
sage: (G(P2) + P1).parent()
Abelian group of points on Elliptic Curve defined by y^2 + x*y = x^3 - x over Rational Field
"""
return self.codomain()(x.element())
class AdditiveAbelianGroupWrapperElement(addgp.AdditiveAbelianGroupElement):
"""
An element of an :class:`AdditiveAbelianGroupWrapper`.
"""
def __init__(self, parent, vector, element=None, check=False):
r"""
EXAMPLES::
sage: from sage.groups.additive_abelian.additive_abelian_wrapper import AdditiveAbelianGroupWrapper
sage: G = AdditiveAbelianGroupWrapper(QQbar, [sqrt(QQbar(2)), sqrt(QQbar(3))], [0, 0])
sage: G.0 # indirect doctest
1.414213562373095?
"""
addgp.AdditiveAbelianGroupElement.__init__(self, parent, vector, check)
if element is not None:
element = self.parent().universe()(element)
self._element = element
def element(self):
r"""
Return the underlying object that this element wraps.
EXAMPLES::
sage: T = EllipticCurve('65a').torsion_subgroup().gen(0)
sage: T; type(T)
(0 : 0 : 1)
<class 'sage.schemes.elliptic_curves.ell_torsion.EllipticCurveTorsionSubgroup_with_category.element_class'>
sage: T.element(); type(T.element())
(0 : 0 : 1)
<class 'sage.schemes.elliptic_curves.ell_point.EllipticCurvePoint_number_field'>
"""
if self._element is None:
self._element = self.parent().discrete_exp(self._hermite_lift())
return self._element
def _repr_(self):
r"""
String representation of self.
EXAMPLES::
sage: T = EllipticCurve('65a').torsion_subgroup().gen(0)
sage: repr(T) # indirect doctest
'(0 : 0 : 1)'
"""
return repr(self.element())
class AdditiveAbelianGroupWrapper(addgp.AdditiveAbelianGroup_fixed_gens):
"""
This class is used to wrap a subgroup of an existing
additive abelian group as a new additive abelian group.
EXAMPLES::
sage: G2 = AdditiveAbelianGroupWrapper(Zmod(42), [2], [21]); G2
Additive abelian group isomorphic to Z/21 embedded in Ring of integers modulo 42
sage: G6 = AdditiveAbelianGroupWrapper(Zmod(42), [6], [7]); G6
Additive abelian group isomorphic to Z/7 embedded in Ring of integers modulo 42
sage: G = AdditiveAbelianGroupWrapper(Zmod(42), [21,14,6], [2,3,7]); G
Additive abelian group isomorphic to Z/2 + Z/3 + Z/7 embedded in Ring of integers modulo 42
sage: G.invariants()
(42,)
::
sage: AdditiveAbelianGroupWrapper(QQbar, [sqrt(2), sqrt(3)], [0, 0])
Additive abelian group isomorphic to Z + Z embedded in Algebraic Field
::
sage: EllipticCurve(GF(419**2), [1,0]).abelian_group() # indirect doctest
Additive abelian group isomorphic to Z/420 + Z/420 embedded in Abelian group of points on Elliptic Curve defined by y^2 = x^3 + x over Finite Field in z2 of size 419^2
"""
Element = AdditiveAbelianGroupWrapperElement
def __init__(self, universe, gens, invariants):
r"""
EXAMPLES::
sage: AdditiveAbelianGroupWrapper(QQbar, [sqrt(QQbar(2)), sqrt(QQbar(3))], [0, 0]) # indirect doctest
Additive abelian group isomorphic to Z + Z embedded in Algebraic Field
"""
self._universe = universe
self._gen_elements = tuple(universe(x) for x in gens)
self._gen_orders = invariants
cover,rels = addgp.cover_and_relations_from_invariants(invariants)
addgp.AdditiveAbelianGroup_fixed_gens.__init__(self, cover, rels, cover.gens())
self._unset_coercions_used()
self.register_embedding(UnwrappingMorphism(self))
def universe(self):
r"""
The ambient group in which this abelian group lives.
EXAMPLES::
sage: G = AdditiveAbelianGroupWrapper(QQbar, [sqrt(QQbar(2)), sqrt(QQbar(3))], [0, 0])
sage: G.universe()
Algebraic Field
"""
return self._universe
def generator_orders(self):
r"""
The orders of the generators with which this group was initialised.
(Note that these are not necessarily a minimal set of generators.)
Generators of infinite order are returned as 0. Compare
``self.invariants()``, which returns the orders of a minimal set of
generators.
EXAMPLES::
sage: V = Zmod(6)**2
sage: G = AdditiveAbelianGroupWrapper(V, [2*V.0, 3*V.1], [3, 2])
sage: G.generator_orders()
(3, 2)
sage: G.invariants()
(6,)
"""
return tuple(self._gen_orders)
def _repr_(self):
r"""
EXAMPLES::
sage: G = AdditiveAbelianGroupWrapper(QQbar, [sqrt(QQbar(2)), sqrt(QQbar(3))], [0, 0])
sage: repr(G) # indirect doctest
'Additive abelian group isomorphic to Z + Z embedded in Algebraic Field'
"""
return addgp.AdditiveAbelianGroup_fixed_gens._repr_(self) + " embedded in " + self.universe()._repr_()
def discrete_exp(self, v):
r"""
Given a list (or other iterable) of length equal to the number of
generators of this group, compute the element of the ambient group
with those exponents in terms of the generators of self.
EXAMPLES::
sage: G = AdditiveAbelianGroupWrapper(QQbar, [sqrt(QQbar(2)), -1], [0, 0])
sage: v = G.discrete_exp([3, 5]); v
-0.7573593128807148?
sage: v.parent() is QQbar
True
This method is an inverse of :meth:`discrete_log`::
sage: orders = [2, 2*3, 2*3*5, 2*3*5*7, 2*3*5*7*11]
sage: G = AdditiveAbelianGroup(orders)
sage: A = AdditiveAbelianGroupWrapper(G.0.parent(), G.gens(), orders)
sage: el = A.random_element()
sage: A.discrete_exp(A.discrete_log(el)) == el
True
TESTS:
Check that :meth:`_discrete_exp` still works (for now)::
sage: A._discrete_exp(list(range(1,6)))
doctest:warning ...
DeprecationWarning: _discrete_exp is deprecated. ...
(1, 2, 3, 4, 5)
"""
from sage.misc.verbose import verbose
v = self.V()(v)
verbose("Calling discrete exp on %s" % v)
# DUMB IMPLEMENTATION!
return sum([self._gen_elements[i] * ZZ(v[i]) for i in range(len(v))], self.universe()(0))
_discrete_exp = deprecated_function_alias(32384, discrete_exp)
def discrete_log(self, x, gens=None):
r"""
Given an element of the ambient group, attempt to express it in terms
of the generators of this group or the given generators of a subgroup.
ALGORITHM:
This reduces to p-groups, then calls :func:`_discrete_log_pgroup` which
implements a basic version of the recursive algorithm from [Suth2008]_.
AUTHORS:
- Lorenz Panny (2017)
EXAMPLES::
sage: G = AdditiveAbelianGroup([2, 2*3, 2*3*5, 2*3*5*7, 2*3*5*7*11])
sage: A = AdditiveAbelianGroupWrapper(G.0.parent(), G.gens(), [g.order() for g in G.gens()])
sage: A.discrete_log(A.discrete_exp([1,5,23,127,539]))
(1, 5, 23, 127, 539)
::
sage: F.<t> = GF(1009**2, modulus=x**2+11); E = EllipticCurve(j=F(940))
sage: P, Q = E(900*t + 228, 974*t + 185), E(1007*t + 214, 865*t + 802)
sage: E.abelian_group().discrete_log(123 * P + 777 * Q, [P, Q])
(123, 777)
::
sage: V = Zmod(8)**2
sage: G = AdditiveAbelianGroupWrapper(V, [[2,2],[4,0]], [4, 2])
sage: G.discrete_log(V([6, 2]))
(1, 1)
sage: G.discrete_log(V([6, 4]))
Traceback (most recent call last):
...
TypeError: Not in group
::
sage: G = AdditiveAbelianGroupWrapper(QQbar, [sqrt(2)], [0])
sage: G.discrete_log(QQbar(2*sqrt(2)))
Traceback (most recent call last):
...
NotImplementedError: No black-box discrete log for infinite abelian groups
TESTS:
Check that :meth:`_discrete_log` still works (for now)::
sage: orders = [2, 2*3, 2*3*5, 2*3*5*7, 2*3*5*7*11]
sage: G = AdditiveAbelianGroup(orders)
sage: A = AdditiveAbelianGroupWrapper(G.0.parent(), G.gens(), orders)
sage: A._discrete_log(sum(i*g for i,g in enumerate(G.gens(),1)))
doctest:warning ...
DeprecationWarning: _discrete_log is deprecated. ...
(1, 2, 3, 4, 5)
"""
from sage.arith.misc import CRT_list
from sage.rings.infinity import Infinity
if self.order() == Infinity:
raise NotImplementedError("No black-box discrete log for infinite abelian groups")
if gens is None:
gens = self.gens()
ords = self.generator_orders()
else:
ords = [g.order() for g in gens]
gens = [self._universe(g.element() if parent(g) is self else g) for g in gens]
x = self._universe(x.element() if parent(x) is self else x)
crt_data = [[] for _ in gens]
for p in self.exponent().prime_factors():
cofactor = self.exponent().prime_to_m_part(p)
pgens = [cofactor * g for g in gens]
y = cofactor * x
pvals = [o.valuation(p) for o in ords]
plog = _discrete_log_pgroup(p, pvals, pgens, y)
for i, (r, v) in enumerate(zip(plog, pvals)):
crt_data[i].append((r, p**v))
res = vector(CRT_list(*map(list, zip(*l))) for l in crt_data)
assert x == sum(r * g for r, g in zip(res, gens))
return res
_discrete_log = deprecated_function_alias(32384, discrete_log)
def torsion_subgroup(self, n=None):
r"""
Return the `n`-torsion subgroup of this additive abelian group
when `n` is given, and the torsion subgroup otherwise.
The [`n`-]torsion subgroup consists of all elements whose order
is finite [and divides `n`].
EXAMPLES::
sage: ords = [2, 2*3, 2*3*5, 0, 2*3*5*7, 2*3*5*7*11]
sage: G = AdditiveAbelianGroup(ords)
sage: A = AdditiveAbelianGroupWrapper(G.0.parent(), G.gens(), ords)
sage: T = A.torsion_subgroup(5)
sage: T
Additive abelian group isomorphic to Z/5 + Z/5 + Z/5 embedded in Additive abelian group isomorphic to Z/2 + Z/6 + Z/30 + Z + Z/210 + Z/2310
sage: T.gens()
((0, 0, 6, 0, 0, 0), (0, 0, 0, 0, 42, 0), (0, 0, 0, 0, 0, 462))
::
sage: E = EllipticCurve(GF(487^2), [311,205])
sage: T = E.abelian_group().torsion_subgroup(42)
sage: T
Additive abelian group isomorphic to Z/42 + Z/6 embedded in Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 311*x + 205 over Finite Field in z2 of size 487^2
sage: [P.order() for P in T.gens()]
[42, 6]
::
sage: E = EllipticCurve('574i1')
sage: pts = [E(103,172), E(61,18)]
sage: assert pts[0].order() == 7 and pts[1].order() == infinity
sage: M = AdditiveAbelianGroupWrapper(pts[0].parent(), pts, [7,0])
sage: M
Additive abelian group isomorphic to Z/7 + Z embedded in Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 - x^2 - 19353*x + 958713 over Rational Field
sage: M.torsion_subgroup()
Additive abelian group isomorphic to Z/7 embedded in Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 - x^2 - 19353*x + 958713 over Rational Field
sage: M.torsion_subgroup(7)
Additive abelian group isomorphic to Z/7 embedded in Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 - x^2 - 19353*x + 958713 over Rational Field
sage: M.torsion_subgroup(5)
Trivial group embedded in Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 - x^2 - 19353*x + 958713 over Rational Field
AUTHORS:
- Lorenz Panny (2022)
"""
genords = zip(self._gen_elements, self._gen_orders)
if n is None:
gens, ords = zip(*(t for t in genords if t[1]))
else:
n = ZZ(n)
if n <= 0:
raise ValueError('n must be a positive integer')
gens, ords = [], []
for g,o in genords:
if not o:
continue
d = n.gcd(o)
if d == 1:
continue
gens.append(o//d * g)
ords.append(d)
return AdditiveAbelianGroupWrapper(self.universe(), gens, ords)
def _element_constructor_(self, x, check=False):
r"""
Create an element from x. This may be either an element of self, an element of the
ambient group, or an iterable (in which case the result is the corresponding
product of the generators of self).
EXAMPLES::
sage: V = Zmod(8)**2
sage: G = AdditiveAbelianGroupWrapper(V, [[2,2],[4,0]], [4, 2])
sage: G(V([6,2]))
(6, 2)
sage: G([1,1])
(6, 2)
sage: G(G([1,1]))
(6, 2)
"""
if parent(x) is self.universe():
return self.element_class(self, self.discrete_log(x), element = x)
return addgp.AdditiveAbelianGroup_fixed_gens._element_constructor_(self, x, check)
def _discrete_log_pgroup(p, vals, aa, b):
r"""
Attempt to express an element of p-power order in terms of
generators of a p-subgroup of this group.
Used as a subroutine in :meth:`discrete_log`.
ALGORITHM:
This implements a basic version of the recursive algorithm
from [Suth2008]_.
The base cases are handled using a variant of Shanks'
baby-step giant-step algorithm for products of cyclic groups.
EXAMPLES::
sage: G = AdditiveAbelianGroup([5, 5**2, 5**4, 5**4])
sage: (a, b, c, d) = gs = G.gens()
sage: A = AdditiveAbelianGroupWrapper(a.parent(), gs, [g.order() for g in gs])
sage: from sage.groups.additive_abelian.additive_abelian_wrapper import _discrete_log_pgroup
sage: _discrete_log_pgroup(5, [1,2,4,4], gs, a + 17*b + 123*c + 456*d)
(1, 17, 123, 456)
"""
from itertools import product as iproduct
qq = lambda j, k: vector(p ** (j + max(0, v - k)) for a, v in zip(aa, vals))
subbasis = lambda j, k: [q * a for q, a in zip(qq(j, k), aa)]
dotprod = lambda xs, ys: sum(x * y for x, y in zip(xs, ys))
def _base(j, k, c):
assert k - j == 1
aajk = subbasis(j, k)
assert not any(p*a for a in aajk) # orders are in {1,p}
idxs = [i for i, a in enumerate(aajk) if a]
rs = [([0], [0]) for i in range(len(aajk))]
for i in range(len(idxs)):
rs[idxs[i]] = (range(p), [0]) if i % 2 else ([0], range(p))
if len(idxs) % 2:
m = p.isqrt() + 1 # hence m^2 >= p
rs[idxs[-1]] = range(0, p, m), range(m)
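        # (Meet-in-the-middle: generators alternate between the enumerated
        # "tab" side and the subtracted side, and with an odd number of
        # generators the last exponent range is itself split Shanks-style
        # into strides of m ~ sqrt(p) plus offsets below m.)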
tab = {}
for x in iproduct(*(r for r, _ in rs)):
key = dotprod(x, aajk)
if hasattr(key, 'set_immutable'):
key.set_immutable()
tab[key] = vector(x)
for y in iproduct(*(r for _, r in rs)):
key = c - dotprod(y, aajk)
if hasattr(key, 'set_immutable'):
key.set_immutable()
if key in tab:
return tab[key] + vector(y)
raise TypeError('Not in group')
def _rec(j, k, c):
assert 0 <= j < k
if k - j <= 1: # base case
return _base(j, k, c)
w = 2
js = list(range(j, k, (k-j+w-1) // w)) + [k]
assert len(js) == w + 1
x = vector([0] * len(aa))
for i in reversed(range(w)):
gamma = p ** (js[i] - j) * c - dotprod(x, subbasis(js[i], k))
v = _rec(js[i], js[i+1], gamma)
assert not any(q1 % q2 for q1, q2 in zip(qq(js[i], js[i+1]), qq(js[i], k)))
x += vector(q1 // q2 * r for q1, q2, r in zip(qq(js[i], js[i+1]), qq(js[i], k), v))
return x
return _rec(0, max(vals), b)
|
#!/usr/bin/env python3
"""
An autogenerated testfile for python.
"""
import unittest
from unittest.mock import patch
from io import StringIO
import re
import os
import sys
from unittest import TextTestRunner
from examiner import ExamTestCase, ExamTestResult, tags
from examiner import import_module, find_path_to_assignment
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_PATH = find_path_to_assignment(FILE_DIR)
if REPO_PATH not in sys.path:
sys.path.insert(0, REPO_PATH)
# Path to file and basename of the file to import
main = import_module(REPO_PATH, 'main')
marvin = import_module(REPO_PATH, 'marvin')
class Test3Marvin2Extra(ExamTestCase):
"""
Each assignment has 1 testcase with multiple asserts.
    The available asserts are documented at https://docs.python.org/3.6/library/unittest.html#test-cases
"""
@classmethod
def setUpClass(cls):
"""
To find all relative files that are read or written to.
"""
os.chdir(REPO_PATH)
def check_print_contain(self, inp, correct, func):
"""
One function for testing print input functions.
"""
with patch("builtins.input", side_effect=inp):
with patch("sys.stdout", new=StringIO()) as fake_out:
func()
str_data = fake_out.getvalue()
for val in correct:
self.assertIn(val, str_data)
@tags("b1")
def test_points_to_grade_menu(self):
"""
        Tests calling menu option b1 via the main function in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to be printed:
        {correct}
        Got the following:
{student}
"""
self._multi_arguments = ["b1", "100", "59", "", "q"]
self.check_print_contain(self._multi_arguments, ["score: F"], main.main)
@tags("b1")
def test_points_to_grade_func(self):
"""
        Tests calling points_to_grade in marvin.py.
        Uses the following as arguments:
        {arguments}
        Expects the following string to be returned:
        {correct}
        Got the following:
{student}
"""
self._multi_arguments = ["70", "50"]
self.assertEqual(
marvin.points_to_grade(*self._multi_arguments),
"score: C"
)
@tags("b2")
def test_has_strings_menu(self):
"""
        Tests calling menu option b2 via the main function in main.py.
        Uses the following as input:
        {arguments}
        Expects the following string to appear in the output:
        {correct}
        Got the following:
{student}
"""
self._multi_arguments = ["b2", "anagram", "ana", "agr", "am", "", "q"]
self.check_print_contain(self._multi_arguments, ["Match"], main.main)
@tags("b2")
def test_has_strings_func(self):
"""
        Tests calling has_strings in marvin.py.
        Uses the following as arguments:
        {arguments}
        Expects the following string to be returned:
        {correct}
        Got the following:
{student}
"""
self._multi_arguments = ["Palindrom", "par", "ind", "rom" ]
self.assertEqual(
marvin.has_strings(*self._multi_arguments),
"No match"
)
if __name__ == '__main__':
runner = TextTestRunner(resultclass=ExamTestResult, verbosity=2)
unittest.main(testRunner=runner, exit=False)
|
# Copyright (c) 2013, 2017 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Andreas Hansson
from __future__ import print_function
from __future__ import absolute_import
import m5.objects
import inspect
import sys
from textwrap import TextWrapper
from . import HMC
# Dictionary of mapping names of real memory controller models to
# classes.
_mem_classes = {}
def is_mem_class(cls):
"""Determine if a class is a memory controller that can be instantiated"""
# We can't use the normal inspect.isclass because the ParamFactory
# and ProxyFactory classes have a tendency to confuse it.
try:
return issubclass(cls, m5.objects.AbstractMemory) and \
not cls.abstract
except TypeError:
return False
def get(name):
"""Get a memory class from a user provided class name."""
try:
mem_class = _mem_classes[name]
return mem_class
except KeyError:
print("%s is not a valid memory controller." % (name,))
sys.exit(1)
def print_mem_list():
"""Print a list of available memory classes."""
print("Available memory classes:")
doc_wrapper = TextWrapper(initial_indent="\t\t", subsequent_indent="\t\t")
for name, cls in _mem_classes.items():
print("\t%s" % name)
# Try to extract the class documentation from the class help
# string.
doc = inspect.getdoc(cls)
if doc:
for line in doc_wrapper.wrap(doc):
print(line)
def mem_names():
"""Return a list of valid memory names."""
return list(_mem_classes.keys())
# Add all memory controllers in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_mem_class):
_mem_classes[name] = cls
def create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits, intlv_size):
"""
    Helper function for creating a single memory controller from the given
options. This function is invoked multiple times in config_mem function
to create an array of controllers.
"""
import math
intlv_low_bit = int(math.log(intlv_size, 2))
# Use basic hashing for the channel selection, and preferably use
# the lower tag bits from the last level cache. As we do not know
# the details of the caches here, make an educated guess. 4 MByte
# 4-way associative with 64 byte cache lines is 6 offset bits and
# 14 index bits.
xor_low_bit = 20
# Create an instance so we can figure out the address
# mapping and row-buffer size
ctrl = cls()
# Only do this for DRAMs
if issubclass(cls, m5.objects.DRAMCtrl):
# Inform each controller how many channels to account
# for
ctrl.channels = nbr_mem_ctrls
# If the channel bits are appearing after the column
# bits, we need to add the appropriate number of bits
# for the row buffer size
if ctrl.addr_mapping.value == 'RoRaBaChCo':
# This computation only really needs to happen
# once, but as we rely on having an instance we
# end up having to repeat it for each and every
# one
rowbuffer_size = ctrl.device_rowbuffer_size.value * \
ctrl.devices_per_rank.value
intlv_low_bit = int(math.log(rowbuffer_size, 2))
# We got all we need to configure the appropriate address
# range
ctrl.range = m5.objects.AddrRange(r.start, size = r.size(),
intlvHighBit = \
intlv_low_bit + intlv_bits - 1,
xorHighBit = \
xor_low_bit + intlv_bits - 1,
intlvBits = intlv_bits,
intlvMatch = i)
return ctrl
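# A hedged worked example of the interleaving set up above (not part of the
# original file): with intlv_size = 128 bytes, intlv_low_bit is 7, and with
# four channels intlv_bits is 2, so intlvHighBit = 7 + 2 - 1 = 8 -- address
# bits 7..8 select the channel -- while xorHighBit = 20 + 2 - 1 = 21 hashes
# in bits 20..21. For a DRAMCtrl using the 'RoRaBaChCo' mapping the channel
# bits are moved above the row-buffer bits instead.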
def config_ori_mem(options, system):
"""
Create the memory controllers based on the options and attach them.
If requested, we make a multi-channel configuration of the
selected memory controller class by creating multiple instances of
the specific class. The individual controllers have their
parameters set such that the address range is interleaved between
them.
"""
# Mandatory options
opt_mem_type = options.mem_type
opt_mem_channels = options.mem_channels
# Optional options
opt_tlm_memory = getattr(options, "tlm_memory", None)
opt_external_memory_system = getattr(options, "external_memory_system",
None)
opt_elastic_trace_en = getattr(options, "elastic_trace_en", False)
opt_mem_ranks = getattr(options, "mem_ranks", None)
if opt_mem_type == "HMC_2500_1x32":
HMChost = HMC.config_hmc_host_ctrl(options, system)
HMC.config_hmc_dev(options, system, HMChost.hmc_host)
subsystem = system.hmc_dev
xbar = system.hmc_dev.xbar
else:
subsystem = system
xbar = system.membus
if opt_tlm_memory:
system.external_memory = m5.objects.ExternalSlave(
port_type="tlm_slave",
port_data=opt_tlm_memory,
port=system.membus.master,
addr_ranges=system.mem_ranges)
system.kernel_addr_check = False
return
if opt_external_memory_system:
subsystem.external_memory = m5.objects.ExternalSlave(
port_type=opt_external_memory_system,
port_data="init_mem0", port=xbar.master,
addr_ranges=system.mem_ranges)
subsystem.kernel_addr_check = False
return
nbr_mem_ctrls = opt_mem_channels
import math
from m5.util import fatal
intlv_bits = int(math.log(nbr_mem_ctrls, 2))
if 2 ** intlv_bits != nbr_mem_ctrls:
fatal("Number of memory channels must be a power of 2")
cls = get(opt_mem_type)
mem_ctrls = []
if opt_elastic_trace_en and not issubclass(cls, m5.objects.SimpleMemory):
fatal("When elastic trace is enabled, configure mem-type as "
"simple-mem.")
# The default behaviour is to interleave memory channels on 128
# byte granularity, or cache line granularity if larger than 128
# byte. This value is based on the locality seen across a large
# range of workloads.
intlv_size = max(128, system.cache_line_size.value)
# For every range (most systems will only have one), create an
# array of controllers and set their parameters to match their
# address mapping in the case of a DRAM
for r in system.mem_ranges:
for i in range(nbr_mem_ctrls):
mem_ctrl = create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits,
intlv_size)
# Set the number of ranks based on the command-line
# options if it was explicitly set
if issubclass(cls, m5.objects.DRAMCtrl) and opt_mem_ranks:
mem_ctrl.ranks_per_channel = opt_mem_ranks
# Enable low-power DRAM states if option is set
if issubclass(cls, m5.objects.DRAMCtrl):
mem_ctrl.enable_dram_powerdown = \
options.enable_dram_powerdown
if opt_elastic_trace_en:
mem_ctrl.latency = '1ns'
print("For elastic trace, over-riding Simple Memory "
"latency to 1ns.")
mem_ctrls.append(mem_ctrl)
subsystem.mem_ctrls = mem_ctrls
# Connect the controllers to the membus
for i in range(len(subsystem.mem_ctrls)):
if opt_mem_type == "HMC_2500_1x32":
            subsystem.mem_ctrls[i].port = xbar[i // 4].master
# Set memory device size. There is an independent controller for
# each vault. All vaults are same size.
subsystem.mem_ctrls[i].device_size = options.hmc_dev_vault_size
else:
subsystem.mem_ctrls[i].port = xbar.master
def valid_size_of(container):
import math
if container is None:
return -1
else:
return len(container)
def create_addr_alignment_mapper(addr_base, original_ranges):
from m5.util import convert
from m5.objects import AddrRange, CowardAddrMapper
remapped_ranges = []
next_base = addr_base
assert(valid_size_of(original_ranges) > 0)
for rg in original_ranges:
remapped_ranges.append(AddrRange(next_base, size = rg.size()))
next_base = next_base + rg.size()
assert(valid_size_of(original_ranges) == valid_size_of(remapped_ranges))
return CowardAddrMapper(original_ranges = original_ranges, \
remapped_ranges = remapped_ranges)
def create_home_agent(phys_ranges):
from m5.util import convert
from m5.objects import AddrRange, HomeAgent
mem_ranges = []
next_base = convert.toMemorySize('0')
assert(valid_size_of(phys_ranges) > 0)
for rg in phys_ranges:
mem_ranges.append(AddrRange(next_base, size = rg.size()))
next_base = next_base + rg.size()
assert(valid_size_of(phys_ranges) == valid_size_of(mem_ranges))
return HomeAgent(phys_ranges = phys_ranges, mem_ranges = mem_ranges)
def create_mem_subsystem(options, system, intlv_size, disable_kvm_map):
import math
from m5.util import fatal, convert
from m5.objects import Addr, AddrRange
from m5.objects import FlexMem, HybridMem, PortForwarder, SimpleMemory
if options.mem_type != "MemSubsystem":
fatal("options.mem_type != 'MemSubsystem'")
if getattr(options, "tlm_memory", None):
fatal("tlm_memory")
if getattr(options, "external_memory_system", None):
fatal("external_memory_system")
if getattr(options, "elastic_trace_en", False):
fatal("elastic_trace_en")
if options.mem_channels != 1:
fatal("options.mem_channels != 1")
if options.num_dirs != 1:
fatal("options.num_dirs != 1")
if getattr(options, "mem_ranks", None):
fatal("mem_ranks")
split_char = ';'
channel_forwarder = PortForwarder()
channel_ranges = []
channel_sizes = options.channel_sizes.split(split_char)
channel_types = options.channel_types.split(split_char)
to_channel_addr = []
mem_ctrls = []
phys_data = []
balance_interval = options.balance_interval
warmup_interval = options.time_warmup
mem_space_size = convert.toMemorySize('0')
assert(valid_size_of(channel_sizes) > 0)
for sz in [convert.toMemorySize(x) for x in channel_sizes]:
channel_ranges.append(AddrRange(mem_space_size, size = sz))
mem_space_size = mem_space_size + sz
assert(mem_space_size >= (sum(rg.size() for rg in system.mem_ranges)))
assert(valid_size_of(channel_types) > 0)
for tp in channel_types:
assert(issubclass(get(tp), m5.objects.DRAMCtrl))
assert(tp != "HMC_2500_1x32")
assert(valid_size_of(channel_types) == valid_size_of(channel_ranges))
assert(valid_size_of(channel_ranges) > 0)
for idx in range(len(channel_ranges)):
mapper = create_addr_alignment_mapper( \
convert.toMemorySize('0'), [channel_ranges[idx]])
assert(valid_size_of(mapper.remapped_ranges) == 1)
mem_ctrl = create_mem_ctrl(get(channel_types[idx]), \
mapper.remapped_ranges[0], 0, 1, 0, intlv_size)
mem_ctrl.in_addr_map = False
mem_ctrl.kvm_map = False
channel_forwarder.master = mapper.slave
mapper.master = mem_ctrl.port
to_channel_addr.append(mapper)
mem_ctrls.append(mem_ctrl)
assert(valid_size_of(system.mem_ranges) > 0)
for rg in system.mem_ranges:
data = SimpleMemory(range = rg)
if disable_kvm_map:
data.kvm_map = False
phys_data.append(data)
home_agent = create_home_agent(system.mem_ranges)
if convert.toMemorySize(options.channel_intlv_size) == 0:
hybrid_mem = HybridMem(phys_ranges = home_agent.phys_ranges, \
mem_ranges = home_agent.mem_ranges, \
channel_ranges=channel_ranges, \
time_interval=balance_interval, \
time_warmup=warmup_interval)
home_agent.master = hybrid_mem.slave
hybrid_mem.master = channel_forwarder.slave
system.hybrid_mem = hybrid_mem
elif convert.toMemorySize(options.channel_intlv_size) > 0:
flex_mem = FlexMem(mem_ranges = home_agent.mem_ranges, \
channel_ranges = channel_ranges, \
intlv_size = options.channel_intlv_size)
home_agent.master = flex_mem.slave
flex_mem.master = channel_forwarder.slave
system.flex_mem = flex_mem
else:
fatal("impossible")
system.mem_subsystem = home_agent
system.channel_forwarder = channel_forwarder
system.to_channel_addr = to_channel_addr
system.mem_ctrls = mem_ctrls
system.phys_data = phys_data
system.mmap_using_noreserve = True
return system.mem_subsystem
def config_mem_subsystem(options, system):
import math
from m5.objects import CowardAddrMapper, PortForwarder
intlv_size = max(128, system.cache_line_size.value)
system_mem_range_port = []
mem_subsystem_forwarder = PortForwarder()
mem_subsystem = create_mem_subsystem(options, system, intlv_size, False)
mem_subsystem_forwarder.master = mem_subsystem.slave
for rg in system.mem_ranges:
range_port = CowardAddrMapper( \
original_ranges = rg, remapped_ranges = rg)
system.membus.master = range_port.slave
range_port.master = mem_subsystem_forwarder.slave
system_mem_range_port.append(range_port)
system.system_mem_range_port = system_mem_range_port
system.mem_subsystem_forwarder = mem_subsystem_forwarder
def config_mem(options, system):
if options.mem_type == "MemSubsystem":
config_mem_subsystem(options, system)
else:
config_ori_mem(options, system)
|
#!/usr/bin/env python
# coding: utf-8
class Default(object):
def __init__(self):
import re
        self.__pattern = re.compile(r'\s+')
def segment(self, text):
from salada import language
sequence = self.__pattern.split(text)
last_index = len(sequence) - 1
segments = [
language.Segment(
s, i == 0, i == last_index,
) for i, s in enumerate(sequence)
]
return segments
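# A hedged usage sketch (not part of the original file; it assumes the
# salada package providing language.Segment is importable):
#
#     segments = Default().segment("foo  bar baz")
#     # -> three Segment objects built from "foo", "bar" and "baz"; the
#     #    first and last carry the i == 0 / i == last_index flags passed
#     #    to Segment above.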
|
import re, json, io
thesaurus = []
with open('th_en_US_v2.dat') as fp:
encoding = fp.readline().rstrip('\n')
lineNumber = 1
numberOfMeanings = 0
for line in fp:
lineNumber += 1
line = line.rstrip('\n').split("|")
if len(line) == 2 and not (line[0].startswith("(") and line[0].endswith(")")):
word = line[0]
if numberOfMeanings != 0:
print "Unexpected error E at line {}".format(lineNumber)
numberOfMeanings = int(line[1])
# thesaurusWord = {"word": word, "numberOfMeanings": numberOfMeanings, "meanings": []}
thesaurusWord = {"word": word, "numberOfSynonyms": 0, "synonyms": []}
elif len(line) == 1:
print "Unexpected error A at line {}".format(lineNumber)
word = "?"
if numberOfMeanings != 0:
print "Unexpected error F at line {}".format(lineNumber)
numberOfMeanings = int(line[0])
else:
numberOfMeanings -= 1
if numberOfMeanings < 0:
# more number of meanings than expected
print "Unexpected error D at line {}".format(lineNumber)
else:
# meaning = {
# "partOfSpeech": "",
# "synonyms": [],
# "genericTerms": [],
# "relatedTerms": [],
# "similarTerms": [],
# "antonyms": []
# }
for item in line:
if item.startswith("(") and item.endswith(")"):
partOfSpeech = item[1:-1]
# meaning["partOfSpeech"] = partOfSpeech
continue
                special = re.compile(r"(.+) \((generic term|related term|similar term|antonym)\)").split(item)
                special = list(filter(None, special))
                if len(special) == 0:
                    # probably encountered "||"
                    print("Unexpected error B at line {}".format(lineNumber))
continue
elif len(special) == 1:
synonym = item
# meaning["synonyms"].append(synonym)
if thesaurusWord["word"] != synonym and synonym not in thesaurusWord["synonyms"]:
thesaurusWord["numberOfSynonyms"] += 1
thesaurusWord["synonyms"].append(synonym)
elif len(special) == 2:
synonym = special[0]
synonymType = special[1]
# if synonymType == "generic term":
# meaning["genericTerms"].append(synonym)
# elif synonymType == "related term":
# meaning["relatedTerms"].append(synonym)
# elif synonymType == "similar term":
# meaning["similarTerms"].append(synonym)
# elif synonymType == "antonym":
# meaning["antonyms"].append(synonym)
                        # else:
                        #     print("Unexpected error G at line {}".format(lineNumber))
                else:
                    # probably encountered "((", "))", or something unexpected
                    print("Unexpected error C at line {}".format(lineNumber))
# thesaurusWord["meanings"].append(meaning)
if numberOfMeanings == 0:
thesaurus.append(thesaurusWord)
with open('thesaurus.json', 'w') as outfile:
json.dump(thesaurus, outfile)
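
# Hedged usage sketch: reading the generated file back and looking one word up.
# Assumes the script above has already written thesaurus.json; the lookup word
# is purely illustrative.
#     with open('thesaurus.json') as infile:
#         entries = json.load(infile)
#     synonyms_by_word = {entry["word"]: entry["synonyms"] for entry in entries}
#     print(synonyms_by_word.get("fast", []))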
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Pure-Python RSA implementation."""
from .cryptomath import *
from .asn1parser import ASN1Parser
from .rsakey import *
from .pem import *
class Python_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def hasPrivateKey(self):
return self.d != 0
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
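
    # Blinding recap: with a fresh secret r (self.unblinder), self.blinder is
    # (r^-1)^e mod n, so the masked input equals m * r^-e; raising it to d gives
    # m^d * r^-1, and the final multiplication by r recovers m^d while the value
    # fed to the CRT helper stays decorrelated from the caller's input.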
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
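
    # CRT recap (Garner's formula): s1 = m^dP mod p and s2 = m^dQ mod q are the
    # residues of m^d modulo p and q; h = qInv*(s1 - s2) mod p lifts them so that
    # s2 + q*h is congruent to m^d modulo both p and q, hence modulo n = p*q.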
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self): return False
def generate(bits):
key = Python_RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return Python_RSAKey._parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return Python_RSAKey._parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
parsePEM = staticmethod(parsePEM)
def _parsePKCS8(bytes):
p = ASN1Parser(bytes)
# first element in PrivateKeyInfo is an INTEGER
version = p.getChild(0).value
if bytesToNumber(version) != 0:
raise SyntaxError("Unrecognized PKCS8 version")
# second element in PrivateKeyInfo is a SEQUENCE of type
# AlgorithmIdentifier
algIdent = p.getChild(1)
seqLen = algIdent.getChildCount()
# first item of AlgorithmIdentifier is an OBJECT (OID)
oid = algIdent.getChild(0)
if list(oid.value) == [42, 134, 72, 134, 247, 13, 1, 1, 1]:
keyType = "rsa"
elif list(oid.value) == [42, 134, 72, 134, 247, 13, 1, 1, 10]:
keyType = "rsa-pss"
else:
raise SyntaxError("Unrecognized AlgorithmIdentifier: {0}"
.format(list(oid.value)))
# second item of AlgorithmIdentifier are parameters (defined by
# above algorithm)
if keyType == "rsa":
if seqLen != 2:
raise SyntaxError("Missing parameters for RSA algorithm ID")
parameters = algIdent.getChild(1)
if parameters.value != bytearray(0):
raise SyntaxError("RSA parameters are not NULL")
else: # rsa-pss
pass # ignore parameters - don't apply restrictions
if seqLen > 2:
raise SyntaxError("Invalid encoding of AlgorithmIdentifier")
#Get the privateKey
privateKeyP = p.getChild(2)
#Adjust for OCTET STRING encapsulation
privateKeyP = ASN1Parser(privateKeyP.value)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parsePKCS8 = staticmethod(_parsePKCS8)
def _parseSSLeay(bytes):
privateKeyP = ASN1Parser(bytes)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parseSSLeay = staticmethod(_parseSSLeay)
def _parseASN1PrivateKey(privateKeyP):
version = privateKeyP.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(privateKeyP.getChild(1).value)
e = bytesToNumber(privateKeyP.getChild(2).value)
d = bytesToNumber(privateKeyP.getChild(3).value)
p = bytesToNumber(privateKeyP.getChild(4).value)
q = bytesToNumber(privateKeyP.getChild(5).value)
dP = bytesToNumber(privateKeyP.getChild(6).value)
dQ = bytesToNumber(privateKeyP.getChild(7).value)
qInv = bytesToNumber(privateKeyP.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
|
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db'
db = SQLAlchemy(app)
class BlogPost(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
content = db.Column(db.Text, nullable=False)
author = db.Column(db.String(20), nullable=False, default='N/A')
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
def __repr__(self):
return 'Blog post ' + str(self.id)
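
# Hedged setup sketch: the SQLite schema must exist before the routes below can
# insert rows. One way, assuming this module is importable as `app`:
#     >>> from app import app, db
#     >>> with app.app_context():
#     ...     db.create_all()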
@app.route('/')
def index():
return render_template('index.html')
@app.route('/posts', methods=['GET', 'POST'])
def posts():
if request.method == 'POST':
post_title = request.form['title']
post_content = request.form['content']
post_author = request.form['author']
new_post = BlogPost(title=post_title, content=post_content, author=post_author)
db.session.add(new_post)
db.session.commit()
return redirect('/posts')
else:
all_posts = BlogPost.query.order_by(BlogPost.date_posted).all()
return render_template('posts.html', posts=all_posts)
@app.route('/posts/delete/<int:id>')
def delete(id):
post = BlogPost.query.get_or_404(id)
db.session.delete(post)
db.session.commit()
return redirect('/posts')
@app.route('/posts/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
post = BlogPost.query.get_or_404(id)
if request.method == 'POST':
post.title = request.form['title']
post.author = request.form['author']
post.content = request.form['content']
db.session.commit()
return redirect('/posts')
else:
return render_template('edit.html', post=post)
@app.route('/posts/new', methods=['GET', 'POST'])
def new_post():
if request.method == 'POST':
        post_title = request.form['title']
        post_author = request.form['author']
        post_content = request.form['content']
        new_post = BlogPost(title=post_title, content=post_content, author=post_author)
db.session.add(new_post)
db.session.commit()
return redirect('/posts')
else:
return render_template('new_post.html')
if __name__ == "__main__":
app.run(debug=True)
|
import unittest
from main import parse_data
from models import Marker
class TestParseData(unittest.TestCase):
def setUp(self):
self.marker_dummy = dict(type=1, title="test title", description="test description", latitude=1, longitude=1)
self.bad_marker_dummy = dict(type=1, title="No properties")
def tearDown(self):
self.marker_dummy = None
self.bad_marker_dummy = None
def test_data_null(self):
self.assertIsNone(parse_data(Marker, None))
def test_bad_data(self):
self.assertIsNone(parse_data(Marker, self.bad_marker_dummy))
def test_parse_marker(self):
marker = parse_data(Marker, self.marker_dummy)
        for key, value in self.marker_dummy.items():
self.assertEqual(getattr(marker, key), value)
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestParseData)
unittest.TextTestRunner(verbosity=2).run(suite)
|
# Create your views here.
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import render
from django.shortcuts import redirect
from OctaHomeDeviceInput.models import *
from OctaHomeCore.views.base import *
from OctaHomeCore.models import *
from OctaHomeLights.models import *
from OctaHomeTempControl.models import *
class handleHomeStatsView(viewRequestHandler):
def getViewParameters(self):
allDevices = Device.getDevices()
lights = LightDevice.getDevices()
fans = Fan.getDevices()
numDevices = len(allDevices)
numLights = len(lights)
numFans = len(fans)
devicesOn = 0
lightsOn = 0
for light in lights:
if light.IsOn:
lightsOn = lightsOn + 1
devicesOn = devicesOn + 1
fansOn = 0
for fan in fans:
if fan.MaxFanSpeed > 0:
fansOn = fansOn + 1
devicesOn = devicesOn + 1
stats = {
'numDevices':numDevices,
'numLights':numLights,
'numFans':numFans,
'devicesOn':devicesOn,
'lightsOn':lightsOn,
'fansOn':fansOn,
}
        parameters = {'Stats': stats}
        return parameters
def getTemplate(self):
return 'OctaHomeHomeStats/HomeStats'
def getSidebarUrlName(self):
return 'Home'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
main.py
Main script for AsyncioProp.
usage: python3 main.py [-h] [-s SERVER] [-p PORT] [-d] [-l LOGGER]
optional arguments:
-h, --help show this help message and exit
-s SERVER, --server SERVER
change MQTT server host
-p PORT, --port PORT change MQTT server port
-d, --debug set DEBUG log level
-l LOGGER, --logger LOGGER
use logging config file
To switch MQTT broker, kill the program and start again with new arguments.
'''
import asyncio
import os
import platform
import signal
import sys
import uuid
import paho.mqtt.client as mqtt
os.chdir(os.path.dirname(os.path.abspath(__file__)))
from constants import *
try:
PYPROPS_CORELIBPATH
sys.path.append(PYPROPS_CORELIBPATH)
except NameError:
pass
from CryingDollApp import CryingDollApp
from Singleton import Singleton, SingletonException
me = None
try:
me = Singleton()
except SingletonException:
sys.exit(-1)
except BaseException as e:
print(e)
if USE_GPIO and os.path.isfile('/opt/vc/include/bcm_host.h'):
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
mqtt_client = mqtt.Client(uuid.uuid4().urn, clean_session=True, userdata=None)
app = CryingDollApp(sys.argv, mqtt_client, debugging_mqtt=False)
loop = asyncio.get_event_loop()
app.withEventLoop(loop)
# Assign handlers for process exit (has no effect on Windows when debugging here)
signal.signal(signal.SIGTERM, loop.stop)
signal.signal(signal.SIGINT, loop.stop)
if platform.system() != 'Windows':
signal.signal(signal.SIGHUP, loop.stop)
signal.signal(signal.SIGQUIT, loop.stop)
loop.run_forever()
loop.close()
if USE_GPIO and os.path.isfile('/opt/vc/include/bcm_host.h'):
GPIO.cleanup()
try:
mqtt_client.disconnect()
mqtt_client.loop_stop()
except:
pass
del (me)
sys.exit(0)
|
import sc2
from sc2.ids.unit_typeid import UnitTypeId
from sc2.units import Units
from sc2.position import Point2
class WorkerRushBot(sc2.BotAI):
"""
- Worker Rush
"""
def __init__(self):
super()._initialize_variables()
self.close_to_enemies_base = False
self.all_retreat = False
self.last_probe_moved = False
self.low_health_amount = 0
self.low_health_probe: Units = []
self.base_mineral = []
async def on_step(self, iteration):
if iteration == 0:
for w in self.workers:
w.move(self.enemy_start_locations[0])
self.townhalls.ready[0].train(UnitTypeId.PROBE)
self.base_mineral = self.mineral_field.closest_to(
self.townhalls.ready[0]
)
if self.units:
distance_to_enemies_base = self.units.closest_distance_to(
self.enemy_start_locations[0]
)
else:
distance_to_enemies_base = 1000
if (
distance_to_enemies_base < 20
and not self.close_to_enemies_base
):
self.close_to_enemies_base = True
if iteration > 10:
probes = self.units(UnitTypeId.PROBE).collecting
# probes = self.units(UnitTypeId.PROBE).closest_to(self.base_mineral)
[p.move(self.enemy_start_locations[0]) for p in probes]
if not self.units:
await self.client.leave()
await self.probes_micro_control()
async def on_end(self, game_result):
print(f"on_end() was called with result: {game_result}")
async def probes_micro_control(self):
enemies = self.enemy_units
enemies_can_attack = enemies.filter(
lambda u: u.can_attack_ground
)
if (
enemies
and self.close_to_enemies_base
):
for p in self.units(UnitTypeId.PROBE).filter(
lambda p: p not in self.low_health_probe
):
close_enemies = enemies_can_attack.filter(
                    lambda u: u.distance_to(p) < 5
)
# probe's health is too low, retreat
# if p.shield_percentage < 1 / 10:
# if close_enemies:
# p.gather(self.base_mineral)
# self.low_health_amount += 1
# self.low_health_probe.append(p)
# continue
# probe is ready to attack, shoot nearest ground unit
enemies_to_attack = enemies.filter(
lambda u: u.distance_to(p) <= 2 and not u.is_flying
)
if p.weapon_cooldown == 0 and enemies_to_attack:
                    focus_enemy = self.focus_enemy(p, enemies_to_attack)
                    if focus_enemy:
                        p.attack(focus_enemy)
continue
def focus_enemy(self, unit, enemies_in_range):
if enemies_in_range:
target = min(
enemies_in_range,
key=lambda e:
(e.health + e.shield) / unit.calculate_dps_vs_target(e)
)
return target
else:
return None
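

# Hedged usage sketch: launching the bot with python-sc2. The map name and the
# opponent's race/difficulty below are placeholders, not part of the original
# bot, so adjust them to whatever is installed locally.
if __name__ == '__main__':
    from sc2 import run_game, maps, Race, Difficulty
    from sc2.player import Bot, Computer
    run_game(
        maps.get("AcropolisLE"),
        [Bot(Race.Protoss, WorkerRushBot()), Computer(Race.Terran, Difficulty.Easy)],
        realtime=False,
    )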
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
def add(self, value):
self.next = Node(value)
def reverse(self):
cur = self
new = cur.next
        cur.next = None  # the old head becomes the new tail
while new is not None:
prev = cur
cur = new
new = cur.next
cur.next = prev
return cur
root = Node(3)
cur = root
cur.add(4)
cur = cur.next
cur.add(5)
cur = cur.next
cur.add(6)
cur = cur.next
cur = root
while cur:
print(cur.value)
cur = cur.next
print("-----")
cur = root.reverse()
while cur:
print(cur.value)
cur = cur.next
|
#!/bin/python
import unittest
from types import SimpleNamespace
from sap import get_logger
import sap.errors
import sap.adt
from mock import Connection, Response
from fixtures_adt_package import GET_PACKAGE_ADT_XML
from fixtures_adt_repository import (PACKAGE_ROOT_NODESTRUCTURE_OK_RESPONSE,
PACKAGE_ROOT_REQUEST_XML,
PACKAGE_SOURCE_LIBRARY_NODESTRUCUTRE_OK_RESPONSE,
PACKAGE_SOURCE_LIBRARY_REQUEST_XML,
PACKAGE_EMPTY_NODESTRUCTURE_OK_RESPONSE,
PACKAGE_ENVELOPE_NODESTRUCTURE_OK_RESPONSE,
PACKAGE_WITHOUT_SUBPKG_NODESTRUCTURE_OK_RESPONSE)
FIXTURE_PACKAGE_XML="""<?xml version="1.0" encoding="UTF-8"?>
<pak:package xmlns:pak="http://www.sap.com/adt/packages" xmlns:adtcore="http://www.sap.com/adt/core" adtcore:type="DEVC/K" adtcore:description="description" adtcore:language="EN" adtcore:name="$TEST" adtcore:masterLanguage="EN" adtcore:masterSystem="NPL" adtcore:responsible="FILAK" adtcore:version="active">
<adtcore:packageRef adtcore:name="$TEST"/>
<pak:attributes pak:packageType="development"/>
<pak:superPackage adtcore:name="$MASTER"/>
<pak:applicationComponent pak:name="PPM"/>
<pak:transport>
<pak:softwareComponent pak:name="LOCAL"/>
<pak:transportLayer pak:name="HOME"/>
</pak:transport>
<pak:translation/>
<pak:useAccesses/>
<pak:packageInterfaces/>
<pak:subPackages/>
</pak:package>"""
class TestADTPackage(unittest.TestCase):
def test_init(self):
conn = Connection(collections={'/sap/bc/adt/packages': ['application/vnd.sap.adt.packages.v1+xml']})
metadata = sap.adt.ADTCoreData(language='EN', master_language='EN', master_system='NPL', responsible='FILAK')
package = sap.adt.Package(conn, '$TEST', metadata=metadata)
package.description = 'description'
package.set_package_type('development')
package.set_software_component('LOCAL')
package.set_transport_layer('HOME')
package.set_app_component('PPM')
package.super_package.name = '$MASTER'
package.create()
self.assertEqual(len(conn.execs), 1)
self.assertEqual(conn.execs[0][0], 'POST')
self.assertEqual(conn.execs[0][1], '/sap/bc/adt/packages')
self.assertEqual(conn.execs[0][2], {'Content-Type': 'application/vnd.sap.adt.packages.v1+xml'})
self.maxDiff = None
self.assertEqual(conn.execs[0][3], FIXTURE_PACKAGE_XML)
def test_package_serialization_v2(self):
conn = Connection()
metadata = sap.adt.ADTCoreData(language='EN', master_language='EN', master_system='NPL', responsible='FILAK')
package = sap.adt.Package(conn, '$TEST', metadata=metadata)
package.description = 'description'
package.set_package_type('development')
package.set_software_component('LOCAL')
package.set_transport_layer('HOME')
package.set_app_component('PPM')
package.super_package.name = '$MASTER'
package.create()
self.assertEqual(conn.execs[0][2], {'Content-Type': 'application/vnd.sap.adt.packages.v2+xml'})
self.maxDiff = None
self.assertEqual(conn.execs[0][3], FIXTURE_PACKAGE_XML)
def test_adt_package_fetch(self):
conn = Connection([Response(text=GET_PACKAGE_ADT_XML,
status_code=200,
headers={'Content-Type': 'application/vnd.sap.adt.packages.v1+xml; charset=utf-8'})])
package = sap.adt.Package(conn, '$IAMTHEKING')
package.fetch()
self.assertEqual(len(conn.execs), 1)
self.assertEqual(conn.mock_methods(), [('GET', '/sap/bc/adt/packages/%24iamtheking')])
self.maxDiff = None
self.assertEqual(package.description, 'This is a package')
class TestADTPackageWalk(unittest.TestCase):
def test_with_empty_subpackage(self):
connection = Connection([PACKAGE_ROOT_NODESTRUCTURE_OK_RESPONSE,
PACKAGE_SOURCE_LIBRARY_NODESTRUCUTRE_OK_RESPONSE,
PACKAGE_EMPTY_NODESTRUCTURE_OK_RESPONSE])
walk_iter = sap.adt.package.walk(sap.adt.Package(connection, '$VICTORY'))
root_path, subpackages, objects = next(walk_iter)
self.assertEqual(connection.execs[0].adt_uri, '/sap/bc/adt/repository/nodestructure')
self.assertEqual(connection.execs[0].params['parent_name'], '$VICTORY')
self.assertEqual(connection.execs[0].body, PACKAGE_ROOT_REQUEST_XML)
self.assertEqual(connection.execs[1].adt_uri, '/sap/bc/adt/repository/nodestructure')
self.assertEqual(connection.execs[1].params['parent_name'], '$VICTORY')
self.assertEqual(connection.execs[1].body, PACKAGE_SOURCE_LIBRARY_REQUEST_XML)
self.assertEqual(root_path, [])
self.assertEqual(subpackages, ['$VICTORY_TESTS'])
self.assertEqual(objects,
[SimpleNamespace(typ='CLAS/OC', name='ZCL_HELLO_WORLD', uri='/sap/bc/adt/oo/classes/zcl_hello_world'),
SimpleNamespace(typ='INTF/OI', name='ZIF_HELLO_WORLD', uri='/sap/bc/adt/oo/interfaces/zif_hello_world'),
SimpleNamespace(typ='PROG/P', name='Z_HELLO_WORLD', uri='/sap/bc/adt/programs/programs/z_hello_world')])
root_path, subpackages, objects = next(walk_iter)
self.assertEqual(connection.execs[2].adt_uri, '/sap/bc/adt/repository/nodestructure')
self.assertEqual(connection.execs[2].params['parent_name'], '$VICTORY_TESTS')
self.assertEqual(connection.execs[2].body, PACKAGE_ROOT_REQUEST_XML)
self.assertEqual(len(connection.execs), 3)
self.assertEqual(root_path, ['$VICTORY_TESTS'])
self.assertEqual(subpackages, [])
self.assertEqual(objects, [])
def test_with_envelope_root(self):
connection = Connection([PACKAGE_ENVELOPE_NODESTRUCTURE_OK_RESPONSE,
PACKAGE_WITHOUT_SUBPKG_NODESTRUCTURE_OK_RESPONSE,
PACKAGE_SOURCE_LIBRARY_NODESTRUCUTRE_OK_RESPONSE])
walk_iter = sap.adt.package.walk(sap.adt.Package(connection, '$VICTORY'))
root_path, subpackages, objects = next(walk_iter)
self.assertEqual(connection.execs[0].adt_uri, '/sap/bc/adt/repository/nodestructure')
self.assertEqual(connection.execs[0].params['parent_name'], '$VICTORY')
self.assertEqual(connection.execs[0].body, PACKAGE_ROOT_REQUEST_XML)
self.assertEqual(root_path, [])
self.assertEqual(subpackages, ['$VICTORY_TESTS'])
self.assertEqual(objects, [])
self.assertEqual(len(connection.execs), 1)
root_path, subpackages, objects = next(walk_iter)
self.assertEqual(connection.execs[1].adt_uri, '/sap/bc/adt/repository/nodestructure')
self.assertEqual(connection.execs[1].params['parent_name'], '$VICTORY_TESTS')
self.assertEqual(connection.execs[1].body, PACKAGE_ROOT_REQUEST_XML)
self.assertEqual(connection.execs[2].adt_uri, '/sap/bc/adt/repository/nodestructure')
self.assertEqual(connection.execs[2].params['parent_name'], '$VICTORY_TESTS')
self.assertEqual(connection.execs[2].body, PACKAGE_SOURCE_LIBRARY_REQUEST_XML)
self.assertEqual(len(connection.execs), 3)
self.assertEqual(root_path, ['$VICTORY_TESTS'])
self.assertEqual(subpackages, [])
self.assertEqual(objects,
[SimpleNamespace(typ='CLAS/OC', name='ZCL_HELLO_WORLD', uri='/sap/bc/adt/oo/classes/zcl_hello_world'),
SimpleNamespace(typ='INTF/OI', name='ZIF_HELLO_WORLD', uri='/sap/bc/adt/oo/interfaces/zif_hello_world'),
SimpleNamespace(typ='PROG/P', name='Z_HELLO_WORLD', uri='/sap/bc/adt/programs/programs/z_hello_world')])
if __name__ == '__main__':
unittest.main()
|
import json  # load the database login credentials stored in DB/db.json
from peewee import *  # needed for database operations
def db_login():
with open('./DB/db.json', 'r', encoding='utf-8') as f:
db_set = json.load(f)
db = MySQLDatabase(
host=db_set['mariadb']['host'],
port=db_set['mariadb']['port'],
user=db_set['mariadb']['user'],
password=db_set['mariadb']['password'],
database=db_set['mariadb']['dbname'],
)
return db
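

# Hedged usage sketch: opening the handle returned above and listing the tables
# peewee can see. Assumes ./DB/db.json exists with the keys read in db_login();
# connect(), get_tables() and close() are standard peewee Database methods.
if __name__ == '__main__':
    db = db_login()
    db.connect()
    print(db.get_tables())
    db.close()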
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : regression03.py
@Time : 2019/07/15 20:47:44
@Author : xiao ming
@Version : 1.0
@Contact : xiaoming3526@gmail.com
@Desc    :   Ridge regression and forward stagewise linear regression
@github : https://github.com/aimi-cn/AILearners
'''
# here put the import lib
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import numpy as np
'''
@description: load the data set
@param: fileName - name of the data file
@return: xArr - x data set
        yArr - y data set
'''
def loadDataSet(fileName):
numFeat = len(open(fileName).readline().split('\t')) - 1
xArr = []; yArr = []
fr = open(fileName)
for line in fr.readlines():
lineArr =[]
curLine = line.strip().split('\t')
for i in range(numFeat):
lineArr.append(float(curLine[i]))
xArr.append(lineArr)
yArr.append(float(curLine[-1]))
return xArr, yArr
'''
@description: standardize the data
@param: xMat - x data set
        yMat - y data set
@return: inxMat - standardized x data set
        inyMat - standardized y data set
'''
def regularize(xMat, yMat):
    inxMat = xMat.copy()  # copy the data
    inyMat = yMat.copy()
    yMean = np.mean(yMat, 0)  # column-wise mean
    inyMat = yMat - yMean  # centre y by subtracting its mean
    inMeans = np.mean(inxMat, 0)  # column-wise mean
    inVar = np.var(inxMat, 0)  # column-wise variance
    inxMat = (inxMat - inMeans) / inVar  # standardize: subtract the mean, divide by the variance
return inxMat, inyMat
'''
@description: compute the sum of squared errors
@param: yArr - actual values
        yHatArr - predicted values
@return: sum of squared errors
'''
def rssError(yArr,yHatArr):
return ((yArr-yHatArr)**2).sum()
'''
@description: forward stagewise linear regression
@param: xArr - x input data
        yArr - y target data
        eps - step size applied at each iteration
        numIt - number of iterations
@return: returnMat - regression coefficient matrix over numIt iterations
'''
def stageWise(xArr, yArr, eps = 0.01, numIt = 100):
    xMat = np.mat(xArr); yMat = np.mat(yArr).T  # data set
    xMat, yMat = regularize(xMat, yMat)  # standardize the data
    m, n = np.shape(xMat)
    returnMat = np.zeros((numIt, n))  # coefficient matrix, one row per iteration
    ws = np.zeros((n, 1))  # initialize the regression coefficients
wsTest = ws.copy()
wsMax = ws.copy()
    for i in range(numIt):  # iterate numIt times
        # print(ws.T)  # print the current regression coefficients
        lowestError = float('inf')  # positive infinity
        for j in range(n):  # sweep each feature's coefficient
for sign in [-1, 1]:
wsTest = ws.copy()
                wsTest[j] += eps * sign  # nudge the coefficient
                yTest = xMat * wsTest  # compute the prediction
                rssE = rssError(yMat.A, yTest.A)  # compute the squared error
                if rssE < lowestError:  # keep the coefficients that lower the error
lowestError = rssE
wsMax = wsTest
ws = wsMax.copy()
        returnMat[i,:] = ws.T  # record this iteration's coefficients
return returnMat
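
# Hedged usage sketch: a tiny synthetic data set for exercising stageWise()
# without the abalone file used below. The helper name and values are purely
# illustrative and are never called by this script.
def _stageWiseDemo():
    xArrDemo = [[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0]]
    yArrDemo = [3.0, 4.0, 7.0, 10.0]
    print(stageWise(xArrDemo, yArrDemo, eps=0.05, numIt=20)[-1])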
'''
@description: plot the forward stagewise regression coefficient matrix
@param:None
@return: None
'''
def plotstageWiseMat():
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
xArr, yArr = loadDataSet('D:/python/AILearners/data/ml/jqxxsz/8.Regression/abalone.txt')
returnMat = stageWise(xArr, yArr, 0.005, 1000)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(returnMat)
ax_title_text = ax.set_title(u'前向逐步回归:迭代次数与回归系数的关系', FontProperties = font)
ax_xlabel_text = ax.set_xlabel(u'迭代次数', FontProperties = font)
ax_ylabel_text = ax.set_ylabel(u'回归系数', FontProperties = font)
plt.setp(ax_title_text, size = 15, weight = 'bold', color = 'red')
plt.setp(ax_xlabel_text, size = 10, weight = 'bold', color = 'black')
plt.setp(ax_ylabel_text, size = 10, weight = 'bold', color = 'black')
plt.show()
if __name__ == '__main__':
plotstageWiseMat()
|
# Django
from django.contrib import admin
# Circle
from circles.models import Circle
@admin.register(Circle)
class CircleAdmin(admin.ModelAdmin):
list_display = ('slug_name', 'name', 'is_public',
'verified', 'is_limited', 'members_limit')
search_fields = ('slug_name', 'name')
list_filter = ('is_public', 'verified', 'is_limited')
actions = ['verify', 'deverify']
def verify(self, request, queryset):
queryset.update(verified=True)
def deverify(self, request, queryset):
queryset.update(verified=False)
|
from geode import *
from numpy import *
def test_sse():
random.seed(3871)
    for i in range(10):
x = random.randn(2)
assert all(x==sse_pack_unpack(x))
|
"""
Party endpoints
"""
from flask import Flask, make_response, abort, jsonify, Blueprint,request
from app.api.v2.models import party_model
from app.api.v2.models import user_model
from app.api.v2 import database
from app.api.v2.utils.verify import verify_tokens
from app.api.v2.utils.validator import validate_party_json_keys, return_error, validate_string, strip_whitespace, check_duplication, validate_ints
PARTY = party_model.Party()
USER = user_model.User()
party_route = Blueprint('party',__name__,url_prefix='/api/v2')
@party_route.route('/parties',methods=['POST'])
def save():
"""
Save a party into the database
method: POST
"""
# check if is admin
user_email, user_id = verify_tokens()
if(USER.check_if_admin(user_id) == False):
return return_error(401,"Must be an admin to add party")
#validate json keys
json_key_errors = validate_party_json_keys(request)
if json_key_errors:
return return_error(400, "missing keys {}".format(json_key_errors))
try:
data = request.get_json(force=True)
except:
return make_response(jsonify({
"status":400,
"error":"Ensure your content type is application/json"
})),400
name = data["name"]
hqaddress = data["hqaddress"]
logoUrl = data["logoUrl"]
if(validate_string(name) == False):
return return_error(400, "Name must be of type string")
if(validate_string(hqaddress) == False):
return return_error(400, "Hqaddress must be of type string")
if(validate_string(logoUrl) == False):
return return_error(400, "LogoUrl must be of type string")
if(name == ""):
return return_error(400,"Name cannot be empty")
if(hqaddress == ""):
return return_error(400,"Hqaddress cannot be empty")
if(logoUrl == ""):
return return_error(400,"LogoUrl cannot be empty")
    name = name.strip()
hqaddress = hqaddress.strip()
logoUrl = logoUrl.strip()
if(name == ""):
return return_error(400,"Name cannot be empty")
if(hqaddress == ""):
return return_error(400,"Hqaddress cannot be empty")
if(logoUrl == ""):
return return_error(400,"LogoUrl cannot be empty")
name = strip_whitespace(name)
hqaddress = strip_whitespace(hqaddress)
logoUrl = strip_whitespace(logoUrl)
#check if party with same name exists, if true, abort
check_duplication("name","parties", name)
PARTY.save(name, hqaddress,logoUrl)
return make_response(jsonify({
"status": 201,
"party": {
"name":name,
"hqaddress": hqaddress,
"logoUrl": logoUrl
}
}), 201)
@party_route.route('parties/<int:party_id>',methods=['DELETE'])
def delete(party_id):
"""
Delete a political party
:params: party id
"""
# check if is admin
user_email, user_id = verify_tokens()
if(USER.check_if_admin(user_id) == False):
return return_error(401,"Must be an admin to delete party")
if(validate_ints(party_id) == False):
return return_error(400, "Wrong parameters party id {}").format(party_id)
query = """SELECT * FROM parties WHERE party_id = {} """.format(party_id)
party = database.select_from_db(query)
if not party:
return make_response(jsonify({
"message": "Party with id {} does not exist".format(party_id)
}), 404)
PARTY.delete(party_id)
return make_response(jsonify({
"status":200,
"message": "Party deleted successfully"
}), 200)
@party_route.route('parties',methods=['GET'])
def get_parties():
"""
return all registered parties
"""
parties = party_model.Party()
all_parties = parties.fetch_all_parties()
if not all_parties:
return make_response(jsonify({
'status':404,
'error':'There are no registered parties yet'
}),404)
response = jsonify({
'status': 200,
'data': all_parties
})
response.status_code = 200
return response
@party_route.route('parties/<int:party_id>',methods=['GET'])
def get_specific_party(party_id):
"""
Get a specific political party by id
"""
query = """SELECT * FROM parties WHERE party_id = '{}'""".format(party_id)
party = database.select_from_db(query)
if not party:
return make_response(jsonify({
"status": 404,
"error": "Party with id {} is not available".format(party_id),
}), 404)
return make_response(jsonify({
"status": 200,
"data": party
}), 200)
@party_route.route('parties/<int:party_id>',methods=['PUT'])
def update(party_id):
""" candidate can update a party """
# check if is admin
user_email, user_id = verify_tokens()
if(USER.check_if_admin(user_id) == False):
return return_error(401,"Must be an admin to update party")
#validate json keys
json_key_errors = validate_party_json_keys(request)
if json_key_errors:
return return_error(400, "Missing keys {}".format(json_key_errors))
try:
data = request.get_json(force=True)
except:
return make_response(jsonify({
'status':400,
'error':'Data should be in json format'
}),400)
id=party_id
name = data["name"]
hqaddress = data["hqaddress"]
logoUrl = data["logoUrl"]
if(validate_string(name) == False):
return return_error(400, "Name must be of type string")
if(validate_string(hqaddress) == False):
return return_error(400, "Hqaddress must be of type string")
if(validate_string(logoUrl) == False):
return return_error(400, "LogoUrl must be of type string")
if(name == ""):
return return_error(400,"Name cannot be empty")
if(hqaddress == ""):
return return_error(400,"Hqaddress cannot be empty")
if(logoUrl == ""):
return return_error(400,"LogoUrl cannot be empty")
name = strip_whitespace(name)
hqaddress = strip_whitespace(hqaddress)
logoUrl = strip_whitespace(logoUrl)
#check if party with same name exists, if true, abort
check_duplication("name","parties", name)
check_duplication("logoUrl","parties", logoUrl)
check_duplication("hqaddress","parties", hqaddress)
PARTY.update(id, name, hqaddress, logoUrl)
return make_response(jsonify({
"status": 200,
"data": {
"name":name,
"hqaddress": hqaddress,
"logoUrl": logoUrl
}
}), 200)
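
# Hedged usage sketch: the JSON body the POST /api/v2/parties handler above
# expects. An admin token must also be supplied in whatever form verify_tokens()
# checks; the values are illustrative only.
#     POST /api/v2/parties
#     {"name": "Unity Party", "hqaddress": "Nairobi", "logoUrl": "http://example.com/logo.png"}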
|
import tkinter, passwd, _thread
class Interface():
def __init__(self, name, size):
self.Generator = passwd.Password()
self.size = size
self.root = tkinter.Tk()
self.root.title(name)
self.winfo_screen = [self.root.winfo_screenwidth()//2, self.root.winfo_screenheight()//2]
self.root.geometry(f'{size[0]}x{size[1]}+{self.winfo_screen[0]-size[0]//2}+{self.winfo_screen[1]-size[1]//2}')
self.root.minsize(size[0], size[1])
self.root.maxsize(size[0], size[1])
self.layout()
def generate(self, values):
_thread.start_new_thread(self.Generator.generate, (values, self.listbox))
def layout(self):
        #Spacer
tkinter.Frame(self.root, height=10).pack()
#Frame options
self.frame_options = tkinter.Frame(self.root, width=self.size[0]-100, height=100, relief=tkinter.SUNKEN, bd=1)
self.frame_options.pack()
        #Frame Options items
self.var_checkbox = [tkinter.IntVar() for _ in range(0, 4)]
text = ('Capital letters', 'Small letters', 'Numbers', 'Symbols')
self.checkbox = [tkinter.Checkbutton(self.frame_options, text=x, variable=self.var_checkbox[text.index(x)]) for x in text]
[x.select() for x in self.checkbox]
self.checkbox[0].place(x=5, y=10)
self.checkbox[1].place(x=150, y=10)
self.checkbox[2].place(x=5, y=40)
self.checkbox[3].place(x=150, y=40)
tkinter.Label(self.frame_options, text='Digits:').place(x=10, y=70)
self.entry_digits = tkinter.Entry(self.frame_options, width=5)
self.entry_digits.place(x=65, y=70)
self.button_generate = tkinter.Button(self.frame_options, text="Generate", command=lambda *args: self.generate((self.var_checkbox, self.entry_digits)))
self.button_clean = tkinter.Button(self.frame_options, text="Clean", command=lambda *args: self.listbox.delete(0, tkinter.END))
self.button_generate.place(x=140, y=65)
self.button_clean.place(x=230, y=65)
#Bind
self.entry_digits.bind("<Return>", lambda *args: self.generate((self.var_checkbox, self.entry_digits)))
self.button_generate.bind("<Return>", lambda *args: self.generate((self.var_checkbox, self.entry_digits)))
self.button_generate.bind("<Enter>", lambda *args: self.button_generate.focus_force())
self.button_generate.bind_all("<Alt-KeyPress-g>", lambda *args: self.generate((self.var_checkbox, self.entry_digits)))
self.button_clean.bind("<Return>", lambda *args: self.listbox.delete(0, tkinter.END))
self.button_clean.bind_all("<Alt-KeyPress-c>", lambda *args: self.listbox.delete(0, tkinter.END))
        #Spacer
tkinter.Frame(self.root, height=10).pack()
#Frame ListBox
self.frame_listbox = tkinter.Frame(self.root, relief=tkinter.SUNKEN, bd=1)
self.frame_listbox.pack()
scrollbar = [tkinter.Scrollbar(self.frame_listbox, orient=x) for x in ('vertical', 'horizontal')]
self.listbox = tkinter.Listbox(self.frame_listbox, width=37, height=20, selectmode=tkinter.EXTENDED, yscrollcommand=scrollbar[0].set, xscrollcommand=scrollbar[1].set)
scrollbar[0].config(command=self.listbox.yview)
scrollbar[1].config(command=self.listbox.xview)
scrollbar[0].pack(side="right", fill="y")
scrollbar[1].pack(side="bottom", fill="x")
self.listbox.pack()
def run(self):
self.root.mainloop()
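

# Hedged usage sketch: launching the window. The title and size are placeholders;
# `passwd.Password` from the import above must be available for this to run.
if __name__ == '__main__':
    Interface('Password Generator', (340, 420)).run()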
|
"""
Youtube Tag
---------
This implements a Liquid-style youtube tag for Pelican,
based on the jekyll / octopress youtube tag [1]_
Syntax
------
{% youtube id [width height] %}
Example
-------
{% youtube dQw4w9WgXcQ 640 480 %}
Output
------
<span class="videobox">
<iframe
width="640" height="480"
src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0"
webkitAllowFullScreen mozallowfullscreen allowFullScreen>
</iframe>
</span>
[1] https://gist.github.com/jamieowen/2063748
"""
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% youtube id [width height] %}"
YOUTUBE = re.compile(r'([\S]+)(\s+([\d%]+)\s([\d%]+))?')
@LiquidTags.register('youtube')
def youtube(preprocessor, tag, markup):
width = 640
height = 390
youtube_id = None
config_thumb_only = preprocessor.configs.getConfig('YOUTUBE_THUMB_ONLY')
config_thumb_size = preprocessor.configs.getConfig('YOUTUBE_THUMB_SIZE')
thumb_sizes = {
'maxres': [1280, 720],
'sd': [640, 480],
'hq': [480, 360],
'mq': [320, 180]
}
if config_thumb_only:
if not config_thumb_size:
config_thumb_size = 'sd'
try:
width = thumb_sizes[config_thumb_size][0]
height = thumb_sizes[config_thumb_size][1]
except KeyError:
pass
match = YOUTUBE.search(markup)
if match:
groups = match.groups()
youtube_id = groups[0]
width = groups[2] or width
height = groups[3] or height
if youtube_id:
if config_thumb_only:
thumb_url = 'https://img.youtube.com/vi/{youtube_id}'.format(
youtube_id=youtube_id)
youtube_out = """<a
href="https://www.youtube.com/watch?v={youtube_id}"
class="youtube_video" alt="YouTube Video"
title="Click to view on YouTube">
<img width="{width}" height="{height}"
src="{thumb_url}/{size}default.jpg">
</a>""".format(width=width, height=height,
youtube_id=youtube_id,
size=config_thumb_size,
thumb_url=thumb_url)
else:
youtube_out = """
<span class="videobox">
<iframe width="{width}" height="{height}"
src='https://www.youtube.com/embed/{youtube_id}'
frameborder='0' webkitAllowFullScreen
mozallowfullscreen allowFullScreen>
</iframe>
</span>
""".format(width=width, height=height,
youtube_id=youtube_id).strip()
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return youtube_out
# ---------------------------------------------------
# This import allows youtube tag to be a Pelican plugin
from liquid_tags import register # noqa
|
"""
https://fe8ebede-836e-432b-ad74-af1265de3de2.mock.pstmn.io/rangos1
<ConsultaRangosResult>
<Mensaje>OK</Mensaje>
<Rangos>
<Rango>
<Desde>501</Desde>
<Hasta>540</Hasta>
</Rango>
<Rango>
<Desde>551</Desde>
<Hasta>600</Hasta>
</Rango>
</Rangos>
</ConsultaRangosResult>
[{'desde': 501, 'hasta': 540}, {'desde': 551, 'hasta': 600}]
"""
import requests
r = requests.get(
'https://fe8ebede-836e-432b-ad74-af1265de3de2.mock.pstmn.io/rangos1')
response = r.text
mylista = []
r = response.splitlines()
le = len(r)
for i in range(le):
if '<Rango>' in r[i]:
dic = dict()
        if '<Desde>' in r[i+1]:
            e = r[i+1].split('<Desde>')
            f = e[1].split('</Desde>')
            dic['desde'] = int(f[0])
        if '<Hasta>' in r[i+2]:
            e = r[i+2].split('<Hasta>')
            f = e[1].split('</Hasta>')
            dic['hasta'] = int(f[0])
mylista.append(dic)
print(mylista)
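
# Hedged alternative sketch: the same extraction via xml.etree.ElementTree,
# assuming the response body is the <ConsultaRangosResult> document shown in the
# docstring above.
#     import xml.etree.ElementTree as ET
#     root = ET.fromstring(response)
#     rangos = [{'desde': int(rango.find('Desde').text),
#                'hasta': int(rango.find('Hasta').text)}
#               for rango in root.iter('Rango')]
#     print(rangos)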
|