import logging
from django.conf import settings
from django.utils.datastructures import MultiValueDictKeyError
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from io import open
import os
import pdfplumber
from .map_res_table import draw_map_table
from .models import EarlyliteracySkillSetScores, MapTestCheckItem
from django.http import JsonResponse
# from rest_framework.parsers import JSONParser
from .parse_helper import ExtractStarData, extract_map_data
from .star_reading_table import draw_star_reading_table
log = logging.getLogger("map_test")
# Create your views here.
@csrf_exempt
def choose_file(request):
temp = loader.get_template('pdf2MySQL/upload_file.html')
return HttpResponse(temp.render())
@csrf_exempt
def upload_file(request):
return render(request, 'pdf2MySQL/upload_file.html')
@csrf_exempt
def handle_pdf_data(request):
# request.Files['myfile']
if 'phone_number' not in request.POST:
return JsonResponse({"errorCode": "400",
"executed": True,
"message": "No phone number input for pdf upload!",
"success": False}, status=200)
phonenumber = request.POST['phone_number']
test_type = request.POST['test_type']
''' 3 types of test report.
"star_early", "star_reading", "map_test"
'''
log.warning("Import {} report for user {}".format(test_type, phonenumber))
if not phonenumber:
return JsonResponse({"errorCode": "400",
"executed": True,
"message": "No phone number input for pdf upload!",
"success": False}, status=200)
    if request.method == 'POST':  # only handle POST requests
        try:
            myFile = request.FILES['myfile']  # get the uploaded file; a missing key raises MultiValueDictKeyError
except MultiValueDictKeyError as err:
log.warning(err)
return JsonResponse({"errorCode": "400",
"executed": True,
"message": "Need to choose a PDF file for upload. {}!".format(err),
"success": False}, status=200)
if not myFile:
return JsonResponse({"errorCode": "400",
"executed": True,
"message": "No file was uploaded!",
"success": False}, status=200)
        destination = open(os.path.join(settings.MEDIA_ROOT, myFile.name), 'wb+')  # open the target file for binary writing
        for chunk in myFile.chunks():  # write the file in chunks
            destination.write(chunk)
        destination.close()
################################################################
        # extract text from the uploaded PDF (formerly written to an intermediate txt directory)
################################################################
pdffilestored = os.path.join(settings.MEDIA_ROOT, myFile.name)
with pdfplumber.open(pdffilestored) as pdf:
content = ''
            # len(pdf.pages) is the number of pages in the PDF
            for i in range(len(pdf.pages)):
                # pdf.pages[i] is page i+1 of the PDF
                page = pdf.pages[i]
                # page.extract_text() reads the page text; the slice below drops the page number at the bottom of each page
                page_content = '\n'.join(page.extract_text().split('\n')[1:-1])
content = content + page_content
# print(content)
################################################################
# trans end
################################################################
try:
if test_type == "star_early":
ExtractStarData(content, phonenumber)
elif test_type == "map_test":
stu_map_pro = extract_map_data(content, phonenumber)
draw_map_table(stu_map_pro)
elif test_type == "star_reading":
draw_star_reading_table()
else:
                raise ValueError("Unknown test type: {}".format(test_type))
except Exception as err:
log.error(err)
log.error("Upload pdf {} failed!".format(myFile.name))
            temp = loader.get_template('pdf2MySQL/show_failed.html')
            return HttpResponse(temp.render())
else:
temp = loader.get_template('pdf2MySQL/show_success.html')
os.remove(pdffilestored)
# os.remove(txtfilestored)
return HttpResponse(temp.render())
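# A hypothetical client call for the upload view above (the URL route and host are assumptions,
# not defined in this module; the field names match the view):
#   curl -X POST http://localhost:8000/pdf2MySQL/handle_pdf_data/ \
#        -F "phone_number=13800000000" -F "test_type=star_early" -F "myfile=@report.pdf"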
def show(request):
temp = loader.get_template('pdf2MySQL/show_success.html')
return HttpResponse(temp.render())
# @login_required
# @ensure_csrf_cookie
@csrf_exempt
def get_student_exam_stats(request, phone):
if request.method == 'GET':
instance = list(EarlyliteracySkillSetScores.objects.filter(phone_number=phone).order_by('-TestDate')[:3])
log.warning("Get {} test results for user {}".format(len(instance), phone))
if not instance or len(instance) <= 0:
return JsonResponse({"errorCode": "400",
"executed": True,
"message": "User with phone {} does not have any test result!".format(phone),
"success": False}, status=200)
else:
scaled_score = instance[0].ScaledScore
lexile_measure = instance[0].LexileMeasure
test_date = instance[0].TestDate
sub_items_alphabetic_principle = [instance[0].AlphabeticKnowledge,
instance[0].AlphabeticSequence,
instance[0].LetterSounds,
instance[0].PrintConceptsWordLength,
instance[0].PrintConceptsWordBorders,
instance[0].PrintConceptsLettersAndWords,
instance[0].Letters,
instance[0].IdentificationAndWordMatching]
sub_items_phonemic_awareness = [instance[0].RhymingAndWordFamilies,
instance[0].BlendingWordParts,
instance[0].BlendingPhonemes,
instance[0].InitialAndFinalPhonemes,
instance[0].ConsonantBlendsPA,
instance[0].MedialPhonemeDiscrimination,
instance[0].PhonemeIsolationORManipulation,
instance[0].PhonemeSegmentation]
sub_items_phonics1 = [instance[0].ShortVowelSounds,
instance[0].InitialConsonantSounds,
instance[0].FinalConsonantSounds,
instance[0].LongVowelSounds,
instance[0].VariantVowelSounds,
instance[0].ConsonantBlendsPH]
sub_items_phonics2 = [instance[0].ConsonantDigraphs,
instance[0].OtherVowelSounds,
instance[0].SoundSymbolCorrespondenceConsonants,
instance[0].WordBuilding,
instance[0].SoundSymbolCorrespondenceVowels,
instance[0].WordFamiliesOrRhyming]
sub_items_structural_vocabulary = [instance[0].WordsWithAffixes,
instance[0].Syllabification,
instance[0].CompoundWords,
instance[0].WordFacility,
instance[0].Synonyms,
instance[0].Antonyms]
sub_items_other_domains = [instance[0].ComprehensionATtheSentenceLevel,
instance[0].ComprehensionOfParagraphs,
instance[0].NumberNamingAndNumberIdentification,
instance[0].NumberObjectCorrespondence,
instance[0].SequenceCompletion,
instance[0].ComposingAndDecomposing,
instance[0].Measurement]
# sub_domain_score = [instance[0].AlphabeticPrinciple, instance[0].ConceptOfWord,
# instance[0].VisualDiscrimination,
# instance[0].Phonics, instance[0].StructuralAnalysis, instance[0].Vocabulary,
# instance[0].SentenceLevelComprehension, instance[0].PhonemicAwareness,
# instance[0].ParagraphLevelComprehension, instance[0].EarlyNumeracy]
sub_domain_score_trend_date = []
sub_domain_score_trend_value = []
for result in reversed(instance):
sub_domain_score_trend_date.append(result.TestDate)
sub_domain_score_data = [
round((result.AlphabeticPrinciple + result.ConceptOfWord + result.VisualDiscrimination) / 3, 1),
result.PhonemicAwareness, result.Phonics, (result.StructuralAnalysis + result.Vocabulary) / 2,
round((
result.SentenceLevelComprehension + result.ParagraphLevelComprehension + result.EarlyNumeracy) / 3,
1)]
sub_domain_score_trend_value.append(sub_domain_score_data)
return JsonResponse({
"test_date": test_date,
"lexile_measure": lexile_measure,
"scaled_score": scaled_score,
"sub_items_alphabetic_principle": sub_items_alphabetic_principle,
"sub_items_phonemic_awareness": sub_items_phonemic_awareness,
"sub_items_phonics1": sub_items_phonics1,
"sub_items_phonics2": sub_items_phonics2,
"sub_items_structural_vocabulary": sub_items_structural_vocabulary,
"sub_items_other_domains": sub_items_other_domains,
"sub_domain_score_trend_date": sub_domain_score_trend_date,
"sub_domain_score_trend_value": sub_domain_score_trend_value,
"errorCode": "200",
"executed": True,
"message": "Succeed to get latest test result of user {}!".format(phone),
"success": True
}, status=200)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import defaultdict
from mock import Mock
class DictMock(Mock):
"""Like Mock, except also "virally" returns DictMocks
upon __getitem__
"""
__dict_contents = defaultdict(lambda: DictMock())
def __getitem__(self, name):
return self.__dict_contents[name]
def __setitem__(self, name, val):
self.__dict_contents[name] = val
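# A minimal usage sketch (not part of the original module) showing the "viral" behaviour:
# nested item access keeps returning DictMock instances instead of raising KeyError.
#   cfg = DictMock()
#   cfg['db']['host']            # another DictMock, no KeyError
#   cfg['db']['port'] = 5432
#   assert cfg['db']['port'] == 5432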
|
import pybreaker
from ..formation import _CONTEXT, _RES_HTTP
class BreakerTriggerException(Exception):
pass
def breaker_logger(logger):
class LogListener(pybreaker.CircuitBreakerListener):
"Listener used to log circuit breaker events."
def state_change(self, cb, old_state, new_state):
logger.warn(
"circuitbreaker.state_changed",
name=cb.name,
old_state=old_state.name,
new_state=new_state.name,
)
return LogListener()
def trigger_breaker_if(trigger):
def trigger_breaker_middleware(ctx, call):
ctx = call(ctx)
if trigger(ctx.get(_RES_HTTP)):
raise BreakerTriggerException
return trigger_breaker_middleware
def circuit_breaker(
logger, name, fail_max=5, reset_timeout=60, state_storage=None, exclude=[]
):
breaker = pybreaker.CircuitBreaker(
name=name,
listeners=[breaker_logger(logger)],
exclude=exclude,
fail_max=fail_max,
reset_timeout=reset_timeout,
state_storage=state_storage,
)
def circuit_breaker_middleware(ctx, call):
context = ctx.get(_CONTEXT, {})
log = logger.bind(**context)
if breaker.current_state == "open":
log.info("circuitbreaker.open", name=breaker.name)
call = breaker(call)
try:
ctx = call(ctx)
return ctx
except pybreaker.CircuitBreakerError:
return ctx
return circuit_breaker_middleware
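# A minimal wiring sketch (the names below are assumptions, not part of this module): combine the two
# middlewares so that HTTP 5xx responses trip the breaker, which then short-circuits further calls
# once `fail_max` consecutive failures have been seen.
#   import structlog  # any logger exposing .bind() and .warn() works
#   trip_on_5xx = trigger_breaker_if(lambda res: res is not None and res.status_code >= 500)
#   breaker_mw = circuit_breaker(structlog.get_logger(), name="payments-api", fail_max=3, reset_timeout=30)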
|
import logging
import os
import random
import string
import time
from datetime import datetime, timezone
from enum import Enum, unique
from typing import Optional, List
import requests
logger = logging.getLogger(__name__)
BROKER_URL = os.getenv('BDCAT_SB_BROKER_URL', 'https://qa-broker.sbgenomics.com')
BROKER_TOKEN = os.getenv('BDCAT_SB_BROKER_TOKEN')
@unique
class SBEnv(Enum):
"""Internal SB environment names for BDC"""
staging = 'f4c-staging-vayu'
production = 'ffc'
def new_task_id(sb_environment: SBEnv, new_task: dict) -> str:
"""Generate a unique task ID for test runs"""
date = datetime.now(tz=timezone.utc)
return 'bdc-{}-{}{}-{}-{}'.format(
sb_environment.name,
new_task['test_plan_id'],
'-subset' if new_task.get('test_ids') else '',
date.strftime('%Y%m%d-%H%M%S'),
# extra randomness for when test runs are started in the same second
''.join(random.sample(string.ascii_lowercase, 3))
)
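# Example of the generated ID format (hypothetical plan name and random suffix):
#   bdc-staging-my-test-plan-20240101-093000-xqz
#   bdc-staging-my-test-plan-subset-20240101-093000-krd   (when a subset of test_ids is given)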
class SevenBridgesBrokerClient:
"""HTTP client for the SevenBridges QA broker
The SevenBridges QA broker is a service exposed on the public internet
that acts as an intermediary for running tests on the internal
infrastructure of SevenBridges.
A token obtained manually is needed for authentication.
"""
def __init__(self, token=BROKER_TOKEN, base_url=BROKER_URL):
if token is not None:
self._headers = {'Authorization': f'Bearer {token}'}
else:
self._headers = {}
self._base_url = base_url
self._session = requests.Session()
@staticmethod
def _check_response(resp, *, expected_code):
if resp.status_code != expected_code:
            raise requests.HTTPError(
                f'[{resp.request.method} {resp.url} {resp.reason}] '
                f'Expected {expected_code}, got {resp.status_code}: {resp.text}'
            )
def request(self, method, path, *, json=None, params=None):
url = self._base_url + path
return self._session.request(method, url,
headers=self._headers,
json=json, params=params)
def new_test_run(self, sb_environment: SBEnv, test_plan: str,
subset: Optional[List[str]] = None) -> dict:
"""Start a new test run
:param sb_environment: Target SevenBridges environment.
:param test_plan: SevenBridges-internal tests path.
:param subset: Subset of test names from the test plan to run.
:raises requests.HTTPError: Test run could not be started.
"""
new_task = {
'environment': sb_environment.value,
'test_plan_id': test_plan,
}
if subset is not None:
new_task['test_ids'] = subset
task_id = new_task_id(sb_environment, new_task)
logger.info('Starting a new test run of %s: %s', test_plan, task_id)
resp = self.request('PUT', f'/tasks/{task_id}',
json=new_task,
params=dict(force_retries=1))
self._check_response(resp, expected_code=201)
return resp.json()
def wait_until_done(self, task: dict, timeout=1800, poll_frequency=15) -> dict:
"""Wait for a task to be in a READY state
https://docs.celeryproject.org/en/stable/_modules/celery/states.html#state
:param task: Task data.
:param timeout: How many seconds to wait before raising a TimeoutError.
:param poll_frequency: How often (in seconds) to check task state while waiting.
:raises TimeoutError: Not in a READY state after the given amount of time.
:raises requests.HTTPError: Test run state could not be refreshed.
:raises RuntimeError: Test run task is failed or revoked for some reason.
"""
task_id = task['id']
ready_states = {'SUCCESS', 'FAILURE', 'REVOKED'}
start_time = time.monotonic()
logger.info('Waiting for test run %s to complete', task_id)
while time.monotonic() - start_time < timeout:
# Refresh task state
resp = self.request('GET', f'/tasks/{task_id}')
self._check_response(resp, expected_code=200)
task = resp.json()
logger.info('Test run %s is %s', task_id, task['state'])
if task['state'] in ready_states:
if task['state'] == 'SUCCESS':
logger.info('Test run report: %s',
f'{self._base_url}/reports/{task_id}')
return task
raise RuntimeError('Test run {} is {}: {}'.format(
task_id, task['state'], repr(task)
))
time.sleep(poll_frequency)
raise TimeoutError(f'Task not ready after {timeout}s: {repr(task)}')
def assert_all_tests_passed(self, task: dict):
"""Get the test run report and assert that all tests have passed
:param task: Task data.
:raises requests.HTTPError: Could not get test run report.
"""
task_id = task['id']
resp = self.request('GET', f'/reports/{task_id}')
self._check_response(resp, expected_code=200)
report = resp.json()
failed_tests = []
for test_result in report['results']:
if test_result['state'] not in ('PASSED', 'SKIPPED'):
failed_tests.append(test_result['id'])
logger.info('[%s] Failed test: %s', task_id, test_result['id'])
        assert len(failed_tests) == 0, f'{len(failed_tests)} test(s) failed in run {task_id}: {failed_tests}'
def execute(sb_environment: SBEnv, test_plan: str,
subset: Optional[List[str]] = None):
broker = SevenBridgesBrokerClient()
task = broker.new_test_run(sb_environment, test_plan, subset=subset)
task = broker.wait_until_done(task)
broker.assert_all_tests_passed(task)
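# A minimal usage sketch (the test plan path is a placeholder; BDCAT_SB_BROKER_TOKEN must be set in the
# environment): start a run, wait for it to finish, and assert that every test passed.
#   execute(SBEnv.staging, 'plans/smoke', subset=['test_login'])
# This raises on HTTP errors, on timeout, on FAILURE/REVOKED tasks, and on failed tests.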
|
# coding: utf-8
from vnpy.trader.vtConstant import *
from vnpy.trader.app.ctaStrategy.ctaTemplate import (CtaTemplate,
BarGenerator,
ArrayManager)
from collections import defaultdict
import numpy as np
import talib as ta
import pandas as pd
from datetime import datetime
class DemoStrategy(CtaTemplate):
    className = 'DemoStrategy'  # name of the strategy and of its MongoDB collection
author = 'Patrick'
version = '1.1'
    # Instruments traded by the strategy
    activeSymbol = EMPTY_STRING   # active leg
    passiveSymbol = EMPTY_STRING  # passive leg
    # Strategy variables
    posDict = {}
    posSize = 1  # order size per trade
    # Parameter list: parameter names shown as strategy parameters in the UI during live trading
paramList = ['name',
'className',
'author',
'activeSymbol',
'passiveSymbol']
    # Variable list: variable names shown as strategy variables in the UI during live trading
varList = ['inited',
'trading',
'posDict'
]
    # Sync list: variable names persisted to the database; posDict and eveningDict are mandatory
syncList = ['posDict',
'eveningDict']
# ----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(DemoStrategy, self).__init__(ctaEngine, setting)
# ----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.activeSymbol = self.symbolList[0] # 主动品种
# self.passiveSymbol = self.symbolList[1] # 被动品种
# 生成所有品种相应的 bgDict 和 amDict,用于存放一定时间长度的行情数据,时间长度size默认值是100
# 示例: self.generateBarDict( self.onBar, 5, self.on5MinBar)
# 将同时生成 self.bg5Dict 和 self.am5Dict ,字典的key是品种名,
# 用于生成 on5MinBar 需要的 Bar 和计算用的 bar array,可在 on5MinBar() 获取
self.generateBarDict(self.onBar)
# self.generateBarDict(self.onBar,5,self.on5MinBar,size =10)
        # # For high-frequency traders, second-level bars (or a second-level counter) are available;
        # # the argument is in seconds and the bars can be accessed in onHFBar()
        # self.generateHFBar(10)
        # History loading: it is recommended to cross-check the historical data fetched after live
        # initialization against the preloaded backtesting data to confirm the code is correct
if self.ctaEngine.engineType == 'backtesting':
            # Fetch initHours (from the backtesting settings) worth of historical data
self.initBacktesingData()
elif self.ctaEngine.engineType == 'trading':
pass
            # In live trading, load 1-minute historical bars and replay them to initialize the strategy state
            # Common choices for type_: ["1min","5min","15min","30min","60min","4hour","1day","1week","1month"]
# pastbar1 = self.loadHistoryBar(self.activeSymbol,
# type_ = "1min", size = 1000)
# pastbar2 = self.loadHistoryBar(self.passiveSymbol,
# type_ = "1min", size = 1000)
            # # Update the data arrays (optional)
# for bar1,bar2 in zip(pastbar1,pastbar2):
# self.amDict[self.activeSymbol].updateBar(bar1)
# self.amDict[self.passiveSymbol].updateBar(bar2)
        self.putEvent()  # putEvent refreshes the information shown in the UI
        '''
        During live initialization, if historical data is replayed through onBar/updateBar, the engine's
        order flag is False, so no orders are triggered.
        '''
# ----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.putEvent()
'''
实盘在点击启动策略时, 此时的引擎下单逻辑改为True, 此时开始推送到onbar的数据, 会触发下单。
'''
# ----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.putEvent()
# ----------------------------------------------------------------------
def onRestore(self):
"""从错误状态恢复策略(必须由用户继承实现)"""
self.putEvent()
# ----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送"""
# 在每个Tick推送过来的时候,进行updateTick,生成分钟线后推送到onBar.
# 注:如果没有updateTick,实盘将不会推送1分钟K线
self.bgDict[tick.vtSymbol].updateTick(tick)
# self.hfDict[tick.vtSymbol].updateTick(tick)
# ----------------------------------------------------------------------
def onHFBar(self,bar):
"""收到高频bar推送(需要在onInit定义频率,否则默认不推送)"""
self.writeCtaLog('stg_onHFbar_check_%s_%s_%s'%(bar.vtSymbol,bar.datetime,bar.close))
# ----------------------------------------------------------------------
def onBar(self,bar):
"""收到1分钟K线推送"""
self.writeCtaLog('stg_onbar_check_%s_%s_%s'%(bar.vtSymbol,bar.datetime,bar.close))
        # self.bg5Dict[bar.vtSymbol].updateBar(bar)  # the 1-minute bar must also be pushed to the 5-minute bg dict for aggregation
        self.buy(self.activeSymbol,  # instrument to trade
                 bar.close*0.95,     # order price
                 1,                  # order size
                 priceType = PRICETYPE_LIMITPRICE,  # price type: [PRICETYPE_LIMITPRICE, PRICETYPE_MARKETPRICE, PRICETYPE_FAK, PRICETYPE_FOK]
                 levelRate = 1)      # leverage for margin trading; default levelRate = 0
self.putEvent()
# ----------------------------------------------------------------------
# def on5MinBar(self,bar):
# """收到5分钟K线推送"""
# self.writeCtaLog('stg_on5Minbar_check_%s_%s_%s'%(bar.vtSymbol,bar.datetime,self.am5Dict[bar.vtSymbol].close))
    # self.am5Dict[bar.vtSymbol].updateBar(bar)  # the 5-minute bar must also be pushed to the 5-minute array dict for talib calculations
# ----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
content = u'stg_onorder订单状态, statu:%s, id:%s, dealamount:%s'%(order.status, order.vtOrderID, order.tradedVolume)
self.mail(content) # 邮件模块可以将信息发送给策略师,参数为邮件正文,回测会自动过滤这个方法
self.putEvent()
# ----------------------------------------------------------------------
def onTrade(self, trade):
"""收到成交信息变化推送"""
self.putEvent()
# ---------------------------------------------------------------------
def onStopOrder(self, so):
"""停止单推送"""
pass
|
import pygame
from game.control_object import ButtonObject, MenuObject
from game.interfaces import ILocation
from game.image_collection import ControlImageCollection
big_star = ControlImageCollection("../game_assets/star1.png", 50, 50).download_image()
small_star = ControlImageCollection("../game_assets/star1.png", 36, 36).download_image()
class Button(ButtonObject, ILocation):
"""
Button class for menu objects
"""
def __init__(self, menu, img, name):
super().__init__()
self.name = name
self.img = img
self.x = menu.x - 50
self.y = menu.y - 105
self.menu = menu
self.item_cost = []
self.width = self.img.get_width()
self.height = self.img.get_height()
def click(self, x, y):
"""
        Return True if the point (x, y) falls inside the button
:param x: int
:param y: int
:return: bool
"""
if self.x <= x <= self.x + self.width:
if self.y <= y <= self.y + self.height:
return True
return False
def update(self):
"""
Updates button position
:return: None
"""
self.x = self.menu.x - 50
self.y = self.menu.y - 105
def draw(self, win):
"""
Just draw the button in position(x, y)
:param win: surface
:return: None
"""
win.blit(self.img, (self.x, self.y))
def get_position(self):
return self.x, self.y
class Menu(MenuObject, ILocation):
"""
menu for holding items
"""
def __init__(self, tower, x, y, img, item_cost):
self.x = x
self.y = y
self.width = img.get_width()
self.height = img.get_height()
self.items = 0
self.item_cost = item_cost
self.buttons = []
self.bg = img
self.font = pygame.font.SysFont("conicsans", 25)
self.tower = tower
def add_btn(self, img, name):
"""
Add button to menu
:param img: surface
:param name: str
:return: None
"""
self.items += 1
self.buttons.append(Button(self, img, name))
def draw(self, win):
"""
Draws buttons and menu background
:param win: surface
:return: None
"""
win.blit(self.bg, (self.x - self.bg.get_width() / 2, self.y - 120))
for item in self.buttons:
item.draw(win)
win.blit(big_star, (item.x + item.width + 3, item.y - 3))
text = self.font.render(str(self.item_cost[self.tower.level - 1]), 1, (255, 255, 255))
win.blit(text, (item.x + item.width + 32 - text.get_width() / 2, item.y + big_star.get_height() - 17))
def get_item_cost(self):
"""
Gets cost of tower upgrade to the next level
:return: int
"""
return self.item_cost[self.tower.level - 1]
def get_clicked(self, x, y):
"""
return the clicked item from the menu
:param x: int
:param y: int
:return: str
"""
for btn in self.buttons:
if btn.click(x, y):
return btn.name
return None
def update(self):
"""
Update menu and button location
:return: None
"""
for btn in self.buttons:
btn.update()
def get_position(self):
return self.x, self.y
class PlayPauseButton(Button):
def __init__(self, play_img, pause_image, x, y):
self.img = play_img
self.play_img = play_img
self.pause_img = pause_image
self.x = x
self.y = y
self.item_cost = []
self.width = self.img.get_width()
self.height = self.img.get_height()
self.pause = True
self.font = pygame.font.SysFont("comicsans", 23)
def draw(self, win):
if self.pause:
win.blit(self.play_img, (self.x, self.y))
else:
win.blit(self.pause_img, (self.x, self.y))
class VerticalButton(Button):
"""
Button class for menu objects
"""
def __init__(self, x, y, img, name, cost):
self.name = name
self.img = img
self.x = x
self.y = y
self.item_cost = []
self.width = self.img.get_width()
self.height = self.img.get_height()
self.cost = cost
class VerticalMenu(Menu):
"""
Vertical menu for side bar of game
"""
def __init__(self, x, y, img):
self.x = x
self.y = y
self.width = img.get_width()
self.height = img.get_height()
self.items = 0
self.buttons = []
self.bg = img
self.font = pygame.font.SysFont("conicsans", 25)
def add_btn(self, img, name, cost):
"""
Add button to vertical menu
:param img: surface
:param name: str
:return: None
"""
self.items += 1
btn_x = self.x - 30
btn_y = self.y - 30 + (self.items - 1) * 120
self.buttons.append(VerticalButton(btn_x, btn_y, img, name, cost))
def draw(self, win):
"""
Draws buttons and menu background
:param win: surface
:return: None
"""
win.blit(self.bg, (self.x - self.bg.get_width() / 2, self.y - 110))
for item in self.buttons:
VerticalButton.draw(item, win)
win.blit(small_star, (item.x + item.width // 2 + 2, item.y + item.height - 20))
text = self.font.render(str(item.cost), 1, (255, 255, 255))
win.blit(text, (item.x - text.get_width() // 2 + 20, item.y + small_star.get_height() + 21))
def get_item_cost(self, name):
"""
gets cost of items
:param name: str
:return: int
"""
for btn in self.buttons:
if btn.name == name:
return btn.cost
return -1
|
FW = "../examples/mac/devboard/Device.hex"
#ex.otap(node.keys(), FW, 0)
for k in range(500):
    for n in nodes:
        print("----- Node " + hex(n.id) + " ----------------")
        print(n.sys.led(2))
        print(n.sys.ar())
        u = n.sys.util()
        if u is not None:
            print(str(u.mem))
        print(str(n.sys.ping()))
        print(n.sys.ac())
|
#!/usr/bin/env python3
import os
import json
import uuid
from dsrlib.meta import Meta
from .actions import Axis, ActionVisitor, InvertPadAxisAction, SwapAxisAction, \
GyroAction, CustomAction, DisableButtonAction
from .buttons import Buttons
from .configuration import Configuration
class BaseJSONWriter(ActionVisitor):
VERSION = 0
def encodeWorkspace(self, workspace):
configurations = []
for configuration in workspace.configurations():
configurations.append(self.encodeConfiguration(configuration))
return {'configurations': configurations}
def encodeConfiguration(self, configuration):
actions = []
for action in configuration.actions():
actions.append(self.encodeAction(action))
        return {
            'name': configuration.name(),
            'uuid': configuration.uuid(),
            'thumbnail': self.pathFor(configuration.thumbnail()),
            'actions': actions,
            'description': configuration.description(),
        }
def encodeAction(self, action):
return self.visit(action)
def _acceptDisableButtonAction(self, action):
return {'type': 'disable_button', 'button': action.button().name}
def _acceptInvertPadAxisAction(self, action):
return {'type': 'invert_pad', 'pad': action.pad(), 'axis': action.axis()}
def _acceptSwapAxisAction(self, action):
axis1, axis2 = action.axis()
return {'type': 'swap_pads', 'axis1': axis1.name, 'axis2': axis2.name}
def _acceptGyroAction(self, action):
return {'type': 'gyro', 'buttons': [button.name for button in action.buttons()]}
def _acceptCustomAction(self, action):
return {'type': 'custom', 'code': action.source()}
def pathFor(self, filename):
raise NotImplementedError
class JSONWriter(BaseJSONWriter):
def write(self, stream, workspace):
data = self.encodeWorkspace(workspace)
json.dump({'version': self.VERSION, 'workspace': data}, stream)
def pathFor(self, filename):
return filename
class JSONExporter(BaseJSONWriter):
def __init__(self, zipobj):
self._zipobj = zipobj
self._count = 0
def write(self, configuration):
data = self.encodeConfiguration(configuration)
data['version'] = self.VERSION
data['uuid'] = uuid.uuid1().hex
self._zipobj.writestr('configuration.json', json.dumps(data, indent=2))
def pathFor(self, filename):
_, ext = os.path.splitext(filename)
name = '%d%s' % (self._count, ext)
self._count += 1
self._zipobj.write(filename, name)
return name
class BaseJSONReader:
def decodeWorkspace(self, workspace, data):
self.version = data['version'] # pylint: disable=W0201
for cdata in data['workspace']['configurations']:
configuration = self.decodeConfiguration(cdata)
workspace.configurations().addItem(configuration)
def decodeConfiguration(self, data):
configuration = Configuration(uid=data['uuid'])
configuration.setName(data['name'])
configuration.setDescription(data.get('description', ''))
configuration.setThumbnail(self.pathFor(data['thumbnail']))
for adata in data['actions']:
action = self.decodeAction(adata)
configuration.addAction(action)
return configuration
    def decodeAction(self, data):
        if data['type'] == 'disable_button':
            action = DisableButtonAction()
            action.setButton(Buttons[data['button']])
        elif data['type'] == 'invert_pad':
            action = InvertPadAxisAction()
            action.setPad(data['pad'])
            action.setAxis(data['axis'])
        elif data['type'] == 'swap_pads':
            action = SwapAxisAction()
            action.setAxis1(Axis[data['axis1']])
            action.setAxis2(Axis[data['axis2']])
        elif data['type'] == 'gyro':
            action = GyroAction()
            action.setButtons([Buttons[name] for name in data['buttons']])
        elif data['type'] == 'custom':
            action = CustomAction()
            action.setSource(data['code'])
        else:
            raise ValueError('Unknown action type: {}'.format(data['type']))
        return action
def pathFor(self, filename):
raise NotImplementedError
class JSONReader(BaseJSONReader):
def read(self, stream, workspace):
data = json.load(stream)
self.version = data['version'] # pylint: disable=W0201
self.decodeWorkspace(workspace, data)
def pathFor(self, filename):
return filename
class JSONImporter(BaseJSONReader):
def __init__(self):
self._zipobj = None
def read(self, zipobj):
self._zipobj = zipobj
data = json.loads(zipobj.read('configuration.json').decode('utf-8'))
return self.decodeConfiguration(data)
def pathFor(self, filename):
dst = Meta.newThumbnail(filename)
self._zipobj.extract(filename, os.path.dirname(dst))
os.rename(os.path.join(os.path.dirname(dst), filename), dst)
return dst
|
import numpy as np
import torch
import yaml
import os
from utils.tracking_utils import *
from utils.kalman_filter import KalmanBoxTracker
from scipy.optimize import linear_sum_assignment
import sys
import argparse
import time
def associate_instances(previous_instances, current_instances, overlaps, pose, association_weights):
pose = pose.cpu().float()
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
p_n = len(previous_instances.keys())
c_n = len(current_instances.keys())
association_costs = torch.zeros(p_n, c_n)
prev_ids = []
current_ids = []
current_instances_prev = {}
for i, (k, v) in enumerate(previous_instances.items()):
#v['kalman_bbox'][0:3] += pose[:3, 3]
#v['kalman_bbox'][0:3] = torch.matmul(v['kalman_bbox'][0:3],pose[:3, :3])
#v['bbox'][0:3] = v['kalman_bbox'][0:3] - v['kalman_bbox'][4:]/2
#v['bbox'][3:] = v['kalman_bbox'][0:3] + v['kalman_bbox'][4:] / 2
pass
for i, (k, v) in enumerate(previous_instances.items()):
prev_ids.append(k)
for j, (k1, v1) in enumerate(current_instances.items()):
if v1['class'] == v['class'] and k1 not in overlaps:
#cost_3d = 1 - IoU(v1['bbox'], v['bbox'])
#if k1 in current_instances_prev:
# cost_3d = min (cost_3d, 1 - IoU(current_instances_prev[k1]['bbox'], v['bbox']))
#if cost_3d > 0.75:
# cost_3d = 1e8
#if v1['bbox_proj'] is not None:
# cost_2d = 1 - IoU(v1['bbox_proj'], v['bbox_proj'])
# if k1 in current_instances_prev:
# cost_2d = min(cost_2d, 1 - IoU(current_instances_prev[k1]['bbox_proj'], v['bbox_proj']))
# if cost_2d > 0.75:
# cost_2d = 1e8
#else:
# cost_2d = 0
cost_center = euclidean_dist(v1['kalman_bbox'], v['kalman_bbox'])
if k1 in current_instances_prev:
cost_center = min(cost_center, euclidean_dist(current_instances_prev[k1]['kalman_bbox'],v['kalman_bbox']))
if cost_center > 5:
cost_center = 1e8
#feature_cost = 1 - cos(v1['mean'], v['mean'])
#if k1 in current_instances_prev:
# feature_cost = min(feature_cost, 1 - cos(current_instances_prev[k1]['mean'], v['mean']))
#if feature_cost > 0.5:
# feature_cost = 1e8
costs = torch.tensor([0, 0, cost_center, 0])
for idx, a_w in enumerate(association_weights):
association_costs[i, j] += a_w * costs[idx]
else:
association_costs[i, j] = 1e8
if i == 0:
current_ids.append(k1)
idxes_1, idxes_2 = linear_sum_assignment(association_costs.cpu().detach())
associations = []
for i1, i2 in zip(idxes_1, idxes_2):
# max_cost = torch.sum((previous_instances[prev_ids[i1]]['var'][0,-3:]/2)**2)
if association_costs[i1][i2] < 1e8:
associations.append((prev_ids[i1], current_ids[i2]))
return association_costs, associations
def associate_instances_overlapping_frames(previous_ins_label, current_ins_label):
previous_instance_ids, c_p = np.unique(previous_ins_label, return_counts=True)
current_instance_ids, c_c = np.unique(current_ins_label, return_counts=True)
previous_instance_ids = [x for i,x in enumerate(previous_instance_ids) if c_p[i] > 25] #
current_instance_ids = [x for i, x in enumerate(current_instance_ids) if c_c[i] > 50] #
p_n = len(previous_instance_ids) -1
c_n = len(current_instance_ids) -1
prev_ids = []
current_ids = []
association_costs = torch.zeros(p_n, c_n)
for i, p_id in enumerate(previous_instance_ids[1:]):
prev_ids.append(p_id)
for j, c_id in enumerate(current_instance_ids[1:]):
intersection = np.sum( (previous_ins_label==p_id) & (current_ins_label == c_id) )
union = np.sum(previous_ins_label==p_id) + np.sum(current_ins_label == c_id) - intersection
iou = intersection/union
cost = 1 - iou
association_costs[i, j] = cost if cost < 0.50 else 1e8
if i == 0:
current_ids.append(c_id)
idxes_1, idxes_2 = linear_sum_assignment(association_costs.cpu().detach())
associations = []
association_costs_matched = []
for i1, i2 in zip(idxes_1, idxes_2):
if association_costs[i1][i2] < 1e8:
associations.append((prev_ids[i1], current_ids[i2]))
association_costs_matched.append(association_costs[i1][i2])
return association_costs_matched, associations
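# A small worked example of the matching cost above (assumed counts): if a previous-frame instance and a
# current-frame instance overlap on 60 points out of a union of 100 points, IoU = 0.6 and the cost is 0.4;
# any pair with IoU <= 0.5 (cost >= 0.5) is set to 1e8 and therefore never selected by linear_sum_assignment.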
def main(FLAGS):
data_cfg = 'data/SemanticKitti/semantic-kitti.yaml'
DATA = yaml.safe_load(open(data_cfg, 'r'))
split = 'valid'
dataset = 'data/SemanticKitti'
prediction_dir = FLAGS.predictions
if split == 'valid':
prediction_path = '{}/val_probs'.format(prediction_dir)
else:
prediction_path = '{}/probs'.format(prediction_dir)
n_test_frames = FLAGS.n_test_frames
association_weights = [FLAGS.iou_3d, FLAGS.iou_2d, FLAGS.center, FLAGS.feature]
association_names = ['3d', '2d', 'cen', 'fet']
assoc_saving = [asc_type for idx, asc_type in enumerate(association_names) if
association_weights[idx] > 0]
assoc_saving.append(str(n_test_frames))
assoc_saving = '_'.join(assoc_saving)
save_path = '{}/stitch'.format(prediction_dir)+assoc_saving
if not os.path.exists(save_path):
os.makedirs(save_path)
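    # Example (assumed flags): with --center 0.5 and -n 2 the suffix is "cen_2", so results are written to
    # "<predictions>/stitchcen_2/sequences/<seq>/predictions/".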
with open(data_cfg, 'r') as stream:
doc = yaml.safe_load(stream)
learning_map_doc = doc['learning_map']
inv_learning_map_doc = doc['learning_map_inv']
inv_learning_map = np.zeros((np.max([k for k in inv_learning_map_doc.keys()]) + 1), dtype=np.int32)
for k, v in inv_learning_map_doc.items():
inv_learning_map[k] = v
# get number of interest classes, and the label mappings
# class
class_remap = DATA["learning_map"]
class_inv_remap = DATA["learning_map_inv"]
class_ignore = DATA["learning_ignore"]
nr_classes = len(class_inv_remap)
class_strings = DATA["labels"]
# make lookup table for mapping
# class
maxkey = max(class_remap.keys())
# +100 hack making lut bigger just in case there are unknown labels
class_lut = np.zeros((maxkey + 100), dtype=np.int32)
class_lut[list(class_remap.keys())] = list(class_remap.values())
# class
ignore_class = [cl for cl, ignored in class_ignore.items() if ignored]
print("Ignoring classes: ", ignore_class)
# get test set
test_sequences = DATA["split"][split]
# get label paths
poses = []
test_sequences = FLAGS.sequences
for sequence in test_sequences:
calib = parse_calibration(os.path.join(dataset, "sequences", '{0:02d}'.format(sequence), "calib.txt"))
poses_f64 = parse_poses(os.path.join(dataset, "sequences", '{0:02d}'.format(sequence), "poses.txt"), calib)
poses.append([pose.astype(np.float32) for pose in poses_f64])
for poses_seq, sequence in zip(poses, test_sequences):
point_names = []
point_paths = os.path.join(dataset, "sequences", '{0:02d}'.format(sequence), "velodyne")
# populate the label names
seq_point_names = sorted(
[os.path.join(point_paths, fn) for fn in os.listdir(point_paths) if fn.endswith(".bin")])
point_names.extend(seq_point_names)
prev_instances = {}
overlap_history = {}
if not os.path.exists(os.path.join(save_path, 'sequences', '{0:02d}'.format(sequence))):
os.makedirs(os.path.join(save_path, 'sequences', '{0:02d}'.format(sequence)))
if not os.path.exists(os.path.join(save_path, 'sequences', '{0:02d}'.format(sequence), 'predictions')):
os.makedirs(os.path.join(save_path, 'sequences', '{0:02d}'.format(sequence), 'predictions'))
for idx, point_file in zip(range(len(point_names)), point_names):
times = []
times.append(time.time())
pose = poses_seq[idx]
#load current frame
sem_path = os.path.join(prediction_path, '{0:02d}_{1:07d}.npy'.format(sequence,idx))
ins_path = os.path.join(prediction_path, '{0:02d}_{1:07d}_i.npy'.format(sequence,idx))
fet_path = os.path.join(prediction_path, '{0:02d}_{1:07d}_f.npy'.format(sequence, idx))
label_sem_class = np.load(sem_path)
label_inst = np.load(ins_path)
frame_points = np.fromfile(point_file, dtype=np.float32)
points = frame_points.reshape((-1, 4))
hpoints = np.hstack((points[:, :3], np.ones_like(points[:, :1])))
new_points = np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1)
points = new_points[:, :3]
things = (label_sem_class < 9) & (label_sem_class > 0)
ins_ids = np.unique(label_inst * things)
if os.path.exists(fet_path):
features = np.load(fet_path, allow_pickle=True).tolist()
else:
features = {}
for ins_id in ins_ids:
features[ins_id] = torch.from_numpy(np.zeros((1,1)))
projections = do_range_projection(points)
points = torch.from_numpy(points)
new_instances = {}
label_inst = torch.from_numpy(label_inst.astype(np.int32))
# get instances from current frames to track
for ins_id in ins_ids:
if ins_id == 0:
continue
if int(ins_id) not in features:
ids = np.where(label_inst == ins_id)
label_inst[ids] = 0
continue
mean = features[int(ins_id)]
ids = np.where(label_inst == ins_id)
if ids[0].shape[0] < 25:
label_inst[ids] = 0
continue
(values, counts) = np.unique(label_sem_class[ids], return_counts=True)
inst_class = values[np.argmax(counts)]
new_ids = remove_outliers(points[ids])
new_ids = ids[0][new_ids]
bbox, kalman_bbox = get_bbox_from_points(points[ids])
tracker = KalmanBoxTracker(kalman_bbox, ins_id)
center = get_median_center_from_points(points[ids])
bbox_proj = get_2d_bbox(projections[:, new_ids])
new_instances[ins_id] = {'life': 5, 'bbox': bbox, 'bbox_proj': bbox_proj, 'center' : center, 'n_point':ids[0].shape[0],
'tracker': tracker, 'kalman_bbox': kalman_bbox, 'mean':mean, 'class' : inst_class}
new_instances_prev = {}
overlaps = {}
overlap_scores = {}
# if multi frame prediction
times.append(time.time()) # loading time
if idx > 0:
for i in range(1, n_test_frames):
if idx - i < 0:
continue
# load previous frames which are predicted with current frame in a multi-frame fashion
prev_inst_path = os.path.join(prediction_path, '{0:02d}_{1:07d}_{2}_i.npy'.format(sequence, idx-i, idx))
prev_sem_path = os.path.join(prediction_path,
'{0:02d}_{1:07d}.npy'.format(sequence, idx - i))
fet_path = os.path.join(prediction_path, '{0:02d}_{1:07d}_{2}_f.npy'.format(sequence, idx - i, idx))
if not os.path.exists(prev_inst_path):
continue
prev_inst = np.load(prev_inst_path)
prev_sem = np.load(prev_sem_path)
if os.path.exists(fet_path):
features = np.load(fet_path, allow_pickle=True).tolist()
else:
features = {}
for ins_id in ins_ids:
features[ins_id] = torch.from_numpy(np.zeros((1, 1)))
prev_inst_orig_path = os.path.join(prediction_path,
'{0:02d}_{1:07d}_i.npy'.format(sequence, idx - i))
prev_inst_orig = np.load(prev_inst_orig_path)
things = (prev_sem < 9) & (prev_sem > 0)
# associate instances from previous frame pred_n and current prediction which contain pred_n, pred_n+1
association_costs, associations = associate_instances_overlapping_frames(prev_inst_orig* things, prev_inst* things)
for cost, (id1, id2) in zip(association_costs, associations):
if id2 not in overlaps:
overlap_scores[id2] = cost
elif overlap_scores[id2] > cost:
continue
elif overlap_scores[id2] < cost:
overlap_scores[id2] = cost
if id1 in overlap_history: #get track id of instance from previous frame
id1 = overlap_history[id1]
overlaps[id2] = id1
prev_point_path = os.path.join(dataset, "sequences", '{0:02d}'.format(int(sequence)), "velodyne", '{0:06d}.bin'.format(idx-i))
#pose = poses[0][idx-i]
frame_points = np.fromfile(prev_point_path, dtype=np.float32)
points = frame_points.reshape((-1, 4))
hpoints = np.hstack((points[:, :3], np.ones_like(points[:, :1])))
new_points = np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1)
points = new_points[:, :3]
points = torch.from_numpy(points)
projections = do_range_projection(points.cpu().detach().numpy())
prev_inst = torch.from_numpy(prev_inst.astype(np.int32))
                    # add instances which are not overlapped, for association
ins_ids = np.unique(prev_inst * things)
for ins_id in ins_ids:
if ins_id == 0:
continue
if int(ins_id) not in features:
ids = np.where(prev_inst == ins_id)
prev_inst[ids] = 0
continue
ids = np.where(prev_inst == ins_id)
if ids[0].shape[0] < 25:
prev_inst[ids] = 0
continue
mean = features[int(ins_id)]
new_ids = remove_outliers(points[ids])
new_ids = ids[0][new_ids]
(values, counts) = np.unique(prev_sem[ids], return_counts=True)
inst_class = values[np.argmax(counts)]
bbox, kalman_bbox = get_bbox_from_points(points[ids])
center = get_median_center_from_points(points[ids])
bbox_proj = get_2d_bbox(projections[:, new_ids])
tracker = KalmanBoxTracker(kalman_bbox, ins_id)
new_instances_prev[ins_id] = {'life': 5, 'bbox': bbox, 'bbox_proj': bbox_proj,
'tracker': tracker, 'kalman_bbox': kalman_bbox, 'mean': mean,
'center':center, 'class' : inst_class}
times.append(time.time()) # overlap associate times
#if len(prev_instances.keys()) > 0:
# association_costs, associations = associate_instances(prev_instances, new_instances, overlaps,
# torch.from_numpy(pose), association_weights)
associations = []
times.append(time.time()) # assoc time from prev
# if there was instances from previous frames
if len(prev_instances.keys()) > 0:
#firstly associate overlapping instances
for (new_id, prev_id) in overlaps.items():
ins_points = torch.where((label_inst == new_id))
if not new_id in new_instances or prev_id not in prev_instances:
continue
overlap_history[new_id] = prev_id#add tracking id
label_inst[ins_points[0]] = prev_id
prev_instances[prev_id]['bbox_proj'] = new_instances[new_id]['bbox_proj']
prev_instances[prev_id]['mean'] = new_instances[new_id]['mean']
prev_instances[prev_id]['center'] = new_instances[new_id]['center']
prev_instances[prev_id]['life'] += 1
prev_instances[prev_id]['tracker'].update(new_instances[new_id]['kalman_bbox'], prev_id)
prev_instances[prev_id]['kalman_bbox'] = torch.from_numpy(prev_instances[prev_id]['tracker'].get_state()).float()
prev_instances[prev_id]['bbox'] = kalman_box_to_eight_point(prev_instances[prev_id]['kalman_bbox'])
del new_instances[new_id]
for prev_id, new_id in associations:
if new_id in overlaps:
continue
# associate instances which are not overlapped
ins_points = torch.where((label_inst == new_id))
label_inst[ins_points[0]] = prev_id
overlap_history[new_id] = prev_id
prev_instances[prev_id]['bbox_proj'] = new_instances[new_id]['bbox_proj']
prev_instances[prev_id]['mean'] = new_instances[new_id]['mean']
prev_instances[prev_id]['center'] = new_instances[new_id]['center']
prev_instances[prev_id]['life'] += 1
prev_instances[prev_id]['tracker'].update(new_instances[new_id]['kalman_bbox'], prev_id)
prev_instances[prev_id]['kalman_bbox'] = torch.from_numpy(prev_instances[prev_id]['tracker'].get_state()).float()
prev_instances[prev_id]['bbox'] = kalman_box_to_eight_point(prev_instances[prev_id]['kalman_bbox'])
del new_instances[new_id]
for ins_id, instance in new_instances.items(): # add new instances to history
ids = np.where(label_inst == ins_id)
if ids[0].shape[0] < 50:
continue
prev_instances[ins_id] = instance
# kill instances which are not tracked for a while
dont_track_ids = []
for ins_id in prev_instances.keys():
if prev_instances[ins_id]['life'] == 0:
dont_track_ids.append(ins_id)
prev_instances[ins_id]['life'] -= 1
for ins_id in dont_track_ids:
del prev_instances[ins_id]
times.append(time.time()) # updating ids
ins_preds = label_inst.cpu().numpy()
#clean instances which have too few points
for ins_id in np.unique(ins_preds):
if ins_id == 0:
continue
valid_ind = np.argwhere(ins_preds == ins_id)[:, 0]
ins_preds[valid_ind] = ins_id+20
if valid_ind.shape[0] < 25:
ins_preds[valid_ind] = 0
for sem_id in np.unique(label_sem_class):
if sem_id < 1 or sem_id > 8:
valid_ind = np.argwhere((label_sem_class == sem_id) & (ins_preds == 0))[:, 0]
ins_preds[valid_ind] = sem_id
#write instances to label file which is binary
ins_preds = ins_preds.astype(np.int32)
new_preds = np.left_shift(ins_preds, 16)
sem_pred = label_sem_class.astype(np.int32)
inv_sem_labels = inv_learning_map[sem_pred]
new_preds = np.bitwise_or(new_preds,inv_sem_labels)
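            # SemanticKITTI .label packing: instance id in the upper 16 bits, inverse-mapped semantic
            # label in the lower 16 bits, e.g. instance id 7 with raw label 10 -> (7 << 16) | 10 = 458762.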
new_preds.tofile('{}/{}/{:02d}/predictions/{:06d}.label'.format(save_path, 'sequences', sequence, idx))
times.append(time.time()) #writing time
#print ('load, overlap, assoc, update, write')
#for i in range(1, len(times)):
# print (times[i]-times[i-1])
print("{}/{} ".format(idx, len(point_names)), end="\r", flush=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser("./stitch_tracklets.py")
parser.add_argument(
'--n_test_frames',
'-n',
type=int,
default=1
)
parser.add_argument(
'--3d', '-3d',
dest='iou_3d',
type=float,
default=0
)
parser.add_argument(
'--2d', '-2d',
dest='iou_2d',
type=float,
default=0
)
parser.add_argument(
'--center', '-c',
dest='center',
type=float,
default=0
)
parser.add_argument(
'--feature', '-f',
dest='feature',
type=float,
default=0
)
parser.add_argument(
'--sequences', '-s',
dest='sequences',
type=str,
default='8'
)
parser.add_argument(
'--predictions', '-p',
dest='predictions',
type=str,
required=True
)
FLAGS, unparsed = parser.parse_known_args()
FLAGS.sequences = [int(x) for x in FLAGS.sequences.split(',')]
main(FLAGS)
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def partition(self, head, x):
"""
:type head: ListNode
:type x: int
:rtype: ListNode
"""
dummy_l = ListNode('i')
dummy_r = ListNode('i')
l, r = dummy_l, dummy_r
cur = head
while cur:
if cur.val < x:
l.next = cur
l = l.next
else:
r.next = cur
r = r.next
cur = cur.next
r.next = None
l.next = dummy_r.next
return dummy_l.next
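# Example (using the commented-out ListNode definition above): partition(1->4->3->2->5->2, x=3)
# returns 1->2->2->4->3->5; nodes keep their original relative order within each half.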
|
"""
image sampler code is cloned from
https://raw.githubusercontent.com/acil-bwh/ChestImagingPlatform/develop/cip_python/dcnn/data/data_processing.py
"""
import math
import vtk
import numpy as np
import SimpleITK as sitk
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import scipy.ndimage.interpolation as scipy_interpolation
class DataProcessing(object):
@classmethod
def resample_image_itk(
cls, image, output_size, output_type=None, interpolator=sitk.sitkBSpline
):
"""
Image resampling using ITK
:param image: simpleITK image
:param output_size: numpy array or tuple. Output size
:param output_type: simpleITK output data type. If None, use the same as 'image'
:param interpolator: simpleITK interpolator (default: BSpline)
:return: tuple with simpleITK image and array with the resulting output spacing
"""
if not isinstance(output_size, np.ndarray):
output_size = np.array(output_size)
factor = np.asarray(image.GetSize()) / output_size.astype(np.float32)
output_spacing = np.asarray(image.GetSpacing()) * factor
resampler = sitk.ResampleImageFilter()
resampler.SetOutputDirection(image.GetDirection())
resampler.SetSize(output_size.tolist())
resampler.SetInterpolator(interpolator)
resampler.SetOutputSpacing(output_spacing)
resampler.SetOutputPixelType(
output_type if output_type is not None else image.GetPixelIDValue()
)
resampler.SetOutputOrigin(image.GetOrigin())
return resampler.Execute(image), output_spacing
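    # A minimal usage sketch (hypothetical file name): resample a CT volume to a fixed 128^3 grid and
    # keep the spacing that results from the size change.
    #   img = sitk.ReadImage("ct.nrrd")
    #   resampled, new_spacing = DataProcessing.resample_image_itk(img, (128, 128, 128))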
@classmethod
def resample_image_itk_by_spacing(
cls, image, output_spacing, output_type=None, interpolator=sitk.sitkBSpline
):
"""
Image resampling using ITK
:param image: simpleITK image
:param output_spacing: numpy array or tuple. Output spacing
:param output_type: simpleITK output data type. If None, use the same as 'image'
:param interpolator: simpleITK interpolator (default: BSpline)
        :return: resampled simpleITK image
"""
if not isinstance(output_spacing, np.ndarray):
output_spacing = np.array(output_spacing)
factor = np.asarray(image.GetSpacing()) / output_spacing.astype(np.float32)
output_size = np.round(np.asarray(image.GetSize()) * factor + 0.0005).astype(
np.uint32
)
resampler = sitk.ResampleImageFilter()
resampler.SetOutputDirection(image.GetDirection())
resampler.SetSize(output_size.tolist())
resampler.SetInterpolator(interpolator)
resampler.SetOutputSpacing(output_spacing)
resampler.SetOutputPixelType(
output_type if output_type is not None else image.GetPixelIDValue()
)
resampler.SetOutputOrigin(image.GetOrigin())
return resampler.Execute(image)
@classmethod
def resample_image_itk_by_spacing_and_size(
cls,
image,
output_spacing,
output_size,
output_type=None,
interpolator=sitk.sitkBSpline,
padding_value=-1024,
center_padding=True,
):
"""
Image resampling using ITK
:param image: simpleITK image
:param output_spacing: numpy array or tuple. Output spacing
:param output_size: numpy array or tuple. Output size
:param output_type: simpleITK output data type. If None, use the same as 'image'
:param interpolator: simpleITK interpolator (default: BSpline)
:param padding_value: pixel padding value when a transformed pixel is outside of the image
        :return: resampled simpleITK image
"""
resampler = sitk.ResampleImageFilter()
resampler.SetOutputDirection(image.GetDirection())
resampler.SetSize(output_size)
resampler.SetDefaultPixelValue(padding_value)
resampler.SetInterpolator(interpolator)
resampler.SetOutputSpacing(np.array(output_spacing))
resampler.SetOutputPixelType(
output_type if output_type is not None else image.GetPixelIDValue()
)
factor = np.asarray(image.GetSpacing()) / np.asarray(output_spacing).astype(
np.float32
)
# Get new output origin
if center_padding:
real_output_size = np.round(
np.asarray(image.GetSize()) * factor + 0.0005
).astype(np.uint32)
diff = ((output_size - real_output_size) * np.asarray(output_spacing)) / 2
output_origin = np.asarray(image.GetOrigin()) - diff
else:
output_origin = np.asarray(image.GetOrigin())
resampler.SetOutputOrigin(output_origin)
return resampler.Execute(image)
@classmethod
def reslice_3D_image_vtk(
cls, image, x_axis, y_axis, z_axis, center_point, target_size, output_spacing
):
"""
3D image reslicing using vtk.
:param image: VTK image
:param x_axis: tuple. X direction for vtk SetResliceAxesDirectionCosines function used to specify the orientation of the slice. The direction cosines give the x, y, and z axes for the output volume
:param y_axis: tuple. Y direction for vtk SetResliceAxesDirectionCosines function used to specify the orientation of the slice. The direction cosines give the x, y, and z axes for the output volume
:param z_axis: tuple. Z direction for vtk SetResliceAxesDirectionCosines function used to specify the orientation of the slice. The direction cosines give the x, y, and z axes for the output volume
:param center_point: tuple. Axes center for the reslicing operation in RAS format.
:param target_size: tuple. Size of the output image.
:param output_spacing: tuple. Spacing of the output image.
:return: resliced vtk image in 3D
"""
reslice = vtk.vtkImageReslice()
reslice.SetInputData(image)
reslice.SetResliceAxesDirectionCosines(
x_axis[0],
x_axis[1],
x_axis[2],
y_axis[0],
y_axis[1],
y_axis[2],
z_axis[0],
z_axis[1],
z_axis[2],
)
reslice.SetResliceAxesOrigin(center_point)
reslice.SetOutputDimensionality(3)
reslice.SetInterpolationMode(vtk.VTK_RESLICE_CUBIC)
reslice.SetOutputSpacing(output_spacing)
reslice.SetOutputExtent(
0, target_size[0] - 1, 0, target_size[1] - 1, 0, target_size[2] - 1
)
reslice.SetOutputOrigin(
-(target_size[0] * 0.5 - 0.5) * output_spacing[0],
-(target_size[1] * 0.5 - 0.5) * output_spacing[1],
-(target_size[2] * 0.5 - 0.5) * output_spacing[2],
)
reslice.Update()
return reslice.GetOutput().GetPointData().GetScalars()
@classmethod
def reslice_2D_image_vtk(
cls, image, x_axis, y_axis, z_axis, center_point, target_size, output_spacing
):
"""
2D image reslicing using vtk.
:param image: VTK image
:param x_axis: tuple. X direction for vtk SetResliceAxesDirectionCosines function used to specify the orientation of the slice. The direction cosines give the x, y, and z axes for the output volume
:param y_axis: tuple. Y direction for vtk SetResliceAxesDirectionCosines function used to specify the orientation of the slice. The direction cosines give the x, y, and z axes for the output volume
:param z_axis: tuple. Z direction for vtk SetResliceAxesDirectionCosines function used to specify the orientation of the slice. The direction cosines give the x, y, and z axes for the output volume
:param center_point: tuple. Axes center for the reslicing operation in RAS format.
:param target_size: tuple. Size of the output image.
:param output_spacing: tuple. Spacing of the output image in x, y , z.
:return: resliced vtk image in 2D
"""
reslice = vtk.vtkImageReslice()
reslice.SetInputData(image)
reslice.SetResliceAxesDirectionCosines(
x_axis[0],
x_axis[1],
x_axis[2],
y_axis[0],
y_axis[1],
y_axis[2],
z_axis[0],
z_axis[1],
z_axis[2],
)
reslice.SetResliceAxesOrigin(center_point)
reslice.SetOutputDimensionality(2)
reslice.SetInterpolationMode(vtk.VTK_RESLICE_CUBIC)
reslice.SetOutputSpacing(output_spacing)
reslice.SetOutputExtent(0, target_size[0] - 1, 0, target_size[1] - 1, 0, 1)
reslice.SetOutputOrigin(
-(target_size[0] * 0.5 - 0.5) * output_spacing[0],
-(target_size[1] * 0.5 - 0.5) * output_spacing[1],
0,
)
reslice.Update()
return reslice.GetOutput().GetPointData().GetScalars()
@classmethod
def similarity_3D_transform_with_coords(
cls,
img,
coords,
output_size,
translation,
scale,
interpolator=sitk.sitkBSpline,
default_pixel_value=0.0,
):
"""
Apply a 3D similarity transform to an image and use the same transformation for a list of coordinates
(rotation not implemented at the moment)
:param img: simpleITK image
:param coords: numpy array of coordinates (Nx3) or None
        :param output_size: tuple/list. Output image size
        :param translation: translation vector or None
        :param scale: scale factor or None
:return: tuple with sitkImage, transformed_coords
"""
reference_image = sitk.Image(output_size, img.GetPixelIDValue())
output_size_arr = np.array(output_size)
reference_image.SetOrigin(img.GetOrigin())
reference_image.SetDirection(img.GetDirection())
spacing = (
np.array(img.GetSize()) * np.array(img.GetSpacing())
) / output_size_arr
reference_image.SetSpacing(spacing)
# Create the transformation
tr = sitk.Similarity3DTransform()
if translation is not None:
tr.SetTranslation(translation)
if scale is not None:
tr.SetScale(scale)
# Apply the transformation to the image
img2 = sitk.Resample(
img, reference_image, tr, interpolator, default_pixel_value
)
if coords is not None:
# Apply the transformation to the coordinates
transformed_coords = np.zeros_like(coords)
for i in range(coords.shape[0]):
coords_ph = img.TransformContinuousIndexToPhysicalPoint(coords[i])
coords_ph = tr.GetInverse().TransformPoint(coords_ph)
transformed_coords[i] = np.array(
img2.TransformPhysicalPointToContinuousIndex(coords_ph)
)
else:
transformed_coords = None
return img2, transformed_coords
@classmethod
def scale_images(cls, img, output_size, return_scale_factors=False):
"""
Scale an array that represents one or more images into a shape
:param img: numpy array. It may contain one or multiple images
:param output_size: tuple of int. Shape expected (including possibly the number of images)
:param return_scale_factors: bool. If true, the result will be a tuple whose second values are the factors that
were needed to downsample the images
:return: numpy array rescaled or tuple with (array, factors)
"""
img_size = np.array(img.shape)
scale_factors = None
if not np.array_equal(output_size, img_size):
# The shape is the volume is different than the one expected by the network. We need to resize
scale_factors = output_size / img_size
# Reduce the volume to fit in the desired size
img = scipy_interpolation.zoom(img, scale_factors)
if return_scale_factors:
return img, scale_factors
return img
@classmethod
def standardization(cls, image_array, mean_value=-600, std_value=1.0, out=None):
"""
        Standardize an image by subtracting the mean and dividing by the standard deviation
:param image_array: image array
:param mean_value: float. Image mean value. If None, ignore
:param std_value: float. Image standard deviation value. If None, ignore
:return: New numpy array unless 'out' parameter is used. If so, reference to that array
"""
if out is None:
# Build a new array (copy)
image = image_array.astype(np.float32)
else:
# We will return a reference to out parameter
image = out
if id(out) != id(image_array):
# The input and output arrays are different.
# First, copy the source values, as we will apply the operations to image object
image[:] = image_array[:]
assert (
image.dtype == np.float32
), "The out array must contain float32 elements, because the transformation will be performed in place"
if mean_value is None:
mean_value = image.mean()
if std_value is None:
std_value = image.std()
if std_value <= 0.0001:
std_value = 1.0
# Standardize image
image -= mean_value
image /= std_value
return image
@classmethod
def normalize_CT_image_intensity(
cls,
image_array,
min_value=-300,
max_value=700,
min_output=0.0,
max_output=1.0,
out=None,
):
"""
Threshold and adjust contrast range in a CT image.
:param image_array: int numpy array (CT or partial CT image)
        :param min_value: int. Min threshold (everything below this value will be clipped). If None, ignore
        :param max_value: int. Max threshold (everything above this value will be clipped). If None, ignore
:param min_output: float. Min out value
:param max_output: float. Max out value
:param out: numpy array. Array that will be used as an output
:return: New numpy array unless 'out' parameter is used. If so, reference to that array
"""
clip = min_value is not None or max_value is not None
if min_value is None:
min_value = np.min(image_array)
if max_value is None:
max_value = np.max(image_array)
if out is None:
# Build a new array (copy)
image = image_array.astype(np.float32)
else:
# We will return a reference to out parameter
image = out
if id(out) != id(image_array):
# The input and output arrays are different.
# First, copy the source values, as we will apply the operations to image object
image[:] = image_array[:]
assert (
image.dtype == np.float32
), "The out array must contain float32 elements, because the transformation will be performed in place"
if clip:
np.clip(image, min_value, max_value, image)
# Change of range
image -= min_value
image /= max_value - min_value
image *= max_output - min_output
image += min_output
return image
@classmethod
def elastic_transform(cls, image, alpha, sigma, fill_mode="constant", cval=0.0):
"""
Elastic deformation of images as described in http://doi.ieeecomputersociety.org/10.1109/ICDAR.2003.1227801
:param image: numpy array
:param alpha: float
:param sigma: float
        :param fill_mode: fill mode for the gaussian filter. Default: constant value (cval)
:param cval: float
:return: numpy array. Image transformed
"""
random_state = np.random.RandomState(None)
shape = image.shape
dx = (
gaussian_filter(
(random_state.rand(*shape) * 2 - 1), sigma, mode=fill_mode, cval=cval
)
* alpha
)
dy = (
gaussian_filter(
(random_state.rand(*shape) * 2 - 1), sigma, mode=fill_mode, cval=cval
)
* alpha
)
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
distorted_image = map_coordinates(image, indices, order=1).reshape(shape)
return distorted_image
@classmethod
def elastic_deformation_2D(
cls, image, grid_width=2, grid_height=2, magnitude=4, resampling="bicubic"
):
"""
        Distort a 2D image according to the parameters and return the newly distorted image. Adapted from the Augmentor library
:param image:
:param grid_width: int. Grid width
:param grid_height: int. Grid height
:param magnitude: int. Magnitude
:param resampling: str. Resampling filter. Options: nearest (use nearest neighbour) |
bilinear (linear interpolation in a 2x2 environment) | bicubic (cubic spline interpolation in a 4x4 environment)
"""
image = Image.fromarray(image.transpose())
w, h = image.size
horizontal_tiles = grid_width
vertical_tiles = grid_height
width_of_square = int(math.floor(w / float(horizontal_tiles)))
height_of_square = int(math.floor(h / float(vertical_tiles)))
width_of_last_square = w - (width_of_square * (horizontal_tiles - 1))
height_of_last_square = h - (height_of_square * (vertical_tiles - 1))
dimensions = []
for vertical_tile in range(vertical_tiles):
for horizontal_tile in range(horizontal_tiles):
if vertical_tile == (vertical_tiles - 1) and horizontal_tile == (
horizontal_tiles - 1
):
dimensions.append(
[
horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_last_square + (horizontal_tile * width_of_square),
height_of_last_square + (height_of_square * vertical_tile),
]
)
elif vertical_tile == (vertical_tiles - 1):
dimensions.append(
[
horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_square + (horizontal_tile * width_of_square),
height_of_last_square + (height_of_square * vertical_tile),
]
)
elif horizontal_tile == (horizontal_tiles - 1):
dimensions.append(
[
horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_last_square + (horizontal_tile * width_of_square),
height_of_square + (height_of_square * vertical_tile),
]
)
else:
dimensions.append(
[
horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_square + (horizontal_tile * width_of_square),
height_of_square + (height_of_square * vertical_tile),
]
)
# For loop that generates polygons could be rewritten, but maybe harder to read?
# polygons = [x1,y1, x1,y2, x2,y2, x2,y1 for x1,y1, x2,y2 in dimensions]
# last_column = [(horizontal_tiles - 1) + horizontal_tiles * i for i in range(vertical_tiles)]
last_column = []
for i in range(vertical_tiles):
last_column.append((horizontal_tiles - 1) + horizontal_tiles * i)
last_row = range(
(horizontal_tiles * vertical_tiles) - horizontal_tiles,
horizontal_tiles * vertical_tiles,
)
polygons = []
for x1, y1, x2, y2 in dimensions:
polygons.append([x1, y1, x1, y2, x2, y2, x2, y1])
polygon_indices = []
for i in range((vertical_tiles * horizontal_tiles) - 1):
if i not in last_row and i not in last_column:
polygon_indices.append(
[i, i + 1, i + horizontal_tiles, i + 1 + horizontal_tiles]
)
for a, b, c, d in polygon_indices:
dx = np.random.randint(-magnitude, magnitude)
dy = np.random.randint(-magnitude, magnitude)
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[a]
polygons[a] = [x1, y1, x2, y2, x3 + dx, y3 + dy, x4, y4]
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[b]
polygons[b] = [x1, y1, x2 + dx, y2 + dy, x3, y3, x4, y4]
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[c]
polygons[c] = [x1, y1, x2, y2, x3, y3, x4 + dx, y4 + dy]
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[d]
polygons[d] = [x1 + dx, y1 + dy, x2, y2, x3, y3, x4, y4]
generated_mesh = []
for i in range(len(dimensions)):
generated_mesh.append([dimensions[i], polygons[i]])
if resampling == "bilinear":
resampling_filter = Image.BILINEAR
elif resampling == "nearest":
resampling_filter = Image.NEAREST
else:
resampling_filter = Image.BICUBIC
return np.asarray(
image.transform(
image.size, Image.MESH, generated_mesh, resample=resampling_filter
)
).transpose()
@classmethod
def perspective_skew_2D_transform(
cls, image, skew_amount, skew_type="random", resampling="bicubic"
):
"""
        Apply perspective skew to an image. Adapted from the Augmentor library
:param image:
:param skew_type: str. Skew type. Options: random | tilt (will randomly skew either left, right, up, or down.) |
tilt_top_buttton (skew up or down) | tilt_left_right (skew left or right) |
corner (will randomly skew one **corner** of the image either along the x-axis or y-axis.
        This means in one of 8 different directions, randomly.)
:param skew_amount: int. The degree to which the image is skewed
:param resampling: str. Resampling filter. Options: nearest (use nearest neighbour) |
bilinear (linear interpolation in a 2x2 environment) | bicubic (cubic spline interpolation in a 4x4 environment)
"""
image = Image.fromarray(image.transpose())
w, h = image.size
x1 = 0
x2 = h
y1 = 0
y2 = w
original_plane = [(y1, x1), (y2, x1), (y2, x2), (y1, x2)]
if skew_type == "random":
skew = np.random.choice(
["tilt", "tilt_left_right", "tilt_top_buttton", "corner"]
)
else:
skew = skew_type
# We have two choices now: we tilt in one of four directions
# or we skew a corner.
if skew == "tilt" or skew == "tilt_left_right" or skew == "tilt_top_buttton":
            if skew == "tilt":
                # np.random.randint has an exclusive upper bound, so use 4 to cover directions 0-3
                skew_direction = np.random.randint(0, 4)
            elif skew == "tilt_left_right":
                skew_direction = np.random.randint(0, 2)
            elif skew == "tilt_top_buttton":
                skew_direction = np.random.randint(2, 4)
if skew_direction == 0:
# Left Tilt
new_plane = [
(y1, x1 - skew_amount), # Top Left
(y2, x1), # Top Right
(y2, x2), # Bottom Right
(y1, x2 + skew_amount),
] # Bottom Left
elif skew_direction == 1:
# Right Tilt
new_plane = [
(y1, x1), # Top Left
(y2, x1 - skew_amount), # Top Right
(y2, x2 + skew_amount), # Bottom Right
(y1, x2),
] # Bottom Left
elif skew_direction == 2:
# Forward Tilt
new_plane = [
(y1 - skew_amount, x1), # Top Left
(y2 + skew_amount, x1), # Top Right
(y2, x2), # Bottom Right
(y1, x2),
] # Bottom Left
elif skew_direction == 3:
# Backward Tilt
new_plane = [
(y1, x1), # Top Left
(y2, x1), # Top Right
(y2 + skew_amount, x2), # Bottom Right
(y1 - skew_amount, x2),
] # Bottom Left
if skew == "corner":
            skew_direction = np.random.randint(0, 8)  # exclusive upper bound: corner skews 0-7
if skew_direction == 0:
# Skew possibility 0
new_plane = [(y1 - skew_amount, x1), (y2, x1), (y2, x2), (y1, x2)]
elif skew_direction == 1:
# Skew possibility 1
new_plane = [(y1, x1 - skew_amount), (y2, x1), (y2, x2), (y1, x2)]
elif skew_direction == 2:
# Skew possibility 2
new_plane = [(y1, x1), (y2 + skew_amount, x1), (y2, x2), (y1, x2)]
elif skew_direction == 3:
# Skew possibility 3
new_plane = [(y1, x1), (y2, x1 - skew_amount), (y2, x2), (y1, x2)]
elif skew_direction == 4:
# Skew possibility 4
new_plane = [(y1, x1), (y2, x1), (y2 + skew_amount, x2), (y1, x2)]
elif skew_direction == 5:
# Skew possibility 5
new_plane = [(y1, x1), (y2, x1), (y2, x2 + skew_amount), (y1, x2)]
elif skew_direction == 6:
# Skew possibility 6
new_plane = [(y1, x1), (y2, x1), (y2, x2), (y1 - skew_amount, x2)]
elif skew_direction == 7:
# Skew possibility 7
new_plane = [(y1, x1), (y2, x1), (y2, x2), (y1, x2 + skew_amount)]
# To calculate the coefficients required by PIL for the perspective skew,
# see the following Stack Overflow discussion: https://goo.gl/sSgJdj
matrix = []
for p1, p2 in zip(new_plane, original_plane):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
        A = np.matrix(matrix, dtype=np.float64)
B = np.array(original_plane).reshape(8)
perspective_skew_coefficients_matrix = np.dot(np.linalg.pinv(A), B)
perspective_skew_coefficients_matrix = np.array(
perspective_skew_coefficients_matrix
).reshape(8)
if resampling == "bilinear":
resampling_filter = Image.BILINEAR
elif resampling == "nearest":
resampling_filter = Image.NEAREST
else:
resampling_filter = Image.BICUBIC
return np.asarray(
image.transform(
image.size,
Image.PERSPECTIVE,
perspective_skew_coefficients_matrix,
resample=resampling_filter,
)
).transpose()
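# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal standalone demonstration of the intensity-window rescaling that
# normalize_CT_image_intensity performs above: clip to [min_value, max_value] and
# then map linearly into [min_output, max_output]. The HU values below are made up,
# and numpy is assumed to be imported as np at the top of this module (the methods
# above already rely on it).
if __name__ == "__main__":
    hu = np.array([-1000, -300, 200, 700, 1200], dtype=np.float32)
    lo, hi = -300.0, 700.0              # same default CT window as above
    demo = np.clip(hu, lo, hi)          # threshold
    demo = (demo - lo) / (hi - lo)      # rescale into [0, 1]
    print(demo)                         # prints approximately [0. 0. 0.5 1. 1.]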
|
from django.http import HttpResponse
from django.utils import simplejson
from google.appengine.api import urlfetch
import urllib
import string
def vat(request, country_code, vat_no):
country_code = string.upper(country_code)
data=dict(ms=country_code, iso=country_code, vat=vat_no)
payload = urllib.urlencode(data)
result = urlfetch.fetch(url='http://ec.europa.eu/taxation_customs/vies/viesquer.do', payload=payload, method=urlfetch.POST, headers={'Content-Type': 'application/x-www-form-urlencoded'})
content = result.content
json = 'true' if 'Yes, valid VAT number' in content else 'false'
if 'Member State service unavailable.' in content:
# Error, the member state service is not available
json = simplejson.dumps(dict(error=True, error_code=1, error_message='Member State service unavailable.'))
if 'callback' in request.GET:
        json = request.GET['callback'] + '(' + json + ')'
response = HttpResponse(json, mimetype='application/javascript')
return response
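# Hedged illustration (added; not part of the original view): with a hypothetical
# route such as /vat/DE/123456789?callback=handleVat the view responds with a
# JSONP body of one of the following forms:
#   handleVat(true)    -> VIES reported "Yes, valid VAT number"
#   handleVat(false)   -> number not reported as valid
#   handleVat({"error": true, "error_code": 1, "error_message": "Member State service unavailable."})
# The URL route itself is defined elsewhere in the project, so the path above is
# only an assumed example.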
|
import pandas as pd
import requests
from io import StringIO
from annotations.cache_utils import cache_data_table
URL = "https://www.genenames.org/cgi-bin/download/custom?col=gd_hgnc_id&col=gd_app_sym&col=gd_app_name&col=gd_status&col=gd_prev_sym&col=gd_aliases&col=gd_pub_chrom_map&col=gd_pub_acc_ids&col=gd_pub_refseq_ids&col=gd_pub_ensembl_id&col=gd_mgd_id&col=gd_pubmed_ids&col=gd_locus_type&status=Approved&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit"
@cache_data_table
def get_hgnc_table():
"""Download the HGNC table from https://www.genenames.org/download/custom/ and return it as a pandas DataFrame"""
r = requests.get(URL)
if not r.ok:
raise Exception(f"Failed to download {URL}: {r}")
table_contents = StringIO(r.content.decode('UTF-8'))
return pd.read_table(table_contents)
if __name__ == "__main__":
pd.set_option('display.max_columns', 500)
df = get_hgnc_table()
print(df)
|
# imported libraries
import random
# method: build the board which holds the value of each position, not what is displayed in game
# input: x,y range and num bombs
# output: built board with bombs in place
# effects: none
def build_board(x, y, num):
board = []
for i in range(y):
row = []
for j in range(x):
row.append(0)
board.append(row)
# Find a random position for all bombs
for j in range(num):
a = int(random.uniform(0, y))
b = int(random.uniform(0, x))
# if a bomb exists in the given spot, find another x,y
while board[a][b] == -1:
a = int(random.uniform(0, y))
b = int(random.uniform(0, x))
board[a][b] = -1
return board
# method: build a board to display
# input: x,y range
# output: board with nothing showing inside
# effects: none
def build_psuedo_board(x, y):
board = []
for i in range(y):
row = []
for j in range(x):
row.append(" ")
board.append(row)
return board
# method: defines the print formatting for displaying the board with numeric positions
# input: board
# output: print formatted layout for board
# effects: none
def print_board(board):
print(" ", end="")
for i in range(len(board[0])):
print(" ", i, " ", sep = "", end = "")
print("\n ", end = "")
print("--- " * len(board) + "---")
for i in range(len(board)):
print(i,"| ", end = "")
for j in range(len(board[0])):
print("%2s" % str(board[i][j]), end = " ")
print("|")
print(" ", "--- " * len(board) + "---\n\n", sep="")
# method: get a value for any position != -1, i.e. if 1 or 2 ... 8 bombs surround a position
# input: board
# output: none
# effects: change values of board to show how many bombs are adjacent
def define_board(board):
for i in range(len(board)):
for j in range(len(board[0])):
# if a bomb is found, add 1 to every position adjacent to i,j
if board[i][j] == -1:
k = i - 1
while k <= i + 1:
l = j - 1
while l <= j + 1:
if k >= 0 and l >= 0 and k < len(board) and l < len(board[0]) and board[k][l] != -1:
board[k][l] += 1
l += 1
k += 1
# method: check if a position contains a bomb
# input: board, x,y position
# output: boolean if a bomb is in place
# effects: none
def check_move(board, x, y):
if board[x][y] == -1:
return False
else:
return True
# method: Check if the board is completed and the user won
# input: board, shown board
# output: boolean, if a board is fully filled
# effects: none
def check_win(board, psuedo):
for i in range(len(psuedo)):
for j in range(len(psuedo[0])):
# if a position is not filled, game is not over
if psuedo[i][j] == " " and board[i][j] != -1:
return 0
if psuedo[i][j] == 'f' and board[i][j] != -1:
return 0
return 1
# method: recursive method to reveal all zeros and adjacent digits in a related area
# input: board, shown board, x,y position
# output: shown board
# effects: modifies shown board by revealing zeros
def open_area(board, psuedo, x, y):
# if a position is not a zero, then show it and backtrack
if board[x][y] != 0:
psuedo[x][y] = board[x][y]
return psuedo
psuedo[x][y] = board[x][y]
# check every position surrounding x,y
for i in range(x - 1, x + 2):
for j in range(y - 1, y + 2):
if i < len(psuedo) and j < len(psuedo[i]) and i >= 0 and j >= 0:
if psuedo[i][j] == ' ' or psuedo[i][j] == "1":
open_area(board, psuedo, i, j)
# main game loop
if __name__ == "__main__":
# generate a game board in a dynamic size with n bombs
game_info = input("Enter size and bombs: ")
x, y, num = game_info.split(" ")
# create 2 boards, one to be displayed and one holding bomb values
game = build_board(int(x), int(y), int(num))
psuedo = build_psuedo_board(int(x), int(y))
define_board(game)
print_board(game)
print("Valid Move: \'x y f\'; f value is optional")
# start main game loop, ask for a position to check and either place a flag or reveal if a bomb exists
while(True):
print_board(psuedo)
# keep requesting a move until a valid move is given
while(True):
# if more than 2 values given, break
try:
move = input("Move: ")
                parts = move.split(" ")
                if len(parts) == 3:
                    y, x, f = parts
                else:
                    y, x = parts
                    f = ""
x, y = int(x), int(y)
break
except:
print("ERROR: Only (2) integers allowed.")
continue
# if a 'flag' flag is given then place a flag
if f != "":
if psuedo[x][y] == "f":
psuedo[x][y] = ""
else:
psuedo[x][y] = "f"
continue
# Check whether the given position contains a bomb or not
result = check_move(game, x, y)
# if not, check if it is adjacent to a bomb or a zero
if result:
psuedo[x][y] = game[x][y]
# open up the surrounding positions if a zero
if game[x][y] == 0:
open_area(game, psuedo, x, y)
        # if x,y contains a bomb, then the user loses the game
else:
print("Game Over")
print_board(game)
break
# check whether the user has won the game
if check_win(game, psuedo):
print("You Win!")
print_board(game)
break
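# Hedged worked example (added for illustration; not part of the original game):
# for a 3x3 board with a single bomb in the centre, build_board would hold
#   [[0, 0, 0], [0, -1, 0], [0, 0, 0]]
# and define_board then increments every neighbour of the bomb, giving
#   [[1, 1, 1], [1, -1, 1], [1, 1, 1]]
# so check_move(board, 1, 1) returns False (bomb hit) and any other cell returns True.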
|
import chainer
import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable
from brancher import inference
import brancher.functions as BF
# Probabilistic model #
T = 20
driving_noise = 1.
measure_noise = 0.5
x0 = NormalVariable(0., driving_noise, 'x0')
y0 = NormalVariable(x0, measure_noise, 'y0')
b = BetaVariable(0.5, 1., 'b')
x = [x0]
y = [y0]
x_names = ["x0"]
y_names = ["y0"]
for t in range(1,T):
x_names.append("x{}".format(t))
y_names.append("y{}".format(t))
x.append(NormalVariable(b*x[t-1], driving_noise, x_names[t]))
y.append(NormalVariable(x[t], measure_noise, y_names[t]))
AR_model = ProbabilisticModel(x + y)
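# Hedged note (added for clarity; not part of the original script): the model
# constructed above is a first-order autoregressive state-space model,
#   x_t = b * x_{t-1} + driving noise,   y_t = x_t + measurement noise,
# with a Beta(0.5, 1) prior on the autoregressive coefficient b.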
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
true_b = data[b].data
print("The true coefficient is: {}".format(float(true_b)))
# Observe data #
[yt.observe(data[yt][:, 0, :]) for yt in y]
# Autoregressive variational distribution #
Qb = BetaVariable(0.5, 0.5, "b", learnable=True)
logit_b_post = RootVariable(0., 'logit_b_post', learnable=True)
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
for t in range(1, T):
Qx_mean.append(RootVariable(0., x_names[t] + "_mean", learnable=True))
Qx.append(NormalVariable(logit_b_post*Qx[t-1] + Qx_mean[t], 1., x_names[t], learnable=True))
variational_posterior = ProbabilisticModel([Qb] + Qx)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=200,
number_samples=100,
optimizer='Adam',
lr=0.05)
loss_list = AR_model.diagnostics["loss curve"]
# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
b_posterior_samples = posterior_samples[b].detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))
x_mean = []
lower_bound = []
upper_bound = []
for xt in x:
x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
mean = np.mean(x_posterior_samples)
sd = np.sqrt(np.var(x_posterior_samples))
x_mean.append(mean)
lower_bound.append(mean - sd)
upper_bound.append(mean + sd)
print("The estimated coefficient is: {} +- {}".format(b_mean, b_sd))
# Two subplots, unpack the axes array immediately
f, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.scatter(range(T), time_series, c="k")
ax1.plot(range(T), x_mean)
ax1.plot(range(T), ground_truth, c="k", ls ="--", lw=1.5)
ax1.fill_between(range(T), lower_bound, upper_bound, alpha=0.5)
ax1.set_title("Time series")
ax2.plot(np.array(loss_list))
ax2.set_title("Convergence")
ax2.set_xlabel("Iteration")
ax3.hist(b_posterior_samples, 25)
ax3.axvline(x=true_b, lw=2, c="r")
ax3.set_title("Posterior samples (b)")
ax3.set_xlim(0, 1)
plt.show()
|
# Keyboard
rand_keyboard = open('random3.txt','w')
keyboard = 'QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm'
text_length = 10000
line_length = 15
index = 0
output = ''
for i in range(text_length):
for j in range(line_length):
index %= len(keyboard)
output += keyboard[index]
index += 1
output += '\n'
rand_keyboard.write(output)
rand_keyboard.close()
|
from flask import jsonify
from schema import Schema
from pysite.base_route import APIView
from pysite.constants import ValidationTypes
from pysite.decorators import api_params
LIST_SCHEMA = Schema([{"test": str}])
DICT_SCHEMA = Schema({"segfault": str})
class TestParamsView(APIView):
path = "/testparams"
name = "testparams"
@api_params(schema=DICT_SCHEMA, validation_type=ValidationTypes.params)
def get(self, data):
return jsonify(data)
@api_params(schema=LIST_SCHEMA, validation_type=ValidationTypes.params)
def post(self, data):
jsonified = jsonify(data)
return jsonified
|
import argparse
from generate import generate_min_blep
from render import render_output
def main():
parser = argparse.ArgumentParser()
parser.add_argument('sample_rate',
type=int,
help="This is actualy labeled 'oversampling' in the "
"ExperimentalScene code, and I'm not actually sure what it "
"really represents.")
parser.add_argument('zero_crossings',
type=int,
help='The number of zero crossings in the generated blep. '
'This is effectively the ring-out of the Gibbs ripple.')
parser.add_argument('-f', '--output_format',
default='list',
help='The format of the minblep to be output')
args = parser.parse_args()
min_blep = generate_min_blep(args.zero_crossings, args.sample_rate)
render_output(min_blep, args.output_format)
if __name__ == '__main__':
main()
|
# project/tests/test_auth.py
import json, time
from project import db
from project.api.models import User
from project.tests.base import BaseTestCase
from project.tests.utils import add_user
class TestAuthBlueprint(BaseTestCase):
"""Tests for the Auth service."""
def test_user_registration(self):
"""Ensure successful user registration."""
with self.client:
response = self.client.post(
'/api/v1/auth/register',
data=json.dumps(dict(
username='justatest',
email='test@test.com',
password='123456'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully registered.')
self.assertTrue(data['auth_token'])
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 201)
def test_user_registration_duplicate_email(self):
"""Ensure error is thrown if email has already been registered."""
add_user('test', 'test@test.com', 'test')
with self.client:
response = self.client.post(
'/api/v1/auth/register',
data=json.dumps(dict(
username='jeff',
email='test@test.com',
password='test'
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn(
'Sorry. That user already exists.', data['message'])
self.assertIn('fail', data['status'])
def test_user_registration_duplicate_username(self):
"""Ensure error is thrown if username has already been registered."""
add_user('test', 'test@test.com', 'test')
with self.client:
response = self.client.post(
'/api/v1/auth/register',
data=json.dumps(dict(
username='test',
email='test2@test.com',
password='test'
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn(
'Sorry. That user already exists.', data['message'])
self.assertIn('fail', data['status'])
def test_user_registration_invalid_json(self):
"""Ensure error is thrown if the JSON object is empty."""
with self.client:
response = self.client.post(
'/api/v1/auth/register',
data=json.dumps(dict()),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_user_registration_invalid_json_keys_no_username(self):
"""Ensure error is thrown if no username is included."""
with self.client:
response = self.client.post(
'/api/v1/auth/register',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_user_registration_invalid_json_keys_no_email(self):
"""Ensure error is thrown if no email is included."""
with self.client:
response = self.client.post(
'/api/v1/auth/register',
data=json.dumps(dict(
username='justatest',
password='test'
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_user_registration_invalid_json_keys_no_password(self):
"""Ensure error is thrown if no password is included."""
with self.client:
response = self.client.post(
'/api/v1/auth/register',
data=json.dumps(dict(
username='justatest',
email='test@test.com'
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_registered_user_login(self):
"""Ensure successful user login."""
with self.client:
user = add_user('test', 'test@test.com', 'test')
response = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully logged in.')
self.assertTrue(data['auth_token'])
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 200)
def test_not_registered_user_login(self):
"""Ensure error is thrown if unregistered user attempts login."""
with self.client:
response = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'User does not exist.')
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 404)
def test_valid_logout(self):
"""Ensure successful user logout."""
add_user('test', 'test@test.com', 'test')
with self.client:
# user login
resp_login = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
# valid token logout
response = self.client.get(
'/api/v1/auth/logout',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully logged out.')
self.assertEqual(response.status_code, 200)
def test_invalid_logout_expired_token(self):
"""Ensure error is thrown if logout attempt with expired token."""
add_user('test', 'test@test.com', 'test')
with self.client:
resp_login = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
# invalid token logout
time.sleep(4)
response = self.client.get(
'/api/v1/auth/logout',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'Signature expired. Please log in again.'
)
self.assertEqual(response.status_code, 401)
def test_invalid_logout(self):
"""Ensure error is thrown if token is invalid."""
with self.client:
response = self.client.get(
'/api/v1/auth/logout',
headers=dict(Authorization='Bearer invalid'))
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'Invalid token. Please log in again.')
self.assertEqual(response.status_code, 401)
def test_invalid_logout_inactive(self):
"""Ensure error is thrown if user is inactive."""
add_user('test', 'test@test.com', 'test')
# update user
user = User.query.filter_by(email='test@test.com').first()
user.active = False
db.session.commit()
with self.client:
resp_login = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
response = self.client.get(
'/api/v1/auth/logout',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'Something went wrong. Please contact us.')
self.assertEqual(response.status_code, 401)
def test_user_status(self):
"""Ensure auth token is sent when requesting user details."""
add_user('test', 'test@test.com', 'test')
with self.client:
resp_login = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
response = self.client.get(
'/api/v1/auth/status',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['data'] is not None)
self.assertTrue(data['data']['username'] == 'test')
self.assertTrue(data['data']['email'] == 'test@test.com')
self.assertTrue(data['data']['active'] is True)
self.assertTrue(data['data']['created_at'])
self.assertEqual(response.status_code, 200)
def test_invalid_status(self):
"""Ensure error is thrown if user detail request is sent without valid token."""
with self.client:
response = self.client.get(
'/api/v1/auth/status',
headers=dict(Authorization='Bearer invalid'))
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'Invalid token. Please log in again.')
self.assertEqual(response.status_code, 401)
def test_invalid_status_inactive(self):
"""Ensure error is thrown if user is inactive and tries to get status."""
add_user('test', 'test@test.com', 'test')
# update user
user = User.query.filter_by(email='test@test.com').first()
user.active = False
db.session.commit()
with self.client:
resp_login = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
response = self.client.get(
'/api/v1/auth/status',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'Something went wrong. Please contact us.')
self.assertEqual(response.status_code, 401)
def test_add_user_inactive(self):
add_user('test', 'test@test.com', 'test')
# update user
user = User.query.filter_by(email='test@test.com').first()
user.active = False
db.session.commit()
with self.client:
resp_login = self.client.post(
'/api/v1/auth/login',
data=json.dumps(dict(
email='test@test.com',
password='test'
)),
content_type='application/json'
)
response = self.client.post(
'/api/v1/users',
data=json.dumps(dict(
username='myco',
email='myco@test.com',
password='test'
)),
content_type='application/json',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'Something went wrong. Please contact us.')
self.assertEqual(response.status_code, 401)
|
"""Contains entrypoint logic for running moduledependency as a program."""
import sys
import os
from .cli import ArgumentProcessor
from .executor import Executor
from .outputter import OutputterFactory
from . import MODULEDEPENDENCY_DIR
# Directory which stores all the outputters
OUTPUTTER_DIRECTORY = os.path.join(MODULEDEPENDENCY_DIR, "outputters")
def run():
"""Main entrypoint into moduledependency program."""
# Process command line arguments
argProcessor = ArgumentProcessor()
try:
argProcessor.process(sys.argv)
except BaseException as e:
sys.exit(str(e))
outputterFactory = OutputterFactory(OUTPUTTER_DIRECTORY)
# If an outputter was specified, try and load it
if argProcessor.outputterName:
        # Get all arguments that may be for the outputter (options
        # not recognised as being anything else by the argument processor)
outputter = outputterFactory.createOutputter(argProcessor.outputterName, **argProcessor.getOutputterArguments())
else:
outputter = None
    # Create and configure the main dependency searcher
executor = Executor()
if outputter:
executor.setOutputter(outputter)
try:
executor.setMaximumDepth(argProcessor.maxDepth)
except KeyError:
pass
if not argProcessor.getOption("quiet"):
print("starting dependency extraction...")
# Search for dependencies in the specified directory
dependencies = executor.execute(argProcessor.projectDirectory)
if not argProcessor.getOption("quiet"):
print("...dependency extraction complete!")
|
# from app.models import Pitch,User
# from app import db
# def setUp(self):
# self.user_James = User(username = 'James',password = 'potato', email = 'james@ms.com')
# self.new_review = Review(movie_id=12345,movie_title='Review for movies',image_path="https://image.tmdb.org/t/p/w500/jdjdjdjn",movie_review='This movie is the best thing since sliced bread',user = self.user_James )
|
import random
def choose():
words=["umbrella","electricity","library","explanation","demonitisation","rainbow","flipkart","computer","elctronics","chemical"]
pick=random.choice(words)
return pick
def jumble(word):
jumbled="".join(random.sample(word,len(word)))
return jumbled
def play():
p1name=input("Enter the name of player 1: ")
p2name=input("Enter the name of player 2: ")
pp1=0
pp2=0
turn=0
while(1):
picked_word=choose()
qn=jumble(picked_word)
if(turn%2==0):
print(p1name,"its your turn")
print("Your question is: ",qn)
ans=input("Enter your answer: ")
if(ans==picked_word):
print("Your answer is correct. ")
pp1=pp1+1
print("Your score is: ",pp1)
else:
print("Sorry the correct answer is: ",picked_word)
c=int(input("Press 1 to continue the game and press 0 to quit the game ... "))
if(c==0):
print("Thanks",p1name,p2name)
print(p1name," Your score is: ",pp1)
print(p2name," Your score is: ",pp2)
break
else:
print(p2name,"its your turn")
print("Your question is: ",qn)
ans=input("Enter your answer: ")
if(ans==picked_word):
print("Your answer is correct. ")
pp2=pp2+1
print("Your score is: ",pp2)
else:
print("Sorry the correct answer is: ",picked_word)
c=int(input("Press 1 to continue the game and press 0 to quit the game ... "))
if(c==0):
print("Thanks",p1name,p2name)
print(p1name," Your score is: ",pp1)
print(p2name," Your score is: ",pp2)
break
turn=turn+1
play()
|
from pathlib import Path
_root = Path(__file__).resolve().parent
_constraints = _root.parent / 'constraints'
_content = _root / 'content'
(_content / 'boards').mkdir(exist_ok=True)
for item in (_constraints / 'board').glob('**/*.yml'):
if item.name != 'info.yml':
_name = item.stem
_prefix = '.todo/'
_suffix = '.yml'
print('·', _name)
else:
_name = item.parent.name
_prefix = ''
_suffix = ''
print('-', _name)
with (_content / 'boards' / (_name + '.md')).open('w') as wfptr:
wfptr.write('---\n')
wfptr.write(item.read_text())
wfptr.write('ref: https://github.com/hdl/constraints/tree/main/board/%s%s%s\n' % (_prefix, _name, _suffix))
wfptr.write('---\n')
|
from setuptools import setup
setup(name='HDMSpectra',
version='1.0',
description='Dark Matter Spectra from the Electroweak to the Planck Scale',
author='Nicholas L Rodd',
author_email='nrodd@berkeley.edu',
url='https://github.com/nickrodd/HDMSpectra',
license='MIT',
packages=['HDMSpectra'],
install_requires=['numpy', 'matplotlib', 'h5py', 'jupyter', 'scipy', 'six'],
)
|
import struct
# Get the variable-length time value, represented as some fraction of a beat in beat/tick format
# Assumptions: file_buf starts at the beginning of the VLV (variable-length value)
def get_vlv(file_buf, index):
end_index = index
vlv = 0
vlv_accumulator = 0
evaluation = 128
count = 0
while(evaluation >= 128):
dataout = int(struct.unpack('>B', file_buf[count])[0])
SEVEN_BIT_SHIFT = 7
evaluation = dataout
vlv = dataout & 0x7F
count += 1
vlv_accumulator = (vlv_accumulator << SEVEN_BIT_SHIFT) + vlv
end_index = end_index + count
return [vlv_accumulator, end_index]
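# Hedged worked example (added; not part of the original module): the MIDI
# variable-length quantity 0x81 0x48 decodes to
#   ((0x81 & 0x7F) << 7) + (0x48 & 0x7F) = 128 + 72 = 200
# because the high bit of 0x81 marks "another byte follows" while the high bit of
# 0x48 is clear, terminating the value; get_vlv would therefore return
# [200, index + 2] for a buffer starting with those two bytes.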
|
FEATURE_SELECT_PARAMNAME_PREFIX = "feature_group"
ALWAYS_USED_FEATURE_GROUP_ID = -1
RANDOM_SCORE = 10
|
"""This module contains operations related to committing and reviewing changes done to requirements."""
from logging import Logger
from pipwatch_worker.core.data_models import Project
from pipwatch_worker.worker.commands import Git
from pipwatch_worker.worker.operations.operation import Operation
class CommitChanges(Operation): # pylint: disable=too-few-public-methods
"""Encompasses logic of committing changes made to requirements."""
DEFAULT_COMMIT_MSG = "[Pipwatch] - Automatic increment of requirements versions."
def __init__(self, logger: Logger, project_details: Project) -> None:
"""Initialize method instance."""
super().__init__(logger=logger, project_details=project_details)
self.git = Git(self.project_details.id, self.project_details.git_repository.url)
def __call__(self, commit_msg: str = None) -> None:
"""Commit changes and push them to master branch."""
for requirements_file in self.project_details.requirements_files:
self.log.debug("Attempting to 'git add {file}'".format(file=requirements_file.path))
self.git("add {file}".format(file=requirements_file.path))
commit_msg = commit_msg if commit_msg else self.DEFAULT_COMMIT_MSG
self.log.debug("Attempting to commit changes with following message: '{message}'".format(
message=commit_msg
))
self.git("commit -m {commit_msg}".format(commit_msg=commit_msg))
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import requests
from mock import Mock, patch
from pants.cache.resolver import Resolver, ResponseParser, RESTfulResolver
PATCH_OPTS = dict(autospec=True, spec_set=True)
class TestResponseParser(unittest.TestCase):
def testParse(self):
response_parser = ResponseParser()
self.assertEqual(['url1', 'url2'], response_parser.parse('{"hostlist": ["url1", "url2"]}'))
self.assertEqual([], response_parser.parse('{"hostlist": []}'))
with self.assertRaises(ResponseParser.ResponseParserError):
response_parser.parse('{"hostlist": "not a list"}')
with self.assertRaises(ResponseParser.ResponseParserError):
response_parser.parse('a garbage response')
with self.assertRaises(ResponseParser.ResponseParserError):
response_parser.parse('{"mismatched-index": ["url1", "url2"]}')
with self.assertRaises(ResponseParser.ResponseParserError):
# a mismatched encoding also fails
response_parser.parse('{"hostlist": ["url1", "url2"]}'.encode('utf-16'))
class TestRESTfulResolver(unittest.TestCase):
TEST_TIMEOUT = 1
TEST_RETRIES = 3
RESOLVED_URL_1 = 'http://10.0.0.1:1'
RESOLVED_URL_2 = 'http://10.0.0.2:2'
URLS = [RESOLVED_URL_1, RESOLVED_URL_2]
TEST_RESOLVED_FROM = 'http://test-resolver'
TEST_REQUEST_EXCEPTION = requests.exceptions.ConnectionError()
def setUp(self):
self.parser = Mock(spec=ResponseParser)
self.resolver = RESTfulResolver(self.TEST_TIMEOUT, self.TEST_RETRIES, self.parser)
def mock_response(self, status_code, urls=None):
response = Mock()
response.status_code = status_code
self.parser.parse = Mock(return_value=urls)
return response
def testResolveSuccess(self):
with patch.object(requests.Session, 'get', **PATCH_OPTS) as mock_get:
mock_get.return_value = self.mock_response(requests.codes.ok, urls=self.URLS)
self.assertEqual(self.URLS, self.resolver.resolve(self.TEST_RESOLVED_FROM))
def testResolveErrorEmptyReturn(self):
with patch.object(requests.Session, 'get', **PATCH_OPTS) as mock_get:
mock_get.return_value = self.mock_response(requests.codes.ok, urls=[])
with self.assertRaises(Resolver.ResolverError):
self.resolver.resolve(self.TEST_RESOLVED_FROM)
def testResolveParseError(self):
with patch.object(requests.Session, 'get', **PATCH_OPTS) as mock_get:
mock_get.return_value = self.mock_response(requests.codes.ok, urls='this is a garbage string')
self.parser.parse.side_effect = ResponseParser.ResponseParserError()
with self.assertRaises(Resolver.ResolverError):
self.resolver.resolve(self.TEST_RESOLVED_FROM)
def testResolveResponseError(self):
with patch.object(requests.Session, 'get', **PATCH_OPTS) as mock_get:
mock_get.return_value = self.mock_response(requests.codes.service_unavailable)
with self.assertRaises(Resolver.ResolverError):
self.resolver.resolve(self.TEST_RESOLVED_FROM)
def testResolveConnectionError(self):
with patch.object(requests.Session, 'get', **PATCH_OPTS) as mock_get:
mock_get.side_effect = self.TEST_REQUEST_EXCEPTION
with self.assertRaises(Resolver.ResolverError):
self.resolver.resolve(self.TEST_RESOLVED_FROM)
|
from base.BaseAlgorithm import BaseAlgorithm
import math
class InsertionLengthAlgorithm(BaseAlgorithm):
def __init__(self, name, path):
BaseAlgorithm.__init__(self, name, path)
def run(self):
self.lengthOfCoverage()
genome_insertion = []
        sum = 0
        sumI = 0
with open(self.path) as file:
for line in file:
if line.find("@") != -1:
continue
pieces = line.split("\t")
if int(pieces[8]) > 0 and int(pieces[8]) < 10000:
isize = abs(int(pieces[3])-int(pieces[7]))
genome_insertion.append(isize)
sum += isize
sumI += 1
file = "position,isize\n"
isizeM = int((sum / sumI))
tmp = 0
index = 1
for i in genome_insertion:
file += str(index)+","+str(i)+"\n"
dist = (i-isizeM)
# needs for the standard deviation
tmp += math.pow(dist,2)
index += 1
print "Saving genome_insert_csv.csv file..."
targetF = open("genome_insert_csv.csv","w")
targetF.truncate()
targetF.write(file)
targetF.close()
print "Media: "+str(isizeM)
print "Standard Dev: "+str(format(math.sqrt(tmp/(index)),'.2f'))
print("Done!")
|
# class Actions:
# def polish_single_phrase(phrase: Union[Phrase, str] = None):
# actions.mode.enable("user.polish_dictation")
# actions.mode.disable("command")
# if phrase:
# samples = extract_samples()
# speech_system._on_audio_frame(samples)
# actions.mode.disable("user.polish_dictation")
# actions.mode.enable("command")
|
from __future__ import division
def html_head_title(title):
return '''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<title>%s</title>
</head>
''' % title
|
import vaex.utils
import numpy as np
import pytest
def test_required_dtype_for_max():
assert vaex.utils.required_dtype_for_max(127, signed=True) == np.int8
assert vaex.utils.required_dtype_for_max(128, signed=True) == np.int16
assert vaex.utils.required_dtype_for_max(127, signed=False) == np.uint8
assert vaex.utils.required_dtype_for_max(128, signed=False) == np.uint8
assert vaex.utils.required_dtype_for_max(2**63-1, signed=True) == np.int64
assert vaex.utils.required_dtype_for_max(2**63, signed=False) == np.uint64
with pytest.raises(ValueError):
assert vaex.utils.required_dtype_for_max(2**63, signed=True) == np.int64
def test_dict_replace_key():
d = {'a': 1, 'b': 2}
result = vaex.utils.dict_replace_key(d, 'a', 'z')
assert list(result.items()) == [('z', 1), ('b', 2)]
|
"""Treadmill schedule monitor event.
This module allows the treadmill cron to take actions on applications,
for example, start and stop them at a given time.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import restclient
_LOGGER = logging.getLogger(__name__)
# TODO: this is a hack, as this should be passed as an option from the cron sproc
# command line. I was not able to figure out how to pass context from the
# cron/scheduler to the callback.
_API_URL = 'http+unix://%2Ftmp%2Fcellapi.sock'
def set_count(monitor_name=None, count=None):
"""Set the count on the supplied monitor"""
_LOGGER.debug('monitor: %s, count: %s', monitor_name, count)
if not monitor_name:
_LOGGER.error('No monitor name supplied, cannot continue')
return
restclient.post(
[_API_URL],
'/app-monitor/{}'.format(monitor_name),
payload={'count': count},
headers={'X-Treadmill-Trusted-Agent': 'cron'}
)
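# Hedged usage sketch (added; not part of the original module): the cron scheduler
# is expected to invoke this callback with keyword arguments, for example
#   set_count(monitor_name='proid.myapp', count=3)
# which POSTs {'count': 3} to /app-monitor/proid.myapp on the cell API socket
# defined by _API_URL. The monitor name above is a made-up example.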
|
import pytest
from energym.simulators.eplus_old import EnergyPlus
from energym.envs.eplus_env import EplusEnv
import energym.utils.rewards as R
from energym.utils.wrappers import NormalizeObservation, MultiObsWrapper
from opyplus import Epm, WeatherData
import os
import pkg_resources
from glob import glob #to find directories with patterns
import shutil
############### ROOT DIRECTORY ###############
@pytest.fixture(scope="session")
def energym_path():
return os.path.abspath(os.path.join(pkg_resources.resource_filename('energym', ''), os.pardir))
############### SIMULATORS ###############
@pytest.fixture(scope="session")
def eplus_path():
return os.environ["EPLUS_PATH"]
@pytest.fixture(scope="session")
def bcvtb_path():
return os.environ["BCVTB_PATH"]
@pytest.fixture(scope="session")
def pkg_data_path():
return pkg_resources.resource_filename('energym', 'data/')
@pytest.fixture(scope="session")
def idf_path(pkg_data_path):
return os.path.join(pkg_data_path, 'buildings', "5ZoneAutoDXVAV.idf")
@pytest.fixture(scope="session")
def variable_path(pkg_data_path):
return os.path.join(pkg_data_path, 'variables', "variables.cfg")
@pytest.fixture(scope="session")
def weather_path(pkg_data_path):
return os.path.join(pkg_data_path, 'weather', "USA_PA_Pittsburgh-Allegheny.County.AP.725205_TMY3.epw")
@pytest.fixture(scope="session")
def simulator(eplus_path, bcvtb_path,idf_path,variable_path,weather_path):
env_name="TEST"
return EnergyPlus(eplus_path, weather_path, bcvtb_path, variable_path, idf_path, env_name,act_repeat=1,max_ep_data_store_num = 10)
############### ENVIRONMENTS AND WRAPPERS ###############
@pytest.fixture(scope="module")
def env_demo(idf_path, weather_path, variable_path):
idf_file=idf_path.split("/")[-1]
weather_file=weather_path.split("/")[-1]
variables_file=variable_path.split("/")[-1]
return EplusEnv(env_name="TESTGYM",idf_file=idf_file, weather_file=weather_file, variables_file=variables_file, discrete_actions=True, weather_variability=None)
@pytest.fixture(scope="module")
def env_wrapper(env_demo):
return NormalizeObservation(MultiObsWrapper(env=env_demo,n=5))
############### COMMONS ###############
@pytest.fixture(scope="session")
def epm(idf_path):
return Epm.from_idf(idf_path)
@pytest.fixture(scope="session")
def weather_data(weather_path):
return WeatherData.from_epw(weather_path)
############### REWARDS ###############
@pytest.fixture(scope="session")
def simple_reward():
return R.SimpleReward(
range_comfort_winter = (20.0, 23.5),
range_comfort_summer = (23.0, 26.0),
energy_weight = 0.5,
lambda_energy = 1e-4,
lambda_temperature = 1.0
)
############### WHEN TESTS HAVE FINISHED ###############
def pytest_sessionfinish(session, exitstatus):
""" whole test run finishes. """
# Deleting all temporal directories generated during tests
directories=glob("Eplus-env-TEST*/")
for directory in directories:
shutil.rmtree(directory)
    # Delete the new random weather files once they have been checked
files=glob("energym/data/weather/*Random*.epw")
for file in files:
os.remove(file)
|
"""
This example file is intended to demonstrate how to use SHTK to mount a Debian
or Ubuntu image and install vim inside it via chroot. All mount points are
automatically unmounted after success or failure of installation.
Args:
image_path (str): Path of the image to mount
mount_path (str): Path of the directory to mount on
"""
import contextlib
import pathlib
import sys
import shtk
class Mount:
"""
Manages a mount. Works so long as Python doesn't segfault or similar.
Args:
src_path (str or pathlib.Path): The device to mount from
dst_path (str or pathlib.Path): The directory to mount on
Raises:
shtk.NonzeroExitCodeException:
If the mount or unmount returns a non-zero exit code.
"""
    def __init__(self, src_path, dst_path, options=()):
self.src_path = str(src_path)
self.dst_path = str(dst_path)
self.options = list(options)
def __enter__(self):
sh = shtk.Shell.get_shell()
mount = sh.command('mount', user='root')
sh(mount(*self.options, "--", self.src_path, self.dst_path))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
sh = shtk.Shell.get_shell()
umount = sh.command('umount', user='root')
sh(umount('-l', self.dst_path))
@contextlib.contextmanager
def PrepChroot(image_path, mount_path):
"""
Mounts an image and prepares it for chroot usage
Args:
image_path (pathlib.Path or str): The image file to mount.
mount_path (pathlib.Path or str): The directory on which to mount
the image.
Raises:
shtk.NonzeroExitCodeException:
If any mount or unmount returns a non-zero exit code.
"""
image_path = pathlib.Path(image_path)
mount_path = pathlib.Path(mount_path)
with contextlib.ExitStack() as stack:
stack.enter_context(Mount(image_path, mount_path, options=('-o', 'loop')))
stack.enter_context(Mount('none', mount_path / 'proc', options=('-t', 'proc')))
stack.enter_context(Mount('none', mount_path / 'sys', options=('-t', 'sysfs')))
stack.enter_context(Mount('/dev', mount_path / 'dev', options=('--rbind',)))
stack.enter_context(Mount('devpts', mount_path / 'dev' / 'pts', options=('-t', 'devpts')))
stack.enter_context(Mount('/run', mount_path / 'run', options=('--rbind',)))
yield stack
def main(image_path, mount_path):
"""
Mounts an image and runs `chroot apt -y install vim`
Args:
image_path (pathlib.Path or str): The image file to mount.
mount_path (pathlib.Path or str): The directory on which to mount
the image.
Raises:
shtk.NonzeroExitCodeException:
If any mount or unmount returns a non-zero exit code.
"""
with shtk.Shell() as sh:
with PrepChroot(image_path, mount_path):
chroot = sh.command('chroot', user='root')
sh(chroot('apt', '-y', 'install', 'vim'))
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
|
# -*- coding: utf-8 -*-
"""
@author: vinay
"""
import numpy as np
import math
import matplotlib.pyplot as plt
x1 = np.linspace(0, 5/50, num=1000)
np.random.seed(0)
noisy_sine = np.sin(2*math.pi*50*x1) + np.random.normal(0,0.06,1000)
plt.subplot(3,1,1)
plt.plot(x1, noisy_sine, color = "cyan")
plt.xlim(0, 5/50)
plt.ylim(-1.5, 1.5)
plt.xticks(np.arange(0, 5/50, 0.005))
plt.xlabel('Time(sec)')
plt.ylabel('Amplitude')
plt.title('Noisy SINE')
plt.tight_layout()
plt.grid()
clean_sine = np.sin(2*math.pi*50*x1)
plt.subplot(3,1,2)
plt.plot(x1, clean_sine, color = "red")
plt.xlim(0, 5/50)
plt.ylim(-1.5, 1.5)
plt.xticks(np.arange(0, 5/50, 0.005))
plt.xlabel('Time(sec)')
plt.ylabel('Amplitude')
plt.title('Clean SINE')
plt.tight_layout()
plt.grid()
error = noisy_sine - clean_sine
plt.subplot(3,1,3)
plt.plot(x1, error, color = "blue")
plt.xlim(0, 5/50)
plt.ylim(-1.5, 1.5)
plt.xticks(np.arange(0, 5/50, 0.005))
plt.xlabel('Time(sec)')
plt.ylabel('Amplitude')
plt.title('Error')
plt.tight_layout()
plt.grid()
plt.show()
|
import re
import numpy as np
import pickle
from collections import defaultdict
from chainer.dataset.dataset_mixin import DatasetMixin
class LtrDataset(DatasetMixin):
"""
    Implementation of a learning-to-rank data set.
    Supports efficient slicing on query-level data. Note that a single sample is
    the collection of query-document pairs for one query, represented as a matrix
    of feature vectors and a vector of relevance scores.
"""
def __init__(self, feature_vectors, relevance_scores, query_pointer,
query_ids, nr_of_queries):
self.feature_vectors = feature_vectors
self.relevance_scores = relevance_scores
self.query_pointer = query_pointer
self.query_ids = query_ids
self.nr_queries = nr_of_queries
def __len__(self):
"""
Returns the number of queries.
"""
return self.nr_queries
def get_example(self, i):
"""
Returns the i-th example.
Implementations should override it. It should raise :class:`IndexError`
if the index is invalid.
Args:
i (int): The index of the example.
Returns:
The i-th example.
"""
if i < 0 or i >= self.nr_queries:
raise IndexError
start = self.query_pointer[i]
end = self.query_pointer[i+1]
        return LtrDataset(self.feature_vectors[start:end, :],
                          self.relevance_scores[start:end],
                          np.array([0, end - start]),
                          [self.query_ids[i]], 1)
def normalize(self):
for i in range(self.nr_queries):
start = self.query_pointer[i]
end = self.query_pointer[i+1]
self.feature_vectors[start:end, :] -= np.min(self.feature_vectors[start:end, :], axis=0)
maximum = np.max(self.feature_vectors[start:end, :], axis=0)
maximum[maximum == 0.0] = 1.0
self.feature_vectors[start:end, :] /= maximum
@classmethod
def load_txt(cls, file_handle, normalize=False):
"""
Loads a learning to rank dataset from a text file source
        :param file_handle: An open file handle (or any iterable of lines) to read from
        :param normalize: bool. If True, min-max normalize the features per query
:return: A `class:dataset.dataset.LtrDataset` object
"""
# Iterate over lines in the file
data_set = defaultdict(list)
for line in file_handle:
# Extract the data point information
data_point = LtrDataPoint(line)
data_set[data_point.qid].append(data_point)
# Convert feature vectors, relevance scores and query pointer to correct
# form
query_ids = list(data_set.keys())
query_pointer = np.array([len(data_set[query]) for query in data_set])
query_pointer = np.cumsum(query_pointer)
query_pointer = np.hstack([np.array([0]), query_pointer])
nr_of_queries = len(data_set)
feature_vectors = np.vstack([data_point.feature_vector
for query in data_set
for data_point in data_set[query]])
relevance_scores = np.vstack([data_point.relevance
for query in data_set
for data_point in data_set[query]])
# Free memory
del data_set
# Generate object to return
result = LtrDataset(feature_vectors, relevance_scores, query_pointer,
query_ids, nr_of_queries)
# If normalization is necessary, do so
if normalize:
result.normalize()
# Cast to float32 (after normalization) which is typical format in
# chainer
result.feature_vectors = result.feature_vectors.astype(dtype=np.float32)
# Return result
return result
def save_txt(self, file_handle):
"""
Saves the data set in txt format to given file
:param file_handle: The file to save to
"""
for i in range(self.nr_queries):
start = self.query_pointer[i]
end = self.query_pointer[i + 1]
for j in range(start, end):
features = " ".join('{i}:{v}'.format(i=i,
v=self.feature_vectors[j, i])
for i in range(len(self.feature_vectors[j])))
out = '{r} qid:{qid} {features}\n'.format(r=self.relevance_scores[j,0],
qid=self.query_ids[i],
features=features)
file_handle.write(out)
def save(self, file_handle):
"""
Saves the data set in binary format to given file
:param file_handle: The file to save to
"""
pickle.dump(self, file_handle)
@classmethod
def load(cls, file_handle):
"""
Loads the data set in binary format from given file
:param file_handle: The file to load from
:return: A `class:dataset.dataset.LtrDataset` object
"""
return pickle.load(file_handle)
class LtrDataPoint:
"""
A single learning to rank data point, contains a query identifier, a
relevance label and a feature vector
"""
qid_regex = re.compile(".*qid:([0-9]+).*")
relevance_regex = re.compile("^[0-9]+")
feature_regex = re.compile("([0-9]+):([^ ]+)")
def __init__(self, line):
# Remove comment
comment_start = line.find("#")
if comment_start >= 0:
line = line[:comment_start]
# Extract qid
self.qid = re.search(LtrDataPoint.qid_regex, line).group(1)
self.relevance = re.search(LtrDataPoint.relevance_regex, line).group(0)
features = re.findall(LtrDataPoint.feature_regex, line)
minimum = min(int(index) for index, _ in features)
maximum = max(int(index) for index, _ in features)
self.feature_vector = np.zeros(1 + maximum - minimum)
for index, value in features:
self.feature_vector[int(index) - minimum] = float(value)
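# Hedged usage sketch (added for illustration; not part of the original module):
# LtrDataPoint parses a single LETOR/SVMrank-style line. The qid, label and
# feature values below are made up purely for this example.
if __name__ == "__main__":
    sample_line = "2 qid:17 1:0.03 2:0.5 3:0.0 # doc-42"
    point = LtrDataPoint(sample_line)
    print(point.qid)             # -> '17'
    print(point.relevance)       # -> '2'
    print(point.feature_vector)  # -> [0.03 0.5  0.  ]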
|
from .datasets.captioning import CaptioningDataset
from .datasets.masked_lm import MaskedLmDataset
from .datasets.multilabel import MultiLabelClassificationDataset
from .datasets.downstream import (
ImageNetDataset,
INaturalist2018Dataset,
VOC07ClassificationDataset,
ImageDirectoryDataset,
)
__all__ = [
"CaptioningDataset",
"MaskedLmDataset",
"MultiLabelClassificationDataset",
"ImageDirectoryDataset",
"ImageNetDataset",
"INaturalist2018Dataset",
"VOC07ClassificationDataset",
]
|
from bs4 import BeautifulSoup
import requests
url = "http://awoiaf.westeros.org/index.php/Main_Page"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")
word_bank = set()
for link in soup.find_all('a'):
print(link)
word_bank.add(link.get('title'))
print(word_bank)
|
# -*- coding: utf-8 -*-
import csv
import os
csv_file = "test.csv"
with open(csv_file, "wt", newline="") as f: # 参数newline设置一行文本的结束字符
w = csv.writer(f, dialect="excel", delimiter="#") # 以excel方式打开,分隔符为“#”
w.writerow(("index", "char", "num")) # 插入一行数据
for i in range(3):
w.writerow((i + 1, chr(ord('a') + i), i * i))
with open(csv_file, "rt") as f:
r = csv.reader(f, delimiter='#')
# print(list(r))
for row in r:
print(row)
print(row[1])
os.remove(csv_file)
# ### Standard library csv module
# - CSV File Reading and Writing
# - https://docs.python.org/3/library/csv.html
# - Source file: <Python installation directory>\Lib\csv.py
|
import datetime
import logging
import orm
logging.basicConfig(level=logging.INFO)
class BotMixin(object):
pass
def new_timezone():
uu = datetime.datetime.now()
return uu
class RequestCache(object):
__tablename__ = "sheet_request_cache"
id = orm.Integer(primary_key=True)
request_id = orm.Text()
data = orm.JSON(default={})
|
import io
import os
import struct
from datetime import datetime
from enum import Enum
import numpy as np
import pandas as pd
import soundfile as sf
from PIL import Image
from dataclasses import dataclass
from .classifiers.classifier import Classifier
class DataType(Enum):
EMOTIONS = 1
EMOTIONS_FROM_RAW_DATA = 2
EMOTIONS_GROUPED = 3
EMOTIONS_FROM_RAW_DATA_GROUPED = 4
@dataclass
class EmotionData:
file_name: str
local_NN_name: str
robot_NN_name: str
classifier: Classifier
data_frame: pd.DataFrame = pd.DataFrame()
grouped_data_frame: pd.DataFrame = pd.DataFrame()
raw_data_data_frame: pd.DataFrame = pd.DataFrame()
grouped_raw_data_data_frame: pd.DataFrame = pd.DataFrame()
class DataSaver:
def __init__(self, video_nn, audio_nn):
self.save_data = False
self.directory_path = None
self.video = EmotionData("video_emotion_data.csv", video_nn.get_name(), "", video_nn,)
self.audio = EmotionData("audio_emotion_data.csv", audio_nn.get_name(), "", audio_nn)
self.MAX_NUMBER_OF_ROW = 20
def start_saving_data(self, directory_path):
print("Start")
self.save_data = True
self.directory_path = directory_path
def stop_saving_data(self):
self.save_data = False
def save_emotions(self, type, emotions, timestamp):
if type == "video":
data = self.video
else:
data = self.audio
emotions_with_timestamp = self.get_emotions_with_timestamp(emotions, timestamp)
data.data_frame = data.data_frame.append(emotions_with_timestamp,
ignore_index=True)
data.grouped_data_frame = data.grouped_data_frame.append(
self.get_grouped_emotions_with_timestamp(data.classifier, emotions, timestamp), ignore_index=True)
if data.data_frame.shape[0] > self.MAX_NUMBER_OF_ROW:
data.data_frame = data.data_frame.drop(data.data_frame.index[0])
if data.grouped_data_frame.shape[0] > self.MAX_NUMBER_OF_ROW:
data.grouped_data_frame = data.grouped_data_frame.drop(data.grouped_data_frame.index[0])
if not self.save_data or "no_face" in emotions_with_timestamp:
return
self.save_emotions_to_file(emotions_with_timestamp, data)
def get_emotions_with_timestamp(self, emotions_dict, timestamp):
result = emotions_dict.copy()
result.update({"timestamp": timestamp})
return result
def get_grouped_emotions_with_timestamp(self, classifier, emotions, timestamp):
grouped_results, grouped_labels = classifier.group(list(emotions.values()), list(emotions.keys()))
grouped_emotions = self.get_emotion_dictionary(grouped_labels, grouped_results)
grouped_emotions["timestamp"] = timestamp
return grouped_emotions
def save_emotions_to_file(self, emotions, data):
df = pd.DataFrame(emotions, index=[0])
file_path = os.path.join(self.directory_path, data.file_name)
with open(file_path, 'a') as f:
df.to_csv(f, header=f.tell() == 0)
def save_raw_data(self, type, bytes, timestamp):
self.save_emotions_from_raw_data(type, bytes, timestamp)
if not self.save_data:
return
if type == "video":
self.save_picture(bytes)
elif type == "audio":
self.save_audio(bytes)
def save_picture(self, bytes):
image = Image.open(io.BytesIO(bytes))
timestamp = datetime.timestamp(datetime.now())
file_path = os.path.join(self.directory_path, str(int(timestamp)) + ".png")
image.save(file_path, "PNG")
def save_audio(self, bytes):
if bytes != b'':
count = int(len(bytes) / 4)
floats = struct.unpack(">" + ('f' * count), bytes)
timestamp = datetime.timestamp(datetime.now())
file_path = os.path.join(self.directory_path, str(int(timestamp)) + ".wav")
sf.write(file_path, np.array(floats), 44100, 'PCM_16', endian="FILE")
def get_video_labels(self, type):
return self.get_labels(self.video, type)
def get_audio_labels(self, type):
return self.get_labels(self.audio, type)
def get_labels(self, data, type):
if type == DataType.EMOTIONS:
return self.get_labels_without_timestamp(data.data_frame)
elif type == DataType.EMOTIONS_FROM_RAW_DATA:
return self.get_labels_without_timestamp(data.raw_data_data_frame)
elif type == DataType.EMOTIONS_GROUPED:
return self.get_labels_without_timestamp(data.grouped_data_frame)
elif type == DataType.EMOTIONS_FROM_RAW_DATA_GROUPED:
return self.get_labels_without_timestamp(data.grouped_raw_data_data_frame)
def get_labels_without_timestamp(self, data_frame):
column_names = list(data_frame)
if "timestamp" in column_names:
column_names.remove("timestamp")
return column_names
def get_video_data(self, type):
return self.get_data(type, self.video)
def get_audio_data(self, type):
return self.get_data(type, self.audio)
def get_data(self, type, data):
if type == DataType.EMOTIONS:
return self.get_data_from_df(data.data_frame)
elif type == DataType.EMOTIONS_FROM_RAW_DATA:
return self.get_data_from_df(data.raw_data_data_frame)
elif type == DataType.EMOTIONS_GROUPED:
return self.get_data_from_df(data.grouped_data_frame)
elif type == DataType.EMOTIONS_FROM_RAW_DATA_GROUPED:
return self.get_data_from_df(data.grouped_raw_data_data_frame)
def get_data_from_df(self, data_frame):
data_list = []
for index, row in data_frame.iterrows():
x = row["timestamp"]
time = datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')
x = f"{time.year}-{time.month}-{time.day:02d} {time.hour:02d}:{time.minute:02d}:{time.second:02d}"
biggest_val = 0
y = None
for key, value in row.items():
if key == "timestamp":
continue
if value > biggest_val:
biggest_val = value
y = key
data_list.append({"x": x, "y": y})
data_list.sort(key=lambda x: x["x"])
return data_list
def save_emotions_from_raw_data(self, type, bytes, timestamp):
if type == "video":
data = self.video
else:
data = self.audio
emotions, grouped_emotions = self.classify_emotions(data.classifier, bytes, timestamp)
data.raw_data_data_frame = data.raw_data_data_frame.append(emotions, ignore_index=True)
if data.raw_data_data_frame.shape[0] > self.MAX_NUMBER_OF_ROW:
data.raw_data_data_frame = data.raw_data_data_frame.drop(data.raw_data_data_frame.index[0])
data.grouped_raw_data_data_frame = data.grouped_raw_data_data_frame.append(grouped_emotions, ignore_index=True)
if data.grouped_raw_data_data_frame.shape[0] > self.MAX_NUMBER_OF_ROW:
data.grouped_raw_data_data_frame = data.grouped_raw_data_data_frame.drop(
data.grouped_raw_data_data_frame.index[0])
def classify_emotions(self, classifier, bytes, timestamp):
results, labels = classifier.classify(bytes)
emotions = self.get_emotion_dictionary(labels, results)
emotions["timestamp"] = timestamp
grouped_results, grouped_labels = classifier.group(results, labels)
grouped_emotions = self.get_emotion_dictionary(grouped_labels, grouped_results)
grouped_emotions["timestamp"] = timestamp
return emotions, grouped_emotions
def get_emotion_dictionary(self, labels, results):
result = {}
for l, e in zip(labels, results):
result[l] = e
return result
def update_nn_name(self, name, type):
if type == "audio":
self.audio.robot_NN_name = name
else:
self.video.robot_NN_name = name
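# A minimal usage sketch (illustrative; `video_nn` and `audio_nn` stand for
# classifier objects exposing get_name(), classify() and group()):
#   saver = DataSaver(video_nn, audio_nn)
#   saver.start_saving_data("/tmp/session_01")
#   saver.save_emotions("video", {"happy": 0.8, "sad": 0.2}, "2021-01-01 12:00:00.000000")
#   saver.stop_saving_data()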
|
"""Unit test package for grblc."""
|
'''
Given a binary tree
struct TreeLinkNode {
TreeLinkNode *left;
TreeLinkNode *right;
TreeLinkNode *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Note:
You may only use constant extra space.
Recursive approach is fine, implicit stack space does not count as extra space for this problem.
You may assume that it is a perfect binary tree (ie, all leaves are at the same level, and every parent has two children).
Example:
Given the following perfect binary tree,
1
/ \
2 3
/ \ / \
4 5 6 7
After calling your function, the tree should look like:
1 -> NULL
/ \
2 -> 3 -> NULL
/ \ / \
4->5->6->7 -> NULL
'''
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
if not root:
return
vec = [root]
while len(vec):
next_vec = []
            for i in range(len(vec) - 1):
vec[i].next = vec[i+1]
            for i in range(len(vec)):
if vec[i].left:
next_vec.append(vec[i].left)
if vec[i].right:
next_vec.append(vec[i].right)
vec = next_vec
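# The level-order solution above keeps one list per level, so it does not meet
# the "constant extra space" note. A sketch of an O(1)-space alternative that
# relies on the tree being perfect (illustrative, not part of the original solution):
class SolutionConstantSpace:
    def connect(self, root):
        leftmost = root
        while leftmost and leftmost.left:
            head = leftmost
            while head:
                head.left.next = head.right           # connect children of the same node
                if head.next:
                    head.right.next = head.next.left  # bridge to the neighbouring subtree
                head = head.next
            leftmost = leftmost.left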
|
#
# Basic PoW Auth class
#
import werkzeug.security
from {{appname}}.models.tinydb import pow_user as Model
class PowAuth(object):
    @staticmethod
    def check_auth(login, pwd):
        """
        checks the login, password hash combination
        against the pow_user model
        """
        m = Model()
        try:
            m = m.find(m.Query.login == login)
        except Exception:
            return (False, "Can't find user: " + login)
        try:
            return werkzeug.security.check_password_hash(m.pwd_hash, pwd)
        except Exception:
            return (False, "Wrong password for user: " + login)
|
import numpy as np
from pax import plugin
from pax.dsputils import adc_to_pe
class DesaturatePulses(plugin.TransformPlugin):
"""Estimates the waveform shape in channels that go beyond the digitizer's dynamic range, using
the other channels' waveform shape as a template.
pulse.w will be changed from int16 to float64
See Fei & Yuehan's note: media=xenon:feigao:xenon1t_background_comparison_jan2017.html
"""
def startup(self):
self.reference_baseline = self.config['digitizer_reference_baseline']
def transform_event(self, event):
tpc_channels = np.array(self.config['channels_in_detector']['tpc'])
# Boolean array, tells us which pulses are saturated
is_saturated = np.array([p.maximum >= self.reference_baseline - p.baseline - 0.5
for p in event.pulses])
for pulse_i, pulse in enumerate(event.pulses):
# Consider only saturated pulses in the TPC
if not is_saturated[pulse_i] or pulse.channel not in tpc_channels:
continue
# Where is the current pulse saturated?
saturated = pulse.raw_data <= 0 # Boolean array, True if sample is saturated
_where_saturated_all = np.where(saturated)[0]
# Split saturation if there is long enough non-saturated samples in between
_where_saturated_diff = np.diff(_where_saturated_all, n=1)
_where_saturated_diff = np.where(_where_saturated_diff > self.config['reference_region_samples'])[0]
_where_saturated_list = np.split(_where_saturated_all, _where_saturated_diff+1)
# Find all pulses in TPC channels that overlap with the saturated & reference region
other_pulses = [p for i, p in enumerate(event.pulses)
if p.left < pulse.right and p.right > pulse.left and
not is_saturated[i] and
p.channel in tpc_channels and
p.channel not in self.config['large_after_pulsing_channels']]
if not len(other_pulses):
# Rare case where no other pulses available, one channel going crazy?
continue
for peak_i, _where_saturated in enumerate(_where_saturated_list):
try:
first_saturated = _where_saturated.min()
last_saturated = _where_saturated.max()
except (ValueError, RuntimeError, TypeError, NameError):
continue
# Select a reference region just before the start of the saturated region
reference_slice = slice(max(0, first_saturated - self.config['reference_region_samples']),
first_saturated)
# Compute the (gain-weighted) sum waveform of the non-saturated pulses
min_left = min([p.left for p in other_pulses + [pulse]])
max_right = max([p.right for p in other_pulses + [pulse]])
sumw = np.zeros(max_right - min_left + 1)
for p in other_pulses:
offset = p.left - min_left
sumw[offset:offset + len(p.raw_data)] += self.waveform_in_pe(p)
# Crop it to include just the part that overlaps with this pulse
offset = pulse.left - min_left
sumw = sumw[offset:offset + len(pulse.raw_data)]
# Compute the ratio of this channel's waveform / the nonsaturated waveform in the reference region
w = self.waveform_in_pe(pulse)
if len(sumw[reference_slice][sumw[reference_slice] > 1]) \
< self.config['reference_region_samples_treshold']:
# the pulse is saturated, but there are not enough reference samples to get a good ratio
                    # This effectively distinguishes between S1 and S2 and will only correct S2 signals
continue
ratio = w[reference_slice].sum()/sumw[reference_slice].sum()
# not < is preferred over >, since it will catch nan
if not ratio < self.config.get('min_reference_area_ratio', 1):
# The pulse is saturated, but insufficient information is available in the other channels
# to reliably reconstruct it
continue
if len(w[reference_slice][w[reference_slice] > 1]) < self.config['reference_region_samples_treshold']:
# the pulse is saturated, but there are not enough reference samples to get a good ratio
                    # This effectively distinguishes between S1 and S2 and will only correct S2 signals
continue
# Finding individual section of wf for each peak
# First end before the reference region of next peak
if peak_i+1 == len(_where_saturated_list):
end = len(w)
else:
end = _where_saturated_list[peak_i+1][0]-self.config['reference_region_samples']
# Second end before the first upwards turning point
v = sumw[last_saturated: end]
conv = np.ones(self.config['convolution_length'])/self.config['convolution_length']
v = np.convolve(conv, v, mode='same')
dv = np.diff(v, n=1)
                # Choose +2 pe/ns instead of 0 to avoid ending on the flat waveform
turning_point = np.where((np.hstack((dv, -10)) > 2) & (np.hstack((10, dv)) <= 2))[0]
if len(turning_point) > 0:
end = last_saturated + turning_point[0]
# Reconstruct the waveform in the saturated region according to this ratio.
# The waveform should never be reduced due to this (then the correction is making things worse)
saturated_to_correct = np.arange(int(first_saturated), int(end))
w[saturated_to_correct] = np.clip(sumw[saturated_to_correct] * ratio, 0, float('inf'))
# Convert back to raw ADC counts and store the corrected waveform
# Note this changes the type of pulse.w from int16 to float64: we don't have a choice,
# int16 probably can't contain the large amplitudes we may be putting in.
# As long as the raw data isn't saved again after applying this correction, this should be no problem
# (as in later code converting to floats is anyway the first step).
w /= adc_to_pe(self.config, pulse.channel)
w = self.reference_baseline - w - pulse.baseline
pulse.raw_data = w
return event
def waveform_in_pe(self, p):
"""Return waveform in pe/bin above baseline of a pulse"""
        w = self.reference_baseline - p.raw_data.astype(np.float64) - p.baseline
w *= adc_to_pe(self.config, p.channel)
return w
|
from dataclasses import dataclass, field
from enum import Enum
from re import compile
from string import Formatter
from typing import AnyStr, ClassVar, List
from lark import Token
class IssueType(str, Enum):
WARNING = 'Warning'
ERROR = 'Error'
@dataclass(order=True)
class Issue:
MD_REGEX: ClassVar = compile(r'`[^`]*`')
type: IssueType
line: int
column: int
message: str = field(compare=False)
@property
def html_message(self):
return self.MD_REGEX.sub(lambda match: f'<code>{match[0][1:-1]}</code>', self.message)
@dataclass
class IssueCollector:
__dirty: bool = field(default=False, init=False)
__issues: list = field(default_factory=list, init=False)
__by_type: dict = field(default_factory=lambda: {key: [] for key in IssueType}, init=False)
def add_error(self, source: Token, message: str, *args: AnyStr):
self.add(IssueType.ERROR, source, message, *args)
def add_warning(self, source: Token, message: str, *args: AnyStr):
self.add(IssueType.WARNING, source, message, *args)
def add(self, issue_type: IssueType, source: Token, message: str, *args: AnyStr):
self.__issues.append(Issue(issue_type, source.line, source.column, _format(message, *args)))
self.__dirty = True
def extend(self, other: 'IssueCollector'):
self.__issues.extend(other.__issues)
self.__dirty = True
@property
def errors(self) -> List[Issue]:
self.__clean()
return self.__by_type[IssueType.ERROR]
@property
def warnings(self) -> List[Issue]:
self.__clean()
return self.__by_type[IssueType.WARNING]
@property
def issues(self) -> List[Issue]:
self.__clean()
return self.__issues
def __clean(self):
if not self.__dirty:
return
self.__issues = sorted(self.__issues)
for key in IssueType:
self.__by_type[key].clear()
for issue in self.__issues:
self.__by_type[issue.type].append(issue)
self.__dirty = False
class IssueFormatter(Formatter):
def get_field(self, field_name, args, kwargs):
# Only allow to get data directly from args / kwargs
try:
key = int(field_name)
except ValueError:
key = field_name
return self.get_value(key, args, kwargs), key
def get_value(self, key, args, kwargs):
value = super().get_value(key, args, kwargs)
# Token class gets confused when the value is actually `bytes`
# instead of `str`, so we manually need to get the value.
if isinstance(value, Token):
value = value.value
if isinstance(value, bytes):
value = value.decode('utf-8')
return value
_format = IssueFormatter().format
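# A minimal usage sketch (illustrative only): the custom formatter decodes
# `bytes` arguments before substituting them into the message template.
if __name__ == '__main__':
    print(_format('unexpected token `{0}`', b'foo'))  # -> unexpected token `foo`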
|
import os
from setuptools import find_packages
from setuptools import setup
info = {}
version = os.path.join("arlunio", "_version.py")
with open(version) as f:
exec(f.read(), info)
def readme():
with open("README.md") as f:
return f.read()
required = ["attrs", "appdirs", "ipython", "numpy", "Pillow>=6.1.0"]
extras = {
"dev": [
"black",
"flake8",
"hypothesis",
"jupyterlab",
"pre-commit",
"pytest",
"pytest-cov",
"sphinx-autobuild",
"sphinx_rtd_theme",
"sphobjinv",
"towncrier",
"tox",
],
"doc": ["sphinx", "nbformat"],
"examples": ["jupyterlab"],
"testing": ["hypothesis"],
}
extras["all"] = list(
{item for name, items in extras.items() if name != "dev" for item in items}
)
setup(
name="arlunio",
version=info["__version__"],
project_urls={
"Documentation": "https://www.arlun.io/docs/",
"Source": "https://github.com/swyddfa/arlunio",
"Tracker": "https://github.com/swyddfa/arlunio/issues",
},
description="Drawing and animating with a blend of Python and mathematics.",
long_description=readme(),
long_description_content_type="text/markdown",
author="Swyddfa Developers",
author_email="swyddfa.dev@gmail.com",
license="MIT",
packages=find_packages(".", exclude=["tests*"]),
include_package_data=True,
python_requires=">=3.7",
install_requires=required,
extras_require=extras,
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Multimedia :: Graphics",
],
entry_points={
"console_scripts": ["arlunio = arlunio.cli.__main__:main"],
"sphinx.builders": ["nbtutorial = arlunio.doc", "nbgallery = arlunio.doc"],
"arlunio.backends": [
"numpy = arlunio.backends.numpy:NumpyBackend",
],
"arlunio.cli.commands": [
"repl = arlunio.cli.repl:Repl",
"tutorial = arlunio.cli.tutorial:Tutorial",
],
},
)
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import SQLBaseStore
from synapse.util.caches.descriptors import cached
from collections import namedtuple
from canonicaljson import encode_canonical_json
import logging
logger = logging.getLogger(__name__)
class TransactionStore(SQLBaseStore):
"""A collection of queries for handling PDUs.
"""
def get_received_txn_response(self, transaction_id, origin):
"""For an incoming transaction from a given origin, check if we have
already responded to it. If so, return the response code and response
body (as a dict).
Args:
transaction_id (str)
origin(str)
Returns:
tuple: None if we have not previously responded to
this transaction or a 2-tuple of (int, dict)
"""
return self.runInteraction(
"get_received_txn_response",
self._get_received_txn_response, transaction_id, origin
)
def _get_received_txn_response(self, txn, transaction_id, origin):
result = self._simple_select_one_txn(
txn,
table=ReceivedTransactionsTable.table_name,
keyvalues={
"transaction_id": transaction_id,
"origin": origin,
},
retcols=ReceivedTransactionsTable.fields,
allow_none=True,
)
if result and result["response_code"]:
return result["response_code"], result["response_json"]
else:
return None
def set_received_txn_response(self, transaction_id, origin, code,
response_dict):
"""Persist the response we returened for an incoming transaction, and
should return for subsequent transactions with the same transaction_id
and origin.
Args:
txn
transaction_id (str)
origin (str)
code (int)
response_json (str)
"""
return self._simple_insert(
table=ReceivedTransactionsTable.table_name,
values={
"transaction_id": transaction_id,
"origin": origin,
"response_code": code,
"response_json": buffer(encode_canonical_json(response_dict)),
},
or_ignore=True,
desc="set_received_txn_response",
)
def prep_send_transaction(self, transaction_id, destination,
origin_server_ts):
"""Persists an outgoing transaction and calculates the values for the
previous transaction id list.
This should be called before sending the transaction so that it has the
correct value for the `prev_ids` key.
Args:
transaction_id (str)
destination (str)
origin_server_ts (int)
Returns:
list: A list of previous transaction ids.
"""
return self.runInteraction(
"prep_send_transaction",
self._prep_send_transaction,
transaction_id, destination, origin_server_ts
)
def _prep_send_transaction(self, txn, transaction_id, destination,
origin_server_ts):
next_id = self._transaction_id_gen.get_next_txn(txn)
# First we find out what the prev_txns should be.
# Since we know that we are only sending one transaction at a time,
# we can simply take the last one.
query = (
"SELECT * FROM sent_transactions"
" WHERE destination = ?"
" ORDER BY id DESC LIMIT 1"
)
txn.execute(query, (destination,))
results = self.cursor_to_dict(txn)
prev_txns = [r["transaction_id"] for r in results]
# Actually add the new transaction to the sent_transactions table.
self._simple_insert_txn(
txn,
table=SentTransactions.table_name,
values={
"id": next_id,
"transaction_id": transaction_id,
"destination": destination,
"ts": origin_server_ts,
"response_code": 0,
"response_json": None,
}
)
# TODO Update the tx id -> pdu id mapping
return prev_txns
def delivered_txn(self, transaction_id, destination, code, response_dict):
"""Persists the response for an outgoing transaction.
Args:
transaction_id (str)
destination (str)
code (int)
response_json (str)
"""
return self.runInteraction(
"delivered_txn",
self._delivered_txn,
transaction_id, destination, code,
buffer(encode_canonical_json(response_dict)),
)
def _delivered_txn(self, txn, transaction_id, destination,
code, response_json):
self._simple_update_one_txn(
txn,
table=SentTransactions.table_name,
keyvalues={
"transaction_id": transaction_id,
"destination": destination,
},
updatevalues={
"response_code": code,
"response_json": None, # For now, don't persist response_json
}
)
def get_transactions_after(self, transaction_id, destination):
"""Get all transactions after a given local transaction_id.
Args:
transaction_id (str)
destination (str)
Returns:
list: A list of dicts
"""
return self.runInteraction(
"get_transactions_after",
self._get_transactions_after, transaction_id, destination
)
def _get_transactions_after(self, txn, transaction_id, destination):
query = (
"SELECT * FROM sent_transactions"
" WHERE destination = ? AND id >"
" ("
" SELECT id FROM sent_transactions"
" WHERE transaction_id = ? AND destination = ?"
" )"
)
txn.execute(query, (destination, transaction_id, destination))
return self.cursor_to_dict(txn)
@cached()
def get_destination_retry_timings(self, destination):
"""Gets the current retry timings (if any) for a given destination.
Args:
destination (str)
Returns:
None if not retrying
Otherwise a dict for the retry scheme
"""
return self.runInteraction(
"get_destination_retry_timings",
self._get_destination_retry_timings, destination)
def _get_destination_retry_timings(self, txn, destination):
result = self._simple_select_one_txn(
txn,
table=DestinationsTable.table_name,
keyvalues={
"destination": destination,
},
retcols=DestinationsTable.fields,
allow_none=True,
)
if result and result["retry_last_ts"] > 0:
return result
else:
return None
def set_destination_retry_timings(self, destination,
retry_last_ts, retry_interval):
"""Sets the current retry timings for a given destination.
        Both timings should be zero if retrying is no longer occurring.
Args:
destination (str)
retry_last_ts (int) - time of last retry attempt in unix epoch ms
retry_interval (int) - how long until next retry in ms
"""
        # XXX: we could choose to not bother persisting this if our cache thinks
# this is a NOOP
return self.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings,
destination,
retry_last_ts,
retry_interval,
)
def _set_destination_retry_timings(self, txn, destination,
retry_last_ts, retry_interval):
txn.call_after(self.get_destination_retry_timings.invalidate, (destination,))
self._simple_upsert_txn(
txn,
"destinations",
keyvalues={
"destination": destination,
},
values={
"retry_last_ts": retry_last_ts,
"retry_interval": retry_interval,
},
insertion_values={
"destination": destination,
"retry_last_ts": retry_last_ts,
"retry_interval": retry_interval,
}
)
def get_destinations_needing_retry(self):
"""Get all destinations which are due a retry for sending a transaction.
Returns:
list: A list of dicts
"""
return self.runInteraction(
"get_destinations_needing_retry",
self._get_destinations_needing_retry
)
def _get_destinations_needing_retry(self, txn):
query = (
"SELECT * FROM destinations"
" WHERE retry_last_ts > 0 and retry_next_ts < ?"
)
txn.execute(query, (self._clock.time_msec(),))
return self.cursor_to_dict(txn)
class ReceivedTransactionsTable(object):
table_name = "received_transactions"
fields = [
"transaction_id",
"origin",
"ts",
"response_code",
"response_json",
"has_been_referenced",
]
class SentTransactions(object):
table_name = "sent_transactions"
fields = [
"id",
"transaction_id",
"destination",
"ts",
"response_code",
"response_json",
]
EntryType = namedtuple("SentTransactionsEntry", fields)
class TransactionsToPduTable(object):
table_name = "transaction_id_to_pdu"
fields = [
"transaction_id",
"destination",
"pdu_id",
"pdu_origin",
]
class DestinationsTable(object):
table_name = "destinations"
fields = [
"destination",
"retry_last_ts",
"retry_interval",
]
|
'''
Created on Jul 26, 2013
@author: Yubin Bai
'''
import sys
INF = 1 << 31
def solve(par):
N, M, Q, mat, query = par
result = []
for q in query:
c = mat[q[0]][q[1]]
size = 1
finished = False
while not finished:
sStr = c * size
            if q[0] - size // 2 >= 0 and q[0] + size // 2 < N \
                    and q[1] - size // 2 >= 0 and q[1] + size // 2 < M:
for i in range(q[0] - size // 2, q[0] + size // 2 + 1):
if sStr != mat[i][q[1] - size // 2:q[1] + size // 2 + 1]:
finished = True
size -= 2
break
size += 2
else:
break
result.append(str(size - 2))
return '\n'.join(result)
if __name__ == '__main__':
sys.stdin = open('input.txt', 'r')
numTest = int(input())
for itertest in range(numTest):
        N, M, Q = map(int, input().split())
        mat = []
        for i in range(N):
            mat.append(input().strip())
        query = []
        for i in range(Q):
            query.append(list(map(int, input().split())))
print(solve((N, M, Q, mat, query)))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Vantiv eCommerce
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import absolute_import, print_function, unicode_literals
from cnpsdk import (utils, communication)
conf = utils.Configuration()
SERVICE_ROUTE = "/services/chargebacks"
"""
/////////////////////////////////////////////////////
Chargeback Documents API:
/////////////////////////////////////////////////////
"""
def upload_document(case_id, document_path, config=conf):
document_id = document_path.split("/")[-1]
url_suffix = SERVICE_ROUTE + "/upload/" + str(case_id) + "/" + str(document_id)
return communication.http_post_document_request(url_suffix, document_path, config=config)
def retrieve_document(case_id, document_id, document_path, config=conf):
url_suffix = SERVICE_ROUTE + "/retrieve/" + str(case_id) + "/" + str(document_id)
communication.http_get_document_request(url_suffix, document_path, config=config)
def replace_document(case_id, document_id, document_path, config=conf):
url_suffix = SERVICE_ROUTE + "/replace/" + case_id + "/" + document_id
return communication.http_put_document_request(url_suffix, document_path, config=config)
def delete_document(case_id, document_id, config=conf):
url_suffix = SERVICE_ROUTE + "/delete/" + str(case_id) + "/" + str(document_id)
return communication.http_delete_document_response(url_suffix, config=config)
def list_documents(case_id, config=conf):
url_suffix = SERVICE_ROUTE + "/list/" + str(case_id)
return communication.http_get_document_list_request(url_suffix, config=config)
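# A minimal usage sketch (illustrative; the case id and file paths are made up):
#   upload_document(1000, "/tmp/dispute_evidence.pdf")
#   list_documents(1000)
#   retrieve_document(1000, "dispute_evidence.pdf", "/tmp/downloaded_copy.pdf")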
|
import requests,json,sys,re,os
from datetime import datetime
from VideoIDHelper import *
# If you intend to extend this module,
# simply call
# retrieveShows(arg, location)
#
# arg is either an ID, link or shortened link
# and location is the directory the JSON file is to be saved in
def retrieveShows(arg,location="showJSONs/"):
#print(idExtractor(arg))
contents=""
episode ={}
try:
vID = idExtractor(arg)
pars = {"v" : vID, "hl":"en"}
r = requests.get("https://www.youtube.com/watch", params=pars)
contents = r.content
sC = str(contents)
if(len(contents) != 0):
#print( str(contents).count("watch-extras-section"))
#print( sC.find("ul", sC.find("content watch-info-tag-list")) )
#print(len('<span class="standalone-collection-badge-renderer-text">'))
c = sC.find('<span class="standalone-collection-badge-renderer-text">') + 56
splice = sC[c:sC.find('</span>',c)]
#print(splice)
#get title
title = splice[:splice.find('</a></b>')]
title = title[title.rfind('>')+1:]
#print("T:"+title)
episode = season = splice[splice.find('</a></b>')+8:]
#episode = season = splice[season.rfind('>')+1:]
#get season
season=season[season.find('S')+1:]
season=season[:season.find(' ')]
#print("S:"+season)
#get episode
episode=episode[episode.find('E')+1:]
#episode=episode[:episode.find(' ')]
#episode=episode[episode.find('E')+1:]
#print(episode)
episode={"title":title, "season":season, "episode":episode }
#print (episode)
try:
with open( location+"{}.json".format(vID), 'w') as f:
json.dump(episode, f)
#with open( (location+"{}.html").format(vID),"wb") as f:
# f.write(contents)
except:
with open( location+"/{}.json".format(vID), 'w') as f:
json.dump(episode, f)
#with open( (location+"/{}.html").format(vID),"wb") as f:
# f.write(contents)
else:
return None
return episode
except:
return None
return None
def main():
first = True
argument = ""
print( "Hello today is: " + str(datetime.now().month) + "/" + str(datetime.now().day))
print( "Remember that we have time until: " + "1/15" + " (presumably PST 0:00) " )
#retrieveShows("https://www.youtube.com/watch?v=BZLGKFWlRzY&list=PLhyKYa0YJ_5BevK2pZGDi-zUMorOSn2ed")
#return
while first or argument == "":
#argument ="horse"
argument = input("Type in a URL to video or its ID: ")
try:
if argument == "":
print("Program Terminated")
break
result = retrieveShows(argument)
if result != None:
print("Loaded Succesfully.")
else:
print("Unable to Load, File not Generated.")
except:
print("Unable to recognize {}".format(argument))
print("Program Terminated")
break
#argument = ""
#first = False
if __name__== "__main__":
main()
|
"""
Kivy Widget that accepts data and displays qrcode.
"""
import os
from functools import partial
from threading import Thread
import qrcode
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.lang import Builder
from kivy.properties import (BooleanProperty, ListProperty, NumericProperty,
StringProperty)
from kivy.uix.floatlayout import FloatLayout
class QRCodeWidget(FloatLayout):
show_border = BooleanProperty(True)
"""Whether to show border around the widget.
:data:`show_border` is a :class:`~kivy.properties.BooleanProperty`,
defaulting to `True`.
"""
data = StringProperty(None, allow_none=True)
"""Data using which the qrcode is generated.
:data:`data` is a :class:`~kivy.properties.StringProperty`, defaulting to
`None`.
"""
error_correction = NumericProperty(qrcode.constants.ERROR_CORRECT_L)
"""The error correction level for the qrcode.
:data:`error_correction` is a constant in :module:`~qrcode.constants`,
defaulting to `qrcode.constants.ERROR_CORRECT_L`.
"""
background_color = ListProperty((1, 1, 1, 1))
"""Background color of the background of the widget to be displayed
behind the qrcode.
:data:`background_color` is a :class:`~kivy.properties.ListProperty`,
defaulting to `(1, 1, 1, 1)`.
"""
loading_image = StringProperty('data/images/image-loading.gif')
"""Intermediate image to be displayed while the widget ios being loaded.
:data:`loading_image` is a :class:`~kivy.properties.StringProperty`,
defaulting to `'data/images/image-loading.gif'`.
"""
def __init__(self, **kwargs):
module_dir = os.path.dirname(os.path.abspath(__file__))
Builder.load_file(os.path.join(module_dir, "qrcode_widget.kv"))
super().__init__(**kwargs)
self.addr = None
self.qr = None
self._qrtexture = None
def on_data(self, instance, value):
if not (self.canvas or value):
return
img = self.ids.get('qrimage', None)
if not img:
# if texture hasn't yet been created delay the texture updating
Clock.schedule_once(lambda dt: self.on_data(instance, value))
return
img.anim_delay = .25
img.source = self.loading_image
Thread(target=partial(self.generate_qr, value)).start()
def on_error_correction(self, instance, value):
self.update_qr()
def generate_qr(self, value):
self.set_addr(value)
self.update_qr()
def set_addr(self, addr):
if self.addr == addr:
return
MinSize = 210 if len(addr) < 128 else 500
self.setMinimumSize((MinSize, MinSize))
self.addr = addr
self.qr = None
def update_qr(self):
if not self.addr and self.qr:
return
QRCode = qrcode.QRCode
addr = self.addr
try:
self.qr = qr = QRCode(
version=None,
error_correction=self.error_correction,
box_size=10,
border=0,
)
qr.add_data(addr)
qr.make(fit=True)
except Exception as e:
print(e)
self.qr = None
self.update_texture()
def setMinimumSize(self, size):
# currently unused, do we need this?
self._texture_size = size
def _create_texture(self, k, dt):
self._qrtexture = texture = Texture.create(size=(k, k), colorfmt='rgb')
# don't interpolate texture
texture.min_filter = 'nearest'
texture.mag_filter = 'nearest'
def update_texture(self):
if not self.addr:
return
matrix = self.qr.get_matrix()
k = len(matrix)
# create the texture in main UI thread otherwise
# this will lead to memory corruption
Clock.schedule_once(partial(self._create_texture, k), -1)
cr, cg, cb, ca = self.background_color[:]
cr, cg, cb = int(cr*255), int(cg*255), int(cb*255)
# used bytearray for python 3.5 eliminates need for btext
buff = bytearray()
for r in range(k):
for c in range(k):
buff.extend([0, 0, 0] if matrix[r][c] else [cr, cg, cb])
# then blit the buffer
# join not necessary when using a byte array
# buff =''.join(map(chr, buff))
# update texture in UI thread.
Clock.schedule_once(lambda dt: self._upd_texture(buff))
def _upd_texture(self, buff):
texture = self._qrtexture
if not texture:
# if texture hasn't yet been created delay the texture updating
Clock.schedule_once(lambda dt: self._upd_texture(buff))
return
texture.blit_buffer(buff, colorfmt='rgb', bufferfmt='ubyte')
texture.flip_vertical()
img = self.ids.qrimage
img.anim_delay = -1
img.texture = texture
img.canvas.ask_update()
if __name__ == '__main__':
import sys
from kivy.app import runTouchApp
data = str(sys.argv[1:])
runTouchApp(QRCodeWidget(data=data))
|
# Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module consolidating permission handling routines
"""
#
# IMPORTS
#
from tessia.server.db.exceptions import AssociationError
from tessia.server.db.models import Project
from tessia.server.db.models import ResourceMixin
from tessia.server.db.models import Role
from tessia.server.db.models import RoleAction
from tessia.server.db.models import UserRole
import logging
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class PermManager:
"""
Manage user permission verifications
"""
def __init__(self):
"""
Constructor, creates logger instance.
"""
self._logger = logging.getLogger(__name__)
# __init__()
def _assert_create(self, user, item):
"""
Verify if the user can create the target item.
Args:
user (User): user db object
item (db.models.BASE): target's db object (not committed yet)
Returns:
str: project where user can create item
Raises:
AssociationError: if an invalid project was specified
PermissionError: if user has no rights
"""
# WARNING: it's very important to refer to the field itself
# 'project_id' and not to the relation 'project' because the db object
# is not committed yet which means the relation does not work
# model is a resource without project or ownership
# Such resources are generally handled by administrators and users with
# special permissions
if not issubclass(item.__class__, ResourceMixin):
# user is admin: the operation is allowed
if user.admin:
return None
# User roles may be changed within a project, when requester
# has special permissions, e.g. has OWNER_PROJECT role
if issubclass(item.__class__, UserRole):
# Check requester permissions
project = self._get_project_for_action(
user, 'CREATE', 'USER_ROLES', item.project_id)
if project is not None:
return project
raise PermissionError(
'User has no CREATE permission to grant user roles '
'in this project')
# for non admins, action is prohibited
raise PermissionError(
'You need administrator privileges to perform this operation')
# project specified by an admin user: no permission verification needed
if item.project_id is not None and user.admin:
try:
project_name = Project.query.filter_by(
id=item.project_id).first().id
except AttributeError:
raise AssociationError(
model=item, column='project_id',
value=item.project_id, associated_model=Project,
associated_column='id')
return project_name
# If a project was specified, verify if the user has create permission
# on it, otherwise try to find a project where user has create
# permission. In case both fail a forbidden exception is raised.
project_match = self._get_project_for_action(
user, 'CREATE', item.__tablename__, item.project_id)
# permission was found or validated: return corresponding project
if project_match is not None:
return project_match
        # user did not specify a project: report that no project with
        # permission was found
if item.project_id is None:
if user.admin:
msg = ('Could not detect which project to use, specify one')
else:
msg = ('No CREATE permission found for the user in any '
'project')
# project was specified: report that user has no permission on it
else:
msg = ('User has no CREATE permission for the specified '
'project')
raise PermissionError(msg)
# _assert_create()
def _assert_permission(self, user, action, target_obj, target_type):
"""
Helper function, asserts if the given user has the necessary
permissions to perform the specified action upon the target.
Args:
user (User): user db object
action (str): one of CREATE, UPDATE, DELETE
target_obj (db.models.BASE): db object
target_type (str): type of the target to report in case of error
Raises:
PermissionError: in case user has no permission
"""
# model is a resource without project or ownership
# Such resources are generally handled by administrators and users with
# special permissions
if not issubclass(target_obj.__class__, ResourceMixin):
# user is admin: the operation is allowed
if user.admin:
return
# User roles may be changed within a project, when requester
# has special permissions, e.g. has OWNER_PROJECT role
if issubclass(target_obj.__class__, UserRole):
# Check if the user has permission
project = self._get_project_for_action(
user, action, 'USER_ROLES', target_obj.project_id)
if project is not None:
return
raise PermissionError(
'User has no {} permission to update user roles '
'in this project'.format(action))
# for non admins, action is prohibited
raise PermissionError(
'You need administrator privileges to perform this operation')
# user is owner or an administrator: permission is granted
if self.is_owner_or_admin(user, target_obj):
return
match = self._get_project_for_action(
user, action, target_obj.__tablename__, target_obj.project_id)
# no permission in target's project: report error
if match is None:
msg = ('User has no {} permission for the specified '
'{}'.format(action, target_type))
raise PermissionError(msg)
# _assert_permission()
def _assert_read(self, user, item):
"""
Verify if the given user has access to read the target item.
Args:
user (User): user db object
item (db.models.BASE): target's db object
Raises:
PermissionError: if user has no permission
"""
# model is a special resource: reading is allowed for all
if not issubclass(item.__class__, ResourceMixin):
return
# non restricted user: regular resource reading is allowed
if not user.restricted:
return
# user is not the resource's owner or an administrator: verify
# if they have a role in resource's project
if not self.is_owner_or_admin(user, item):
# no role in system's project
if self.get_role_for_project(user, item.project_id) is None:
raise PermissionError(
"User has no role assigned in resource's project")
# _assert_read()
@staticmethod
def _get_project_for_action(user, action_name, resource_type,
project_id=None):
"""
Query the database and return the name of the project which allows
the user to perform the specified operation, or None if no such
permission exists.
Args:
user (User): user db object
action_name (str): the action to be performed (i.e. CREATE)
resource_type (string): tablename of target's resource
project_id (int): id of the target project, if None means
to find a suitable project
Returns:
str: project name, or None if not found
"""
query = Project.query.join(
UserRole, UserRole.project_id == Project.id
).filter(
UserRole.user_id == user.id
).filter(
RoleAction.role_id == UserRole.role_id
).filter(
RoleAction.resource == resource_type.upper()
).filter(
RoleAction.action == action_name
)
# no project specified: find one that allows the specified action
if project_id is None:
query = query.filter(UserRole.project_id == Project.id)
# project specified: verify if there is a permission for the user to
# perform the specified action on that project
else:
query = query.filter(UserRole.project_id == project_id)
project = query.first()
if project is not None:
project = project.name
return project
# _get_project_for_action()
@staticmethod
def get_role_for_project(user, project_id):
"""
Query the db for any role associated with the given user on the
provided project.
Args:
user (User): user db object
project_id (int): id of the target project
Returns:
UserRole: a role associated with user or None if not found
"""
query = UserRole.query.join(
'project_rel'
).join(
'user_rel'
).join(
'role_rel'
).filter(
UserRole.user_id == user.id
).filter(
Role.id == UserRole.role_id
).filter(
UserRole.project_id == project_id
)
return query.first()
# get_role_for_project()
def can(self, action, user, item, item_desc='resource'):
"""
Verify if a given action can be performed by a given user on a given
object.
Args:
action (str): one of CREATE, DELETE, READ, UPDATE
user (User): user db object
item (db.models.BASE): target's db object
item_desc (str): an optional item description to be used in error
messages
Returns:
str: for create action, project on which user has permission
Raises:
PermissionError: if user has no permission
ValueError: if action is update/delete and item is None
"""
action = action.upper()
if action in ('UPDATE', 'DELETE'):
self._assert_permission(user, action, item, item_desc)
return None
if action == 'CREATE':
return self._assert_create(user, item)
if action == 'READ':
self._assert_read(user, item)
return None
raise ValueError('Cannot validate unknown action <{}>'.format(action))
# can()
@staticmethod
def is_owner_or_admin(user, target_obj):
"""
Return whether the given user is the owner of the target object or an
administrator.
Args:
user (User): user db object
target_obj (ResourceMixin): db object
Returns:
bool: True if user is administrator or owner of the target
"""
return target_obj.owner_id == user.id or user.admin
# is_owner_or_admin()
# PermManager()
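# A minimal usage sketch (illustrative; `user`, `system_obj` and `new_obj` stand for db objects):
#   perman = PermManager()
#   perman.can('UPDATE', user, system_obj, 'system')  # raises PermissionError if not allowed
#   project = perman.can('CREATE', user, new_obj)     # returns the project to create the item in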
|
from django.conf.urls import url
from rest_framework.routers import DefaultRouter, SimpleRouter
from booktest import views
urlpatterns = [
]
# Router: dynamically generates the URL configuration entries for the viewset's handler functions
# router = SimpleRouter()
router = DefaultRouter()  # router with a default API root view
router.register('books', views.BookInfoViewSet, base_name='books')  # register the viewset with the router
urlpatterns += router.urls  # append the router-generated URL patterns to Django's urlpatterns
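# For reference, the DefaultRouter registration above generates URL patterns roughly like:
#   ^books/$        -> books-list   (list / create)
#   ^books/{pk}/$   -> books-detail (retrieve / update / delete)
# plus an API root view and format-suffix variants.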
|
import networkx as nx
import math
import operator
from datetime import datetime
import os
import queue
import copy
import threading
import multiprocessing
import pickle
import crestdsl.model as model
import crestdsl.model.api as api
from crestdsl.simulation.simulator import Simulator
import crestdsl.simulation.dependencyOrder as DO
from crestdsl.caching import Cache
import logging
logger = logging.getLogger(__name__)
# logger = multiprocessing.log_to_stderr()
# logger.setLevel(multiprocessing.SUBDEBUG)
EXPLORED = "explored"
class StateSpace(nx.DiGraph):
"""
Creates a graph of initial node + stabilised nodes.
Graph elements are SystemState objects.
This class is based on networkx.DiGraph.
See networkx documentation for more info.
"""
def __init__(self, system=None, *args, **kwargs):
"""
Parameters
----------
system: Entity
A crestdsl entity that will serve as the system to explore.
*args:
Arguments that are passed on to the underlying networkx structure.
**kwargs:
Arguments that are passed on to the underlying networkx structure.
"""
super().__init__(*args, **kwargs)
if system:
self.graph["system"] = system
sysstate = SystemState(system).save()
self.graph["root"] = sysstate
self.add_node(sysstate, label="INIT", explored=False) # initial node
def explore_until_time(self, time):
"""
        Expands the graph so that every path from the root to a leaf
        covers at least the given amount of time.
Parameters
----------
time: numeric
The minimum length of all paths between root and the nodes
"""
system = self.graph["system"]
# save state for later
current_system_state_backup = SystemState(system).save()
logger.info(f"Expanding until all leaves have a minimum path of more than {time} time units.")
leaves = [v for v, d in self.out_degree() if d == 0]
i = 0
while len(leaves) > 0:
i += 1
if i % 100 == 0:
logger.info(f"There are {len(leaves)} leaf nodes left to explore. (State space size: {len(self)} nodes)")
leaf = leaves.pop()
if self.out_degree(leaf) == 0 and not self.nodes(data=EXPLORED, default=False)[leaf]:
try:
length, path = nx.single_source_dijkstra(self, source=self.graph["root"], target=leaf, cutoff=time)
logger.debug(f"Leaf {leaf} reachable in {length} time units. Calculating successors.")
successors_transitions, dt = self.calculate_successors_for_node(leaf)
for successor, transitions in successors_transitions:
transitions = [operator.attrgetter(trans)(system) for trans in transitions]
successor = successor.deserialize(system)
self.add_edge(leaf, successor, weight=dt, transitions=transitions)
leaves.append(successor)
except nx.NetworkXNoPath:
logger.debug(f"No path to node {leaf} within {time} time units. That's okay.")
logger.info(f"Total size of statespace: {len(self)} nodes")
# revert system back to original state
current_system_state_backup.apply()
def explore(self, iterations_left=1, iteration_counter=1, parallel=False):
"""
        Expands the state space for a given number of iterations; each
        iteration calculates the successors of all currently unexplored nodes.
Parameters
----------
iterations_left: int
            How many iterations of exploration should be done (or None for "infinitely many")
iteration_counter: int
Don't specify it. It's for logging purposes only.
parallel: bool
Unstable. Don't use it!
"""
# save state for later
current_system_state_backup = SystemState(self.graph["system"]).save()
with Cache() as c:
final_counter = self._explore(iterations_left, iteration_counter, parallel=parallel)
# reset system state
current_system_state_backup.apply()
logger.info(f"Total size of statespace: {len(self)} nodes")
return final_counter # say how many iterations we did
def _explore(self, iterations_left=1, iteration_counter=1, parallel=False):
if iterations_left is None:
iterations_left = math.inf
logger.info(f"Expanding. (Current iteration: #{iteration_counter}, Iterations left: {iterations_left}) (Time now: {datetime.now().strftime('%H:%M:%S')})")
if iterations_left > 0 and self.calculate_successors(parallel): # returns if we should reiterate
return self._explore(iterations_left=iterations_left - 1, iteration_counter=iteration_counter+1, parallel=parallel)
else:
logger.info(f"Nothing more to expand. Stop now. Exploration cycles left: {iterations_left}")
return iteration_counter
    def calculate_successors(self, parallel=False):
        """ Returns True if new leaf nodes were added """
        if parallel:  # go parallel if necessary
            return self.calculate_successors_parallel()
        unexplored = [n for (n, exp) in self.nodes(data=EXPLORED, default=False) if not exp]
logger.info(f"Calculating successors of {len(unexplored)} unexplored nodes")
system = self.graph["system"]
continue_exploration = False
for ssnode in unexplored:
logger.debug(f"calculating successors of node {ssnode}")
successors_transitions, dt = self.calculate_successors_for_node(ssnode)
self.nodes[ssnode][EXPLORED] = True
# logger.debug(f"successors are: {successors}, after {ssnode.max_dt}")
for successor, transitions in successors_transitions:
transitions = [operator.attrgetter(trans)(system) for trans in transitions]
successor = successor.deserialize(system)
self.add_edge(ssnode, successor, weight=dt, transitions=transitions)
continue_exploration = True # successors found, meaning that we should continue exploring
return continue_exploration
def calculate_successors_for_node(self, ssnode):
""" CAREFUL!! this is the one where we only do one at a time!
BUT: it's faster than executing the parallel one on one thread"""
logger.debug(f"Calculating successors of node {ssnode}")
if getattr(self, "_ssc_cache", None) is None:
self._ssc_cache = StateSpaceCalculator(ssnode.system, own_context=False)
ssc = self._ssc_cache
ssnode.apply()
successor_transitions, dt = ssc.advance_to_nbct()
return successor_transitions, dt
def calculate_successors_parallel(self):
system = self.graph["system"]
unexplored = [n for (n, exp) in self.nodes(data=EXPLORED, default=False) if not exp]
logger.info(f"Calculating successors of {len(unexplored)} unexplored nodes")
NUM_theads = 1 #min(1, len(unexplored))
# q = queue.Queue(maxsize=0)
# [q.put(n) for n in unexplored]
logger.info(f"Launching {NUM_theads} thread(s) to find the successors")
job_queue = queue.Queue()
for unex in unexplored:
job_queue.put(unex)
results = []
thread_workers = []
for i in range(NUM_theads):
thread_worker = CrawlerThread(job_queue, results, system)
# thread_worker = threading.Thread(target=thread_crawler, args=(job_queue, results, system))
thread_workers.append(thread_worker)
            thread_worker.daemon = True
thread_worker.start()
job_queue.join()
# stop all threads, so they don't run infinitely long
for tw in thread_workers:
tw.stop()
logger.info(f"Done, the results are in!")
# print(results)
# DEAL WITH RESULTS !!
continue_exploration = False
for (ssnode, successors_transitions, dt) in results:
self.nodes[ssnode][EXPLORED] = True
for successor, transitions in successors_transitions:
transitions = [operator.attrgetter(trans)(system) for trans in transitions]
successor_node = successor.deserialize(system)
self.add_edge(ssnode, successor_node, weight=dt, transitions=transitions)
continue_exploration = True # successors found, meaning that we should continue exploring
return continue_exploration
def calculate_successors_parallel_process(self):
PROCESSORS = len(os.sched_getaffinity(0)) #os.cpu_count() # how many CPUs can we use?
unexplored = [n for (n, exp) in self.nodes(data=EXPLORED, default=False) if not exp]
logger.info(f"Calculating successors of {len(unexplored)} unexplored nodes")
continue_exploration = False
with multiprocessing.Pool(PROCESSORS) as pool:
systempickle = pickle.dumps(self.graph["system"])
mapresult = pool.map_async(parallel_calc, [(u.serialize(), systempickle) for u in unexplored])
listof_succ_transitions_pairs = mapresult.get()
pool.close()
pool.join()
print("Result:", listof_succ_transitions_pairs)
for successors_transitions in listof_succ_transitions_pairs:
for successor, transitions in successors_transitions:
self.add_edge(ssnode, successor, weight=dt, transitions=transitions)
continue_exploration = True # successors found, meaning that we should continue exploring
return continue_exploration
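# A minimal usage sketch (illustrative; `MySystem` stands for a user-defined crestdsl entity):
#   ss = StateSpace(MySystem())
#   ss.explore(iterations_left=3)    # expand three rounds of successors
#   ss.explore_until_time(10)        # or: expand until all paths cover 10 time units
#   print(len(ss), "states reached")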
def run_in_thread(ssnode, results):
system_copy = ssnode.create() # creates a copy of the system with the encoded state
system_copy._constraint_cache = None
ssc = StateSpaceCalculator(system_copy, own_context=False) # returns serialized stuff
successor_transitions, dt = ssc.advance_to_nbct()
results.append( (ssnode, successor_transitions, dt) )
return True
class CrawlerThread(threading.Thread):
def __init__(self, job_queue, results, system, timeout=3):
super().__init__()
self._run = True
self.job_queue = job_queue
self.results = results
self.system = system
self.timeout = timeout
def run(self):
system_copy = copy.deepcopy(self.system)
system_copy._constraint_cache = None
ssc = StateSpaceCalculator(system_copy, own_context=True) # returns serialized stuff
while self._run:
try:
                ssnode = self.job_queue.get(timeout=self.timeout)  # fetch new work from the Queue, waiting at most self.timeout seconds
ssnode.serialize().deserialize(system_copy) # creates a copy of the system with the encoded state
successor_transitions, dt = ssc.advance_to_nbct()
self.results.append( (ssnode, successor_transitions, dt) )
self.job_queue.task_done()
except queue.Empty as e:
logger.debug(f"Nothing new for {self.timeout} seconds. I'm stopping thread now.")
return True
return True
def stop(self):
self._run = False
def thread_crawler(job_queue, results, system):
system_copy = copy.deepcopy(system)
system_copy._constraint_cache = None
ssc = StateSpaceCalculator(system_copy, own_context=False) # returns serialized stuff
while True:
ssnode = job_queue.get() #fetch new work from the Queue
logger.debug(f"Calculating successors of node {ssnode}")
ssnode.serialize().deserialize(system_copy) # creates a copy of the system with the encoded state
successor_transitions, dt = ssc.advance_to_nbct()
results.append( (ssnode, successor_transitions, dt) )
job_queue.task_done()
return True
def as_dataframe(statespace):
node_vals = []
for node in statespace.nodes:
new_dict = {}
new_dict.update(node.systemstate.states)
new_dict.update(node.systemstate.ports)
node_vals.append(new_dict)
import pandas
df = pandas.DataFrame(node_vals)
return df
class SystemState(object):
""" An encoding of the system. Stores current state, current port values and pre port values"""
def __init__(self, system):
self.system = system
self.states = {}
self.ports = {}
self.pre = {}
def save(self):
"""Creates two maps, one with <entity: current>, the other one with <port, value>"""
current_states = {entity: entity.current for entity in model.get_all_entities(self.system)}
self.states.update(current_states)
port_values = {port: port.value for port in model.get_all_ports(self.system)}
self.ports.update(port_values)
pre_values = {port: port.pre for port in model.get_all_ports(self.system) if hasattr(port, "pre")}
self.pre.update(pre_values)
return self # to be able to do state = CrestState(system).save()
def update(self):
self._hash = None # reset hash
self.save()
def apply(self, system=None):
"""Applies the stored state to the stored system"""
for entity, state in self.states.items():
try:
entity.current = state
except:
print(entity)
breakpoint()
for port, value in self.ports.items():
port.value = value
for port, value in self.pre.items():
port.pre = value
def create(self):
"""Creates a copy of the system with the given state"""
newsys = copy.deepcopy(self.system)
for entity, state in self.states.items():
model.get_equivalent_in_system(self.system, entity, newsys).current = \
model.get_equivalent_in_system(self.system, state, newsys)
for port, value in self.ports.items():
model.get_equivalent_in_system(self.system, port, newsys).value = value
for port, value in self.pre.items():
model.get_equivalent_in_system(self.system, port, newsys).pre = value
return newsys
def __eq__(self, other):
"""
Returns whether the state is the same,
i.e. whether the other's entity states are the same and whether the port values are the same.
"""
# TODO: should this use the hash function?
if isinstance(other, model.Entity):
other = SystemState(other).save()
if isinstance(other, SystemState):
states_match = all(item in self.states.items() for item in other.states.items())
ports_match = all(item in self.ports.items() for item in other.ports.items())
pres_match = all(item in self.pre.items() for item in other.pre.items())
return states_match and ports_match and pres_match
def __hash__(self):
"""Hashing the three dictionaries, so we can quickly decide whether they're the same"""
big_dict = list(self.states.items()) + list(self.ports.items()) + list(self.pre.items())
return hash(frozenset(big_dict))
def diff(self, other):
"""
Returns triplets for states and ports that don't have the same values:
- (entity, self.states[entity], other.states[entity])
- (port, self.ports[port], other.ports[port])
- (port, self.pre[port], other.pre[port])
We probably need to add a means to distinguish port and pre values...
"""
states = [(entity, state, other.states[entity]) for entity, state in self.states.items() if state is not other.states[entity]]
ports = [(port, value, other.ports.get(port, None)) for port, value in self.ports.items() if value != other.ports.get(port, None)]
pre = [(port, value, other.pre.get(port, None)) for port, value in self.pre.items() if value is not other.pre.get(port, None)]
return states, ports, pre
def serialize(self):
if len(self.states) == 0: # this means we haven't saved yet
self.save()
ss = SystemState(None)
ss.states = {model.get_path_to_attribute(self.system, entity): model.get_path_to_attribute(self.system, state) for entity, state in self.states.items()}
ss.ports = {model.get_path_to_attribute(self.system, port): value for port, value in self.ports.items()}
ss.pre = {model.get_path_to_attribute(self.system, port): value for port, value in self.pre.items()}
return ss
def deserialize(self, system):
for entity, state in self.states.items():
if entity == "":
system.current = operator.attrgetter(state)(system)
else:
operator.attrgetter(entity)(system).current = operator.attrgetter(state)(system)
for port, value in self.ports.items():
operator.attrgetter(port)(system).value = value
for port, value in self.pre.items():
operator.attrgetter(port)(system).pre = value  # restore the stored pre value
return SystemState(system).save()
class StateSpaceCalculator(Simulator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.record_traces = False # don't do logging here, we don't need it
self.dependencyOrder_cache = {}
def get_DO_cached(self, entity):
if entity not in self.dependencyOrder_cache:
self.dependencyOrder_cache[entity] = {}
if entity.current not in self.dependencyOrder_cache[entity]:
# the dependency order is defined by the state we're in. Nothing else.
self.dependencyOrder_cache[entity][entity.current] = DO.get_entity_modifiers_in_dependency_order(entity)
return self.dependencyOrder_cache[entity][entity.current]
def advance_and_stabilise(self, entity, time):
""" saves the transitions in a list """
# logger.debug(f"Time: {self.global_time} | Advancing {time} and stabilising entity {entity._name} ({entity.__class__.__name__})")
for port in api.get_targets(entity): # + api.get_targets(entity):
port.pre = port.value
systemstates = [ (SystemState(self.system).save(), []) ]
for mod in self.get_DO_cached(entity): #DO.get_entity_modifiers_in_dependency_order(entity):
if isinstance(mod, model.Influence):
# logger.debug(f"Time: {self.global_time} | Triggering influence {mod._name} in entity {entity._name} ({entity.__class__.__name__})")
for (sysstate, transitions) in systemstates:
sysstate.apply() # restores the captured state
newval = self._get_influence_function_value(mod)
if newval != mod.target.value:
# logger.info(f"Time: {self.global_time} | Port value changed: {mod.target._name} ({mod.target._parent._name}) {mod.target.value} -> {newval}")
mod.target.value = newval
# self.stategraph.add_edge(sysstate, SystemState(self.system).save(), modtype="influence", modifier=mod, time=time, entity=entity)
sysstate.update() # store the current values
elif isinstance(mod, model.Update):
# logger.debug(f"Triggering update {mod._name} in entity {entity._name} ({entity.__class__.__name__})")
for (sysstate, transitions) in systemstates:
sysstate.apply()
newval = self._get_update_function_value(mod, time)
if newval != mod.target.value:
# logger.info(f"Time: {self.global_time} | Port value changed: {mod.target._name} ({mod.target._parent._name}) {mod.target.value} -> {newval}")
mod.target.value = newval
# self.stategraph.add_edge(sysstate, SystemState(self.system).save(), modtype="update", modifier=mod, time=time, entity=entity)
sysstate.update()
elif isinstance(mod, model.Entity):
new_systemstates = []
for (sysstate, transitions) in systemstates:
sysstate.apply()
new_systemstates.extend(
[ (new_state, transitions + new_trans) for (new_state, new_trans) in self.advance_and_stabilise(mod, time)]
)
systemstates = new_systemstates # the returned states are the new ones
""" store pre's """
for (sysstate, transitions) in systemstates:
sysstate.apply()
# set pre again, for the actions that are triggered after the transitions
for port in api.get_targets(entity): # + api.get_targets(entity):
port.pre = port.value
sysstate.update()
""" check if transitions are enabled and do them """
new_systemstates = []
for (sysstate, transitions) in systemstates:
sysstate.apply()
# returns the new transition_states or
# EMPTY LIST if no transitions fired (!!!)
# this is important and a convention here
states_after_transitions = self.transition(entity)
if len(states_after_transitions) > 0:
for (tstate, ttransitions) in states_after_transitions:
tstate.apply()
new_systemstates.extend(
[ (new_state, ttransitions + new_trans) for (new_state, new_trans) in self.advance_and_stabilise(entity, 0)]
) # we already advanced time-wise, but now make sure that we're stable (only if we fired a transition...)
else:
new_systemstates.append( (sysstate, transitions) ) # nothing changed, so keep this state
# logger.debug(f"Finished advancing {time} and stabilising entity {entity._name} ({entity.__class__.__name__})")
return new_systemstates
def transition(self, entity):
# logger.debug(f"transitions in entity {entity._name} ({entity.__class__.__name__})")
transitions_from_current_state = [t for t in model.get_transitions(entity) if t.source is entity.current]
enabled_transitions = [t for t in transitions_from_current_state if self._get_transition_guard_value(t)]
state_before = SystemState(self.system).save() # backup the state
states_after = []
for transition in enabled_transitions:
state_before.apply() # reset to original state
entity.current = transition.target
# logger.info(f"Time: {self.global_time} | Firing transition <<{transition._name}>> in {entity._name} ({entity.__class__.__name__}) : {transition.source._name} -> {transition.target._name} | current global time: {self.global_time}")
transition_updates = [up for up in model.get_updates(transition._parent) if up.state is transition] # FIXME: until we completely switched to only allowing actions...
actions = [a for a in model.get_actions(transition._parent) if a.transition is transition]
for act in actions + transition_updates:
newval = self._get_action_function_value(act)
if newval != act.target.value:
act.target.value = newval
state_after = SystemState(self.system).save()
states_after.append( (state_after, [transition]) )
# return the new states if there are any (this means that empty list means no transitions were fired)
# logger.debug(f"finished transitions in entity {entity._name} ({entity.__class__.__name__}): Created {len(states_after)} new states!")
return states_after
def advance_to_nbct(self):
saved_state = SystemState(self.system).save() # save system state so we can reset it
nbct = self.next_behaviour_change_time()
if nbct is None: # no behaviour change and no next transition through time advance
return [], None
dt = nbct[0]
if dt > 0:
succ_states_and_transitions = self.advance(dt)
else:
succ_states_and_transitions = self.stabilise()
# succ_states = {s for s, ts in succ_states_and_transitions} # reduce to set
serialized = []
for succ_state, transitions in succ_states_and_transitions:
ser_succ_state = succ_state.serialize()
ser_transitions = [model.get_path_to_attribute(self.system, trans) for trans in transitions]
serialized.append( (ser_succ_state, ser_transitions) )
# print(serialized)
saved_state.apply() # reset system state
return serialized, dt
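# --- Usage sketch (illustrative only; not part of the original module) ---
# Wiring the thread-based crawler defined above: seed a queue with the saved initial
# state, let a small pool of CrawlerThreads compute successors, and collect the
# (node, successor_transitions, dt) triples. `root_system` is a hypothetical,
# fully-built model entity; the module-level `queue` import used by CrawlerThread
# is assumed to be present.
def _example_thread_crawl(root_system, num_threads=2):
    job_queue = queue.Queue()
    results = []
    job_queue.put(SystemState(root_system).save())   # initial state as the first job
    workers = [CrawlerThread(job_queue, results, root_system) for _ in range(num_threads)]
    for w in workers:
        w.start()
    job_queue.join()        # block until every queued node has been processed
    for w in workers:
        w.stop()            # threads also stop once the queue stays empty for `timeout` seconds
        w.join()
    return results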
|
import json
import random
import requests
import dydx.util as utils
import dydx.constants as consts
import dydx.solo_orders as solo_orders
import dydx.perp_orders as perp_orders
from decimal import Decimal
from dydx.eth import Eth
from .exceptions import DydxAPIError
class Client(object):
BASE_API_URI = 'https://api.dydx.exchange'
def __init__(
self,
private_key,
account_number=0,
node=None
):
self.private_key = utils.normalize_private_key(private_key)
self.account_number = account_number
self.public_address = utils.private_key_to_address(self.private_key)
self.session = self._init_session()
self.eth = Eth(
node=node,
private_key=self.private_key,
public_address=self.public_address,
account_number=self.account_number
)
# -----------------------------------------------------------
# Helper Methods
# -----------------------------------------------------------
def _init_session(self):
session = requests.session()
session.headers.update({
'Accept': 'application/json',
'Content-Type': 'application/json',
'User-Agent': 'dydx/python'
})
return session
def _request(self, method, uri, **kwargs):
complete_uri = self.BASE_API_URI + uri
response = getattr(self.session, method)(complete_uri, **kwargs)
if not str(response.status_code).startswith('2'):
raise DydxAPIError(response)
return response.json()
def _get(self, *args, **kwargs):
return self._request('get', *args, **kwargs)
def _post(self, *args, **kwargs):
return self._request('post', *args, **kwargs)
def _put(self, *args, **kwargs):
return self._request('put', *args, **kwargs)
def _delete(self, *args, **kwargs):
return self._request('delete', *args, **kwargs)
def _make_solo_order(
self,
market,
side,
amount,
price,
expiration=None,
limitFee=None,
postOnly=False,
):
'''
Make an order object
:param market: required
:type market: str in list
["WETH-DAI", "WETH-USDC", "DAI-USDC"]
:param side: required
:type side: str in list ["BUY", "SELL"]
:param amount: required
:type amount: number
:param price: required
:type price: Decimal
:param expiration: optional, defaults to 28 days from now
:type expiration: number
:param limitFee: optional, overrides the default limitFee
:type limitFee: number
:param postOnly: optional, defaults to False
:type postOnly: bool
:returns: Order
:raises: DydxAPIError
'''
baseMarket, quoteMarket = utils.pair_to_base_quote_markets(market)
isBuy = utils.get_is_buy(side)
if limitFee is None:
limitFee = consts.DEFAULT_LIMIT_FEE
order = {
'salt': random.randint(0, 2**256),
'isBuy': isBuy,
'baseMarket': baseMarket,
'quoteMarket': quoteMarket,
'amount': int(float(amount)),
'limitPrice': price,
'triggerPrice': Decimal(0),
'limitFee': limitFee,
'makerAccountOwner': self.public_address,
'makerAccountNumber': self.account_number,
'expiration': expiration or utils.epoch_in_four_weeks(),
}
order['typedSignature'] = \
solo_orders.sign_order(order, self.private_key)
return order
def _make_perp_order(
self,
market,
side,
amount,
price,
expiration=None,
limitFee=None,
postOnly=False,
):
'''
Make an order object
:param market: required
:type market: str in list [
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param side: required
:type side: str in list ["BUY", "SELL"]
:param amount: required
:type amount: number
:param price: required
:type price: Decimal
:param expiration: optional, defaults to 28 days from now
:type expiration: number
:param limitFee: optional, overrides the default limitFee
:type limitFee: number
:param postOnly: optional, defaults to False
:type postOnly: bool
:returns: Order
:raises: DydxAPIError
'''
isBuy = utils.get_is_buy(side)
if limitFee is None:
limitFee = consts.DEFAULT_LIMIT_FEE
order = {
'salt': random.randint(0, 2**256),
'isBuy': isBuy,
'amount': int(float(amount)),
'limitPrice': price,
'triggerPrice': Decimal(0),
'limitFee': limitFee,
'maker': self.public_address,
'taker': consts.TAKER_ACCOUNT_OWNER,
'expiration': expiration or utils.epoch_in_four_weeks(),
}
order['typedSignature'] = \
perp_orders.sign_order(order, market, self.private_key)
return order
# -----------------------------------------------------------
# Public API
# -----------------------------------------------------------
def get_pairs(
self
):
'''
Return all tradable pairs
:returns: list of trading pairs
:raises: DydxAPIError
'''
return self._get('/v2/markets')
def get_my_balances(
self
):
'''
Return balances for the loaded account
:returns: list of balances
:raises: DydxAPIError
'''
return self.get_balances(
address=self.public_address,
number=self.account_number
)
def get_balances(
self,
address,
number=0
):
'''
Return balances for an address and account number
:param address: required
:type address: str (address)
:param number: optional, defaults to 0
:type number: number
:returns: list of balances
:raises: DydxAPIError
'''
params = utils.dict_to_query_params({
'number': number
})
return self._get('/v1/accounts/' + address + params)
def get_my_perpetual_balances(
self
):
'''
Return perpetual balances for the loaded account
:returns: list of balances
:raises: DydxAPIError
'''
return self.get_perpetual_balances(address=self.public_address)
def get_perpetual_balances(
self,
address
):
'''
Return perpetual balances for an address
:param address: required
:type address: str (address)
:returns: list of balances
:raises: DydxAPIError
'''
return self._get('/v1/perpetual-accounts/' + address)
def get_my_orders(
self,
market,
limit=None,
startingBefore=None,
status=None
):
'''
Return open orders for the loaded account
:param market: optional
:type market: str[] of valid markets [
"WETH-DAI",
"DAI-USDC",
"WETH-USDC",
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param limit: optional, defaults to 100
:type limit: number
:param startingBefore: optional, defaults to now
:type startingBefore: str date and time (ISO-8601)
:param status: optional
:type status: str[] of valid statuses
["PENDING",
"OPEN",
"FILLED",
"PARTIALLY_FILLED",
"CANCELED",
"UNTRIGGERED"]
:returns: list of existing orders
:raises: DydxAPIError
'''
return self.get_orders(
market=market,
status=status,
accountOwner=self.public_address,
accountNumber=self.account_number,
limit=limit,
startingBefore=startingBefore
)
def get_orders(
self,
market=None,
side=None,
status=None,
orderType=None,
accountOwner=None,
accountNumber=None,
limit=None,
startingBefore=None,
):
'''
Returns all open orders
:param market: optional
:type market: str[] of valid markets [
"WETH-DAI",
"DAI-USDC",
"WETH-USDC",
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param side: optional
:type side: str in list ["BUY", "SELL"]
:param status: optional
:type status: str[] of valid statuses
["PENDING",
"OPEN",
"FILLED",
"PARTIALLY_FILLED",
"CANCELED",
"UNTRIGGERED"]
:param orderType: optional
:type orderType: str[] of valid order types
["LIMIT", "ISOLATED_MARKET", "STOP_LIMIT"]
:param accountOwner: optional
:type accountOwner: str (address)
:param accountNumber: optional
:type accountNumber: number
:param limit: optional, defaults to 100
:type limit: number
:param startingBefore: optional, defaults to now
:type startingBefore: str date and time (ISO-8601)
:returns: list of existing orders
:raises: DydxAPIError
'''
params = utils.dict_to_query_params({
'market': None if market is None else ','.join(market),
'side': side,
'status': None if status is None else ','.join(status),
'orderType': None if orderType is None else ','.join(orderType),
'accountOwner': accountOwner,
'accountNumber': accountNumber,
'limit': limit,
'startingBefore': startingBefore
})
return self._get('/v2/orders' + params)
def get_order(
self,
orderId,
):
'''
Return an order by id
:param orderId: required
:type orderId: str
:returns: existing order
:raises: DydxAPIError
'''
return self._get('/v2/orders/'+orderId)
def get_my_fills(
self,
market,
limit=None,
startingBefore=None
):
'''
Return historical fills for the loaded account
:param market: optional
:type market: str[] of valid markets [
"WETH-DAI",
"DAI-USDC",
"WETH-USDC",
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param limit: optional, defaults to 100
:type limit: number
:param startingBefore: optional, defaults to now
:type startingBefore: str date and time (ISO-8601)
:returns: list of processed fills
:raises: DydxAPIError
'''
return self.get_fills(
market=market,
accountOwner=self.public_address,
accountNumber=self.account_number,
transactionHash=None,
limit=limit,
startingBefore=startingBefore
)
def get_fills(
self,
market=None,
side=None,
accountOwner=None,
accountNumber=None,
transactionHash=None,
limit=None,
startingBefore=None,
):
'''
Returns all historical fills
:param market: optional
:type market: str[] of valid markets [
"WETH-DAI",
"DAI-USDC",
"WETH-USDC",
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param side: optional
:type side: str in list ["BUY", "SELL"]
:param accountOwner: optional
:type accountOwner: str (address)
:param accountNumber: optional
:type accountNumber: number
:param transactionHash: optional
:type transactionHash: str (hash)
:param limit: optional, defaults to 100
:type limit: number
:param startingBefore: optional, defaults to now
:type startingBefore: str date and time (ISO-8601)
:returns: list of existing fills
:raises: DydxAPIError
'''
params = utils.dict_to_query_params({
'market': None if market is None else ','.join(market),
'side': side,
'accountOwner': accountOwner,
'accountNumber': accountNumber,
'transactionHash': transactionHash,
'limit': limit,
'startingBefore': startingBefore
})
return self._get('/v2/fills' + params)
def get_trades(
self,
market=None,
side=None,
accountOwner=None,
accountNumber=None,
transactionHash=None,
limit=None,
startingBefore=None,
):
'''
Returns all historical trades
:param market: optional
:type market: str[] of valid markets [
"WETH-DAI",
"DAI-USDC",
"WETH-USDC",
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param side: optional
:type side: str in list ["BUY", "SELL"]
:param accountOwner: optional
:type accountOwner: str (address)
:param accountNumber: optional
:type accountNumber: number
:param transactionHash: optional
:type transactionHash: str (hash)
:param limit: optional, defaults to 100
:type limit: number
:param startingBefore: optional, defaults to now
:type startingBefore: str date and time (ISO-8601)
:returns: list of existing trades
:raises: DydxAPIError
'''
params = utils.dict_to_query_params({
'market': None if market is None else ','.join(market),
'side': side,
'accountOwner': accountOwner,
'accountNumber': accountNumber,
'transactionHash': transactionHash,
'limit': limit,
'startingBefore': startingBefore,
})
return self._get('/v2/trades' + params)
def get_my_trades(
self,
market,
limit=None,
startingBefore=None
):
'''
Return historical trades for the loaded account
:param market: required
:type market: list of str
:param limit: optional, defaults to 100
:type limit: number
:param startingBefore: optional, defaults to now
:type startingBefore: str date and time (ISO-8601)
:returns: list of processed trades
:raises: DydxAPIError
'''
return self.get_trades(
market=market,
accountOwner=self.public_address,
accountNumber=self.account_number,
limit=limit,
startingBefore=startingBefore
)
def place_order(
self,
market,
side,
amount,
price,
expiration=None,
limitFee=None,
fillOrKill=False,
postOnly=False,
clientId=None,
cancelAmountOnRevert=None,
cancelId=None,
):
'''
Create an order
:param market: required
:type market: str in list [
"WETH-DAI",
"WETH-USDC",
"DAI-USDC",
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param side: required
:type side: str in list ["BUY", "SELL"]
:param amount: required
:type amount: number
:param price: required
:type price: Decimal
:param expiration: optional, defaults to 28 days from now
:type expiration: number
:param limitFee: optional, defaults to None
:type limitFee: Decimal
:param fillOrKill: optional, defaults to False
:type fillOrKill: bool
:param postOnly: optional, defaults to False
:type postOnly: bool
:param clientId: optional, defaults to None
:type clientId: str
:param cancelAmountOnRevert: optional, defaults to None
:type cancelAmountOnRevert: bool
:param cancelId: optional, defaults to None
:type cancelId: str
:returns: Order
:raises: DydxAPIError
'''
if market in [
consts.PAIR_PBTC_USDC,
consts.PAIR_PLINK_USDC,
consts.PAIR_WETH_PUSD,
]:
order = self._make_perp_order(
market,
side,
amount,
price,
expiration,
limitFee,
postOnly,
)
market_api_request = market
order_api_request = {
'isBuy': order['isBuy'],
'isDecreaseOnly': False,
'amount': str(order['amount']),
'limitPrice': utils.decimalToStr(order['limitPrice']),
'triggerPrice': utils.decimalToStr(order['triggerPrice']),
'limitFee': utils.decimalToStr(order['limitFee']),
'maker': order['maker'],
'taker': order['taker'],
'expiration': str(order['expiration']),
'salt': str(order['salt']),
'typedSignature': order['typedSignature'],
}
else:
order = self._make_solo_order(
market,
side,
amount,
price,
expiration,
limitFee,
postOnly,
)
market_api_request = None
order_api_request = {
'isBuy': order['isBuy'],
'isDecreaseOnly': False,
'baseMarket': str(order['baseMarket']),
'quoteMarket': str(order['quoteMarket']),
'amount': str(order['amount']),
'limitPrice': utils.decimalToStr(order['limitPrice']),
'triggerPrice': utils.decimalToStr(order['triggerPrice']),
'limitFee': utils.decimalToStr(order['limitFee']),
'makerAccountOwner': order['makerAccountOwner'],
'makerAccountNumber': str(order['makerAccountNumber']),
'expiration': str(order['expiration']),
'salt': str(order['salt']),
'typedSignature': order['typedSignature'],
}
return self._post('/v2/orders', data=json.dumps(
utils.remove_nones({
'fillOrKill': fillOrKill,
'postOnly': postOnly,
'clientId': clientId,
'cancelAmountOnRevert': cancelAmountOnRevert,
'cancelId': cancelId,
'market': market_api_request,
'order': order_api_request
})
))
def cancel_order(
self,
hash
):
'''
Cancel an order in a solo market.
:param hash: required
:type hash: str
:returns: Order
:raises: DydxAPIError
'''
signature = solo_orders.sign_cancel_order(hash, self.private_key)
return self._delete(
'/v2/orders/' + hash,
headers={'Authorization': 'Bearer ' + signature}
)
def cancel_perpetual_order(
self,
hash
):
'''
Cancel an order in a perpetual market.
:param hash: required
:type hash: str
:returns: Order
:raises: DydxAPIError
'''
signature = perp_orders.sign_cancel_order(hash, self.private_key)
return self._delete(
'/v2/orders/' + hash,
headers={'Authorization': 'Bearer ' + signature}
)
def get_orderbook(
self,
market
):
'''
Get the active orderbook for a market
:param market: required, name of market (e.g. WETH-DAI)
:returns: { asks: OrderOnOrderbook[], bids: OrderOnOrderbook[] }
:raises: DydxAPIError
'''
return self._get('/v1/orderbook/' + market)
def get_market(
self,
market
):
'''
Get market from market pair
:param market: required
:type market: str in list
["WETH-DAI", "WETH-USDC", "DAI-USDC"]
:returns: { market: MarketMessageV2 }
:raises: DydxAPIError
'''
return self._get('/v2/markets/' + market)
def get_markets(
self
):
'''
Get all markets
:returns: { markets : { [market: str]: MarketMessageV2 } }
:raises: DydxAPIError
'''
return self._get('/v2/markets')
def get_perpetual_market(
self,
market
):
'''
Get market from market pair
:param market: required
:type market: str in list [
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:returns: { market: PerpetualMarket }
:raises: DydxAPIError
'''
return self._get('/v1/perpetual-markets/' + market)
def get_perpetual_markets(
self
):
'''
Get all markets
:returns: { markets : { [market: str]: PerpetualMarket } }
:raises: DydxAPIError
'''
return self._get('/v1/perpetual-markets')
def get_funding_rates(
self,
markets=None,
):
'''
Get the current and predicted funding rates.
IMPORTANT: The `current` value returned by this function is not active
until it has been mined on-chain, which may not happen for some period
of time after the start of the hour. To get the funding rate that is
currently active on-chain, use the get_perpetual_market() or
get_perpetual_markets() function.
The `current` rate is updated each hour, on the hour. The `predicted`
rate is updated each minute, on the minute, and may be null if no
premiums have been calculated since the last funding rate update.
:param markets: optional, defaults to all Perpetual markets
:type markets: str in list [
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:returns: {
[market: str]: { current: FundingRate, predicted: FundingRate }
}
:raises: DydxAPIError
'''
params = utils.dict_to_query_params({
'markets': None if markets is None else ','.join(markets),
})
return self._get('/v1/funding-rates' + params)
def get_historical_funding_rates(
self,
markets=None,
limit=None,
startingBefore=None,
):
'''
Get historical funding rates.
:param markets: optional, defaults to all Perpetual markets
:type markets: str in list [
"PBTC-USDC",
"PLINK-USDC",
"WETH-PUSD",
]
:param limit: optional, defaults to 100, which is the maximum
:type limit: number
:param startingBefore: optional, defaults to now
:type startingBefore: str date and time (ISO-8601)
:returns: { [market: str]: { history: FundingRate[] } }
:raises: DydxAPIError
'''
params = utils.dict_to_query_params({
'markets': None if markets is None else ','.join(markets),
'limit': limit,
'startingBefore': startingBefore,
})
return self._get(
'/v1/historical-funding-rates' + params,
)
def get_funding_index_price(
self,
markets=None,
):
'''
Get the index price used in the funding rate calculation.
:param markets: optional, defaults to all Perpetual markets
:type markets: str in list [
"PBTC-USDC",
"PLINK_USDC",
"WETH-PUSD",
]
:returns: { [market: str]: { price: str } }
:raises: DydxAPIError
'''
params = utils.dict_to_query_params({
'markets': None if markets is None else ','.join(markets),
})
return self._get('/v1/index-price' + params)
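# --- Usage sketch (illustrative only; not part of the original client) ---
# Minimal example of driving the Client defined above. The private key is a
# placeholder, and the amount is illustrative (not scaled to token decimals).
def _example_usage():
    client = Client(private_key='0x' + '1' * 64)   # placeholder key, not a real credential
    markets = client.get_markets()                  # public endpoint, no account needed
    balances = client.get_my_balances()             # balances for the loaded account
    order = client.place_order(
        market='WETH-DAI',                          # solo market, see place_order() above
        side='BUY',
        amount=10000000000000000,                   # illustrative base-unit amount
        price=Decimal('200'),
    )
    return markets, balances, order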
|
import sys
sys.stdout.write('Hello\nWorld\n')
|
# For live classification
from approaches.approach import Multiclass_Logistic_Regression, Perceptron, Sklearn_SVM
import comp_vis.calibration_tools as ct
import comp_vis.data_tools as dt
import comp_vis.img_tools as it
import comp_vis.live_tools as lt
import numpy as np
import os
import random
import sys
import time
already_cropped = True
# Check the correct number of arguments have been provided
if len(sys.argv) < 3 or len(sys.argv) > 5:
sys.stderr.write("Error: Invalid argument configuration.\n" +
"Possible argument configurations examples: \n" +
"live_demo.py perceptron [camera_num] [bones_folder_path] [rocks_folder_path]\n" +
"live_demo.py perceptron [camera_num] [location to previous weights]")
exit()
# The first command line argument describes the ML approach to be used (ex: perceptron)
# The second argument (int) specifies which camera to use.
approach_name = sys.argv[1]
camera_num = int(sys.argv[2])
try:
model = dt.string_to_model(approach_name)
except ValueError:
exit()
# If there are two additional arguments, then these arguments should be a folder containing images of bones,
# followed by a folder containing images of rocks, to be used in the training process.
if len(sys.argv) == 4:
# Then load weight from location
if not os.path.isfile(sys.argv[3]):
sys.stderr.write("Error: No file found at path " + sys.argv[3])
exit()
model.load_weights(sys.argv[3])
elif len(sys.argv) == 5:
# Load bones and rocks
bones_path = sys.argv[3]
rocks_path = sys.argv[4]
if not os.path.exists(bones_path):
sys.stderr.write("Error: Invalid path " + bones_path)
exit()
if not os.path.exists(rocks_path):
sys.stderr.write("Error: Invalid path " + rocks_path)
exit()
# Load images from selected path
bone_images_whole = it.load_images(bones_path)
rock_images_whole = it.load_images(rocks_path)
# Translate images to the key data we'll train on
bone_data = dt.images_to_data(bone_images_whole, 1)
rock_data = dt.images_to_data(rock_images_whole, 0)
# Combine datasets
data = np.append(bone_data, rock_data, 0)
training_data, testing_data = dt.training_and_testing_sep(data, 0.50)
model.train(training_data)
print("Accuracy on loaded data: " + str(model.assess_accuracy(testing_data)))
thresh_s = ct.calibrate_thressholding()
lt.live_labeling(model, camera_no=camera_num, threshold_settings=thresh_s)
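# Example invocations (illustrative; the camera index and paths are placeholders):
#   python live_demo.py perceptron 0 saved_weights_file
#   python live_demo.py perceptron 0 path/to/bone_images path/to/rock_images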
|
import os
import yaml
import logging
from multiprocessing import Queue
from flask import Flask, jsonify, request
from .worker import Worker
logger = logging.getLogger('triggerflow-worker')
app = Flask(__name__)
event_queue = None
workspace = None
worker = None
@app.route('/', methods=['POST'])
def run():
"""
Get an event and forward it into an internal queue
"""
def error():
response = jsonify({'error': 'The worker did not receive a dictionary as an argument.'})
response.status_code = 404
return response
message = request.get_json(force=True, silent=True)
if message and not isinstance(message, dict):
return error()
print('Receiving message...')
event_queue.put(message)
print('Message received')
return jsonify('Message received'), 201
def main():
global event_queue, workspace, worker
workspace = os.environ.get('WORKSPACE')
print('Starting workspace {}'.format(workspace))
print('Loading private credentials')
with open('config.yaml', 'r') as config_file:
credentials = yaml.safe_load(config_file)
event_queue = Queue()
worker = Worker(workspace, credentials, event_queue)
worker.run()
main()
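# --- Usage sketch (illustrative only; not part of the original worker) ---
# Forward an event to the internal queue by POSTing JSON to the endpoint above.
# Flask's built-in test client is used so the sketch does not depend on how the
# app is actually served; it assumes main() has already initialised event_queue.
def _example_post_event():
    event = {'type': 'event.triggered', 'subject': 'example'}  # hypothetical payload
    with app.test_client() as client:
        response = client.post('/', json=event)
    return response.status_code  # 201 when the message was accepted and queued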
|
"""
This module provides the AIPSTask class. It adapts the Task class from
the Task module to be able to run classic AIPS tasks:
>>> imean = AIPSTask('imean')
The resulting class instance has all associated adverbs as attributes:
>>> print imean.ind
0.0
>>> imean.ind = 1
>>> print imean.indisk
1.0
>>> imean.indi = 2.0
>>> print imean.ind
2.0
It also knows the range for these attributes:
>>> imean.ind = -1
Traceback (most recent call last):
...
ValueError: value '-1.0' is out of range for attribute 'indisk'
>>> imean.ind = 10.0
Traceback (most recent call last):
...
ValueError: value '10.0' is out of range for attribute 'indisk'
>>> imean.inc = 'UVDATA'
>>> print imean.inclass
UVDATA
>>> imean.blc[1:] = [128, 128]
>>> print imean.blc
[None, 128.0, 128.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> imean.blc = AIPSList([256, 256])
>>> print imean.blc
[None, 256.0, 256.0, 0.0, 0.0, 0.0, 0.0, 0.0]
It doesn't hurt to apply AIPSList to a scalar:
>>> AIPSList(1)
1
And it works on matrices (lists of lists) too:
>>> AIPSList([[1,2],[3,4],[5,6]])
[None, [None, 1, 2], [None, 3, 4], [None, 5, 6]]
It should also work for strings:
>>> AIPSList('foobar')
'foobar'
>>> AIPSList(['foo', 'bar'])
[None, 'foo', 'bar']
"""
# Copyright (C) 2005 Joint Institute for VLBI in Europe
# Copyright (C) 2007,2011,2016 Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Generic Python stuff
from __future__ import absolute_import
from __future__ import print_function
import pydoc, select, fcntl, signal
# Global AIPS defaults.
from AIPS import AIPS
# Generic Task implementation.
from Task import Task, List
# Generic Python stuff.
import glob, os, pickle, sys
from six.moves import range
def count_entries(l):
"""Count number of last non blank/zero entries in list l"""
count = len(l)
c = count
if type(l[0])==int: # integers
for j in range(c-1,0,-1):
if (l[j]==0):
count -= 1
else:
break
count = max(4,count) # At least 4
elif type(l[0])==float: # float
for j in range(c-1,0,-1):
if (l[j]==0.0):
count -= 1
else:
break
count = max(2,count) # At least 2
elif type(l[0])==str: # string
for j in range(c-1,0,-1):
if (l[j]==None) or ((len(l[j])>0) and (not l[j].isspace())):
count -= 1
else:
break
count = max(1,count) # At least 1
else: # Something else
count = len(l)
count = min (count, len(l))
# Trap AIPS lists
if l[0]==None:
count -= 1
return count
# end count_entries
class AIPSTask(Task):
"""This class implements running AIPS tasks.
The AIPSTask class handles client-side task-related operations.
Actual task definition and operations are handled by server-side
proxies. For local operations, the server-side functionality is
implemented in the same address space but remote operation is
through an xmlrpc interface. Tasks are run as separate processes
in all cases.
Each defined disk has an associated proxy, either local or remote.
A proxy is a module with interface functions,
local proxies are class modules from subdirectory Proxy with the
same name (i.e. ObitTask) and the server functions are implemented
there. Remote proxies are specified by a URL and a proxy from the
xmlrpclib module is used.
When an object is created, the task specific parameters and
documentation are retrieved by parsing the task Help file and the
POPSDAT.HLP file for parameter definitions. This is performed on
the server-side.
"""
# Package.
_package = 'AIPS'
# List of adverbs referring to data.
_data_adverbs = ['indata', 'outdata',
'in2data', 'in3data', 'in4data', 'out2data']
# List of adverbs referring to disks.
_disk_adverbs = ['indisk', 'outdisk',
'in2disk', 'in3disk', 'in4disk', 'out2disk']
# List of adverbs referring to file names.
_file_adverbs = ['infile', 'infile2', 'outfile', 'outprint',
'ofmfile', 'boxfile', 'oboxfile']
# Default version.
version = os.environ.get('VERSION', 'NEW')
# Default user number.
userno = 0
# Default verbosity level.
msgkill = 0
# Default to batch mode.
isbatch = 32000
# Run synchronous?
doWait = False
# Logging file?
logFile = ""
def __init__(self, name, **kwds):
""" Create AIPS task object
Creates task object and calls server function to parse the
task help and POPSDAT.HLP files to obtain task specific
parameters and documentation.
Following is a list of class members:
_default_dict = Dictionary with default values of parameters
_input_list = List of input parameters in order
_output_list = List of output parameters in order
_min_dict = Parameter minimum values as a List
_max_dict = Parameter maximum values as a List
_hlp_dict = Parameter descriptions (list of strings)
as a dictionary
_strlen_dict = String parameter lengths as dictionary
_help_string = Task Help documentation as list of strings
_explain_string = Task Explain documentation as list of strings
_short_help = One line description of task
_message_list = list of execution messages
Current parameter values are given as class members.
"""
if not self._task_type:
self._task_type = 'AIPS'
Task.__init__(self)
self._name = name
self._input_list = []
self._output_list = []
self._message_list = []
self._remainder = "" # Partial message buffer
# Optional arguments.
if 'version' in kwds:
self.version = kwds['version']
else:
if 'AIPS_VERSION' in os.environ:
self.version = os.environ["AIPS_VERSION"]
# Update default user number.
if self.__class__.userno == 0:
self.__class__.userno = AIPS.userno
# See if there is a proxy that can hand us the details for
# this task.
params = None
for proxy in AIPS.proxies:
try:
inst = getattr(proxy, self.__class__.__name__)
params = inst.params(name, self.version)
except Exception as exception:
print(exception)
if AIPS.debuglog:
print(exception, file=AIPS.debuglog)
continue
break
if not params:
msg = "%s task '%s' is not available" % (self._package, name)
raise RuntimeError(msg)
# The XML-RPC proxy will return the details as a dictionary,
# not a class.
self._default_dict = params['default_dict']
self._input_list = params['input_list']
self._output_list = params['output_list']
self._min_dict = params['min_dict']
self._max_dict = params['max_dict']
self._hlp_dict = params['hlp_dict']
self._strlen_dict = params['strlen_dict']
self._help_string = params['help_string']
self._explain_string = params['explain_string']
self._short_help = params['short_help']
if self._task_type=='OBIT':
self._type_dict = params['type_dict']
self._dim_dict = params['dim_dict']
for adverb in self._default_dict:
if type(self._default_dict[adverb]) == list:
value = self._default_dict[adverb]
self._default_dict[adverb] = List(self, adverb, value)
# Initialize all adverbs to their default values.
self.__dict__.update(self._default_dict)
# The maximum value for disk numbers is bogus.
for name in self._disk_adverbs:
if name in self._max_dict:
self._max_dict[name] = float(len(AIPS.disks) - 1)
return # __init__
def __eq__(self, other):
""" Check if two task objects are for the same task """
if self.__class__ != other.__class__:
return False
if self._name != other._name:
return False
if self.userno != other.userno:
return False
for adverb in self._input_list:
if self.__dict__[adverb] != other.__dict__[adverb]:
return False
continue
return True
def copy(self):
""" Return a copy of a given task object"""
task = AIPSTask(self._name, version=self.version)
task.userno = self.userno
for adverb in self._input_list:
task.__dict__[adverb] = self.__dict__[adverb]
continue
return task
def defaults(self):
"""Set adverbs to their defaults."""
self.__dict__.update(self._default_dict)
def __display_adverbs(self, adverbs, file=None):
"""Display task ADVERBS values and descriptions"""
inpList = self._short_help
inpList = inpList + "Adverbs Values "+\
" Comments\n"
inpList = inpList + "------------------------------------------"+\
"-----------------------------------------------------\n"
for adverb in adverbs:
#if self.__dict__[adverb] == '':
# print "'%s': ''" % adverb
#else:
# value = PythonList(self.__dict__[adverb])
# print "'%s': %s" % (adverb, value)
# pass
#continue
i = 0
j = 0
hlps = self._hlp_dict[adverb]
hlp = hlps[j]
value = PythonList(self.__dict__[adverb])
s1 = str(adverb)+" "
# Python is SO hateful
if str(value).startswith('['): # list
# How many nonzero/nonblank entries
ecount = count_entries(value)
s2=""
while i<ecount and 2+len(s2)+len(str(value[i])[:47])<50:
s2 = s2 + str(value[i])[:47]+", "
i = i+1
# remove final comma
if i==ecount:
s2 = s2[:len(s2)-2]
# Left justify
s2 = s2 + " "
inpList = inpList + "%12s%50s%s\n" % (s1[:11], s2[:49], hlp)
# Loop until done with values
doh = 0
while i<ecount:
# continuation of description?
j = j+1
if j<len(hlps):
hlp = hlps[j]
else:
hlp = " "
s2=""
while i<ecount and 2+len(s2)+len(str(value[i])[:47])<50:
s2 = s2 + str(value[i])[:47]+", "
i = i+1
# remove final comma
if i==ecount:
s2 = s2[:len(s2)-2]
# Left justify
s2 = s2 + " "
inpList = inpList + " %50s%s\n" % (s2[:49], hlp)
doh += 1;
if doh>ecount:
break
else: # Scalar
s2 = str(value)+" "
inpList = inpList + "%12s%50s%s\n" % (s1[:11], s2[:49], hlp)
# Any more parameter description lines?
s1 = " "
s2 = " "
j = j + 1
while j<len(hlps):
hlp = hlps[j]
j = j + 1
inpList = inpList + "%12s%50s%s\n" % (s1, s2, hlp)
if file:
fd = open(file, "a")
fd.write(inpList)
fd.close()
else:
pydoc.ttypager(inpList)
del inpList
def inputs(self, file=None):
"""Display all inputs for this task."""
self.__display_adverbs(self._input_list, file=file)
def outputs(self, file=None):
"""Display all outputs for this task."""
self.__display_adverbs(self._output_list, file=file)
def _retype(self, value):
""" Recursively transform a 'List' into a 'list' """
if type(value) == List:
value = list(value)
for i in range(1, len(value)):
value[i] = self._retype(value[i])
return value
def spawn(self):
"""Spawn the task.
Writes task input parameters, task parameter file and starts
the task asynchronously returning immediately. Messages must be
retrieved calling messages.
Returns (proxy, tid)
"""
if self.userno == 0:
raise RuntimeError("AIPS user number is not set")
input_dict = {}
for adverb in self._input_list:
input_dict[adverb] = self._retype(self.__dict__[adverb])
# Figure out what proxy to use for running the task, and
# translate the related disk numbers.
url = None
proxy = None
found = False
for adverb in self._disk_adverbs:
if adverb in input_dict:
disk = int(input_dict[adverb])
if disk == 0:
continue
if not url and not proxy:
url = AIPS.disks[disk].url
proxy = AIPS.disks[disk].proxy()
found = True
if AIPS.disks[disk].url != url:
raise RuntimeError("AIPS disks are not on the same machine")
input_dict[adverb] = float(AIPS.disks[disk].disk)
if not found:
raise RuntimeError("Unable to determine where to execute task")
# Adjust disks for proxy
self.adjust_disk(input_dict, url)
inst = getattr(proxy, self.__class__.__name__)
tid = inst.spawn(self._name, self.version, self.userno,
self.msgkill, self.isbatch, input_dict)
self._message_list = []
return (proxy, tid)
def finished(self, proxy, tid):
"""Determine if task has finished
Determine whether the task specified by PROXY and TID has
finished.
proxy = Proxy giving access to server
tid = Task id in pid table of process
"""
inst = getattr(proxy, self.__class__.__name__)
return inst.finished(tid)
def messages(self, proxy=None, tid=None):
"""Return task messages
Returns list of messages and appends them to the object's
message list.
proxy = Proxy giving access to server
tid = Task id in pid table of process
"""
# Bombs on remote call: if not proxy and not tid:
if not tid:
return self._message_list
inst = getattr(proxy, self.__class__.__name__)
messbuff = inst.messages(tid)
#print "MessBuff",messbuff
# Parse messages into complete lines
messages = self.parseMessage(messbuff)
if not messages:
return None
for message in messages:
self._message_list.append(message[1])
if message[0] > abs(self.msgkill):
#print message[1]
pass
continue
return [message[1] for message in messages]
def wait(self, proxy, tid):
"""Wait for the task to finish.
proxy = Proxy giving access to server
tid = Task id in pid table of process
"""
while not self.finished(proxy, tid):
pass
#self.messages(proxy, tid)
inst = getattr(proxy, self.__class__.__name__)
output_dict = inst.wait(tid)
for adverb in self._output_list:
self.__dict__[adverb] = output_dict[adverb]
continue
return
def feed(self, proxy, tid, banana):
"""Feed the task a BANANA.
Pass a message to a running task's stdin
proxy = Proxy giving access to server
tid = Task id in pid table of process
banana = text message to pass to task input
"""
inst = getattr(proxy, self.__class__.__name__)
return inst.feed(tid, banana)
def abort(self, proxy, tid, sig=signal.SIGTERM):
"""Abort the task specified by PROXY and TID.
Calls abort function for task tid on proxy.
None return value
proxy = Proxy giving access to server
tid = Task id in pid table of process to be terminated
sig = signal to sent to the task
"""
inst = getattr(proxy, self.__class__.__name__)
return inst.abort(tid, sig)
def go(self):
"""Run the task.
Writes task input parameters in the task parameter file and
starts the task synchronously returning only when the task
terminates. Messages are displayed as generated by the task,
saved in an array returned from the call and, if the task
member logFile is set, written to this file.
"""
(proxy, tid) = self.spawn()
#log = []
count = 0
rotator = ['|\b', '/\b', '-\b', '\\\b']
# Logging to file?
if len(self.logFile)>0:
AIPS.log = open(self.logFile,'a')
else:
AIPS.log = None
try:
try:
while not self.finished(proxy, tid):
messages = self.messages(proxy, tid)
if messages:
for message in messages:
print(message)
if AIPS.log:
if type(message)==str:
x=AIPS.log.write('%s\n' % message)
else:
x=AIPS.log.write('%s\n' % message[1])
#print "DEBUG message",messages
#log.extend(messages)
if AIPS.log:
AIPS.log.flush()
elif sys.stdout.isatty():
#sys.stdout.write(rotator[count % 4])
#sys.stdout.flush()
pass
events = select.select([sys.stdin.fileno()], [], [], 0)
if sys.stdin.fileno() in events[0]:
flags = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
flags |= os.O_NONBLOCK
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, flags)
message = sys.stdin.read(1024)
flags &= ~os.O_NONBLOCK
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, flags)
self.feed(proxy, tid, message)
rotator = []
pass
count += 1
continue
pass
except KeyboardInterrupt as exception:
self.abort(proxy, tid)
raise exception
self.wait(proxy, tid)
finally:
pass
if AIPS.log:
AIPS.log.close()
return #log
def __call__(self):
return self.go()
def __getattr__(self, name):
if name in self._data_adverbs:
class _AIPSData: pass
value = _AIPSData()
prefix = name.replace('data', '')
value.name = Task.__getattr__(self, prefix + 'name')
value.klass = Task.__getattr__(self, prefix + 'class')
value.disk = Task.__getattr__(self, prefix + 'disk')
value.seq = Task.__getattr__(self, prefix + 'seq')
return value
return Task.__getattr__(self, name)
def __setattr__(self, name, value):
if name in self._data_adverbs:
prefix = name.replace('data', '')
Task.__setattr__(self, prefix + 'name', value.name)
Task.__setattr__(self, prefix + 'class', value.klass)
Task.__setattr__(self, prefix + 'disk', value.disk)
Task.__setattr__(self, prefix + 'seq', value.seq)
else:
# We treat 'infile', 'outfile' and 'outprint' special.
# Instead of checking the length of the complete string,
# we only check the length of the final component of the
pathname. The backend will split off the directory
# component and use that as an "area".
attr = self._findattr(name)
#file_adverbs = ['infile', 'outfile', 'outprint']
if attr in self._file_adverbs and type(value) == str and \
os.path.dirname(value):
if len(os.path.basename(value)) > self._strlen_dict[attr] - 2:
msg = "string '%s' is too long for attribute '%s'" \
% (value, attr)
raise ValueError(msg)
self.__dict__[attr] = value
else:
Task.__setattr__(self, name, value)
pass
pass
return
def adjust_disk(self, dict, url):
"""Adjusts disk numbers and sets list of disks for proxy
Also converts lists to normal python lists
dict = task input dictionary
url = url of proxy, None = local
"""
# Fix lists
for x in dict:
if type(dict[x])== self.List or type(dict[x]) == list:
tlist=[]
for y in dict[x][1:]:
# List of AIPS lists?
if (type(y)== self.List or type(y) == list) and (y[0]==None):
y = y[1:]
tlist.append(y)
dict[x] = tlist
# AIPS data
AIPSdirs = []
for x in AIPS.disks:
if x!=None and x.url==url:
AIPSdirs.append(x.dirname)
#
# Save data directories
dict["AIPSdirs"] = AIPSdirs
# Adjust disk numbers, in AIPS everything is a float
for x in self._disk_adverbs:
if x in dict:
diskno = int(dict[x])
i = 1;
# Look for matching AIPS directory name
for y in AIPSdirs:
if AIPS.disks[diskno]:
if y==AIPS.disks[diskno].dirname:
dict[x] = float(i)
break
i = i+1
# DEBUG
#print "DEBUG",dict
return
pass # end class AIPSTask
class AIPSMessageLog:
# Default user number.
userno = -1
def __init__(self):
# Update default user number.
if self.userno == -1:
self.userno = AIPS.userno
return
def zap(self):
"""Zap message log."""
proxy = AIPS.disks[1].proxy()
inst = getattr(proxy, self.__class__.__name__)
return inst.zap(self.userno)
pass # class AIPSMessageLog
def AIPSList(list):
"""Transform a Python array into an AIPS array.
Returns a list suitable for using 1-based indices.
"""
try:
# Make sure we don't consider strings to be lists.
if str(list) == list:
return list
except:
pass
try:
# Insert 'None' at index zero, and transform LIST's elements.
_list = [None]
for l in list:
_list.append(AIPSList(l))
continue
return _list
except:
# Apparently LIST isn't a list; simply return it unchanged.
return list
def PythonList(list):
"""Transform an AIPS array into a Python array.
Returns a list suitable for using normal 0-based indices.
"""
try:
if list[0] != None:
return list
_list = []
for l in list[1:]:
_list.append(PythonList(l))
continue
return _list
except:
# Apparently LIST isn't a list; simply return it unchanged.
return list
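# --- Usage sketch (illustrative only; not part of the original module) ---
# Asynchronous use of an AIPS task, following the spawn()/finished()/messages()/wait()
# protocol documented above. The task name, adverb value and user number are placeholders.
def _example_spawn_imean():
    AIPS.userno = 100                      # hypothetical AIPS user number
    imean = AIPSTask('imean')
    imean.indisk = 1
    (proxy, tid) = imean.spawn()           # start the task and return immediately
    while not imean.finished(proxy, tid):
        imean.messages(proxy, tid)         # drain and store messages while it runs
    imean.wait(proxy, tid)                 # collect the output adverbs
    return imean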
# Tests.
if __name__ == '__main__':
import doctest, sys
results = doctest.testmod(sys.modules[__name__])
sys.exit(results[0])
|
"""
Main fallback class.
"""
import types
import numpy as np
import cupy as cp
from cupyx.fallback_mode import utils
class _RecursiveAttr:
"""
RecursiveAttr class to catch all attributes corresponding to numpy,
when user calls fallback_mode. numpy is an instance of this class.
"""
def __init__(self, numpy_object, cupy_object):
self._numpy_object = numpy_object
self._cupy_object = cupy_object
def __getattr__(self, attr):
"""
Catches attributes corresponding to numpy.
Runs recursively until the attribute gets called
or a numpy ScalarType is retrieved.
Args:
attr (str): Attribute of _RecursiveAttr class object.
Returns:
(_RecursiveAttr object, NumPy scalar):
Returns a _RecursiveAttr object with the new numpy_object and cupy_object.
Returns scalars if requested.
"""
# getting attr
numpy_object = getattr(self._numpy_object, attr)
cupy_object = getattr(self._cupy_object, attr, None)
# Retrieval of NumPy scalars
if isinstance(numpy_object, np.ScalarType):
return numpy_object
return _RecursiveAttr(numpy_object, cupy_object)
def __repr__(self):
if isinstance(self._numpy_object, types.ModuleType):
return "<numpy = module {}, cupy = module {}>".format(
self._numpy_object.__name__,
getattr(self._cupy_object, '__name__', None))
return "<numpy = {}, cupy = {}>".format(
self._numpy_object, self._cupy_object)
def __call__(self, *args, **kwargs):
"""
Gets invoked when last attribute of _RecursiveAttr class gets called.
Calls _cupy_object if it is not None, otherwise calls _numpy_object.
Args:
args (tuple): Arguments.
kwargs (dict): Keyword arguments.
Returns:
(res, ndarray): Result of utils._call_cupy or utils._call_numpy.
"""
# Not callable objects
if not callable(self._numpy_object):
raise TypeError("'{}' object is not callable".format(
type(self._numpy_object).__name__))
# Execute cupy method
if self._cupy_object is not None:
return utils._call_cupy(self._cupy_object, args, kwargs)
# Execute numpy method
return utils._call_numpy(self._numpy_object, args, kwargs)
numpy = _RecursiveAttr(np, cp)
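# --- Usage sketch (illustrative only; not part of the original module) ---
# The `numpy` object defined above stands in for the real numpy module: attribute
# access is resolved against both numpy and cupy, and the final call is dispatched
# to cupy when a matching cupy object exists, falling back to numpy otherwise.
def _example_fallback_usage():
    a = numpy.arange(10)          # cupy.arange exists -> dispatched to cupy
    m = numpy.matrix([[1, 2]])    # cupy has no `matrix` -> falls back to plain numpy
    return a, m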
|
# Write a program that reads something from the keyboard
# and prints all the possible information about it.
valor = input('Type something: ')
print('\tIts type is: {}'.format(type(valor)))
print('\tIs it alphabetic? {}'.format(valor.isalpha()))
print('\tIs it alphanumeric? {}'.format(valor.isalnum()))
print('\tIs it a decimal? {}'.format(valor.isdecimal()))
print('\tIs it numeric? {}'.format(valor.isnumeric()))
print('\tIs it whitespace? {}'.format(valor.isspace()))
print('\tIs it uppercase? {}'.format(valor.isupper()))
print('\tIs it lowercase? {}'.format(valor.islower()))
|
import numpy as np
import Algorithm_new as AL
import scipy.sparse as sparse
def Example():
beta = np.random.normal(1, 0.1, 30)
freq_1_r = np.random.poisson(0.1, [1000, 100])
freq_2_r = np.random.poisson(0.1, [1000, 100])
freq_1_e = np.random.poisson(0.1, [1000, 100])
freq_2_e = np.random.poisson(0.1, [1000, 100])
Cov_1 = np.diag(np.random.uniform(0, 1, 100))
Cov_2 = np.diag(np.random.uniform(0, 1, 100))
Base = np.random.normal(0, 1, [30, 100])
w2v_1 = np.matmul(Base, Cov_1)
w2v_2 = np.matmul(Base, Cov_2)
beta_01 = np.linspace(-15, 19, 4)
beta_02 = np.linspace(-10, 10, 4)
factor = np.array([[0] * 1000 + [1] * 1000]).T
Y_1_r = [sum((i + beta_01) > 0) for i in (np.matmul(beta, w2v_1) * freq_1_r).sum(axis=1)]
Y_2_r = [sum((i + beta_02) > 0) for i in (np.matmul(beta, w2v_2) * freq_2_r).sum(axis=1)]
Y_1_e = [sum((i + beta_01) > 0) for i in (np.matmul(beta, w2v_1) * freq_1_e).sum(axis=1)]
Y_2_e = [sum((i + beta_02) > 0) for i in (np.matmul(beta, w2v_2) * freq_2_e).sum(axis=1)]
Y_r = np.array(Y_1_r + Y_2_r)
Y_e = np.array(Y_1_e + Y_2_e)
X_r = sparse.csr_matrix(np.concatenate([freq_1_r, freq_2_r]))
X_e = sparse.csr_matrix(np.concatenate([freq_1_e, freq_2_e]))
Data_r = [X_r,factor,Y_r]
Data_e = [X_e,factor,Y_e]
return(Data_r,Data_e,Base)
Data_train,Data_test,Base = Example()
model = AL.algor(Data_train,Base.T)
model.Initialization()
model.Training(1.)
Error_r = np.mean(np.abs(model.Predict(Data_train[0],Data_train[1]) - Data_train[2]))/4
Error_e = np.mean(np.abs(model.Predict(Data_test[0],Data_test[1]) - Data_test[2]))/4
print('The training Error is',Error_r,'The testing Error is',Error_e)
|
# Author: Levente Fodor
# Date: 2019.04.20
# Language: Python 3
'''
u: index of the first vertex (node_1)
v: index of the second vertex (node_2)
w: weight of the edge between the two vertices
myEdge = Edge(0,1,10)
'''
class Edge:
def __init__(self,u,v,w):
self.verticles = [int(u),int(v),int(w)]
def __getitem__(self,index):
return self.verticles[index]
def print_edges(self):
print(f"New edge added: u:{self.verticles[0]}, v:{self.verticles[1]}, w:{self.verticles[2]}")
|
"""Cred artifacts to attach to RFC 453 messages."""
from typing import Mapping
from marshmallow import EXCLUDE, fields
from .......messaging.models.base import BaseModel, BaseModelSchema
from .......messaging.valid import (
INDY_CRED_DEF_ID,
INDY_REV_REG_ID,
INDY_SCHEMA_ID,
NUM_STR_WHOLE,
)
class IndyAttrValue(BaseModel):
"""Indy attribute value."""
class Meta:
"""Indy attribute value."""
schema_class = "IndyAttrValueSchema"
def __init__(self, raw: str = None, encoded: str = None, **kwargs):
"""Initialize indy (credential) attribute value."""
super().__init__(**kwargs)
self.raw = raw
self.encoded = encoded
class IndyAttrValueSchema(BaseModelSchema):
"""Indy attribute value schema."""
class Meta:
"""Indy attribute value schema metadata."""
model_class = IndyAttrValue
unknown = EXCLUDE
raw = fields.Str(
required=True,
description="Attribute raw value",
)
encoded = fields.Str(
required=True,
description="Attribute encoded value",
**NUM_STR_WHOLE,
)
class IndyCredential(BaseModel):
"""Indy credential."""
class Meta:
"""Indy credential metadata."""
schema_class = "IndyCredentialSchema"
def __init__(
self,
schema_id: str = None,
cred_def_id: str = None,
rev_reg_id: str = None,
values: Mapping[str, IndyAttrValue] = None,
signature: Mapping = None,
signature_correctness_proof: Mapping = None,
rev_reg: Mapping = None,
witness: Mapping = None,
):
"""Initialize indy credential."""
self.schema_id = schema_id
self.cred_def_id = cred_def_id
self.rev_reg_id = rev_reg_id
self.values = values
self.signature = signature
self.signature_correctness_proof = signature_correctness_proof
self.rev_reg = rev_reg
self.witness = witness
class IndyCredentialSchema(BaseModelSchema):
"""Indy credential schema."""
class Meta:
"""Indy credential schemametadata."""
model_class = IndyCredential
unknown = EXCLUDE
schema_id = fields.Str(
required=True,
description="Schema identifier",
**INDY_SCHEMA_ID,
)
cred_def_id = fields.Str(
required=True,
description="Credential definition identifier",
**INDY_CRED_DEF_ID,
)
rev_reg_id = fields.Str(
allow_none=True,
description="Revocation registry identifier",
**INDY_REV_REG_ID,
)
values = fields.Dict(
keys=fields.Str(description="Attribute name"),
values=fields.Nested(
IndyAttrValueSchema(),
description="Attribute value",
),
required=True,
description="Credential attributes",
)
signature = fields.Dict(
required=True,
description="Credential signature",
)
signature_correctness_proof = fields.Dict(
required=True,
description="Credential signature correctness proof",
)
rev_reg = fields.Dict(
allow_none=True,
description="Revocation registry state",
)
witness = fields.Dict(
allow_none=True,
description="Witness for revocation proof",
)
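# --- Usage sketch (illustrative only; not part of the original module) ---
# Construct an IndyCredential with a single attribute using the constructors above.
# The identifiers and the encoded value are placeholders, not real ledger data.
def _example_credential():
    values = {
        "name": IndyAttrValue(raw="Alice", encoded="1234567890"),  # placeholder encoded value
    }
    return IndyCredential(
        schema_id="WgWxqztrNooG92RXvxSTWv:2:example_schema:1.0",   # placeholder schema id
        cred_def_id="WgWxqztrNooG92RXvxSTWv:3:CL:20:tag",          # placeholder cred def id
        values=values,
    )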
|
FAKE_DATA = [{"close_0": 296.0, "time": 1577743200000, "myVar1": 269.5, "open_0": 123.0},
{"close_0": 155.0, "time": 1577829600000, "myVar1": 129.25, "open_0": 121.5},
{"close_0": 182.5, "time": 1577916000000, "myVar1": 129.0, "open_0": 121.5},
{"close_0": 162.5, "time": 1578002400000, "myVar1": 129.0, "open_0": 24.5},
{"close_0": 127.5, "time": 1578088800000, "myVar1": 179.5, "open_0": 129.5},
{"close_0": 124.0, "time": 1578175200000, "myVar1": 129.5, "open_0": 125.0},
{"close_0": 229.5, "time": 1578261600000, "myVar1": 219.0, "open_0": 122.5},
{"close_0": 228.0, "time": 1578348000000, "myVar1": 189.5, "open_0": 147.0},
{"close_0": 288.0, "time": 1578434400000, "myVar1": 219.75, "open_0": 151.5},
{"close_0": 337.5, "time": 1578520800000, "myVar1": 259.25, "open_0": 165.0},
{"close_0": 349.5, "time": 1578607200000, "myVar1": 229.5, "open_0": 215.5},
{"close_0": 342.0, "time": 1578693600000, "myVar1": 219.0, "open_0": 132.0},
{"close_0": 552.5, "time": 1578780000000, "myVar1": 252.0, "open_0": 151.5},
{"close_0": 300.0, "time": 1578866400000, "myVar1": 239.75, "open_0": 163.5},
{"close_0": 421.0, "time": 1578952800000, "myVar1": 267.0, "open_0": 213.0},
{"close_0": 436.0, "time": 1579039200000, "myVar1": 269.25, "open_0": 184.5},
{"close_0": 464.0, "time": 1579125600000, "myVar1": 206.25, "open_0": 148.5},
{"close_0": 258.0, "time": 1579212000000, "myVar1": 209.0, "open_0": 150.0},
{"close_0": 307.0, "time": 1579298400000, "myVar1": 159.75, "open_0": 100.5},
{"close_0": 190.5, "time": 1579384800000, "myVar1": 156.0, "open_0": 121.5},
{"close_0": 205.5, "time": 1579471200000, "myVar1": 132.0, "open_0": 58.5},
{"close_0": 190.5, "time": 1579557600000, "myVar1": 150.0, "open_0": 109.5},
{"close_0": 231.0, "time": 1579644000000, "myVar1": 174.0, "open_0": 117.0},
{"close_0": 190.5, "time": 1579730400000, "myVar1": 182.25, "open_0": 174.0},
{"close_0": 197.0, "time": 1579816800000, "myVar1": 169.5, "open_0": 104.0},
{"close_0": 411.5, "time": 1579903200000, "myVar1": 159.0, "open_0": 108.5},
{"close_0": 419.5, "time": 1579989600000, "myVar1": 139.25, "open_0": 113.0},
{"close_0": 261.5, "time": 1580076000000, "myVar1": 189.5, "open_0": 101.5},
{"close_0": 408.0, "time": 1580162400000, "myVar1": 219.75, "open_0": 131.5},
{"close_0": 540.5, "time": 1580248800000, "myVar1": 258.0, "open_0": 185.5},
{"close_0": 341.0, "time": 1580335200000, "myVar1": 243.75, "open_0": 116.5},
{"close_0": 307.0, "time": 1580421600000, "myVar1": 278.25, "open_0": 149.5},
{"close_0": 311.5, "time": 1580508000000, "myVar1": 309.5, "open_0": 291.5},
{"close_0": 345.0, "time": 1580594400000, "myVar1": 306.75, "open_0": 118.5},
{"close_1": 156.0, "time": 1577743200000, "myVar2": 169.5, "open_1": 183.0},
{"close_1": 105.0, "time": 1577829600000, "myVar2": 143.25, "open_1": 181.5},
{"close_1": 82.5, "time": 1577916000000, "myVar2": 117.0, "open_1": 151.5},
{"close_1": 67.5, "time": 1578002400000, "myVar2": 111.0, "open_1": 154.5},
{"close_1": 127.5, "time": 1578088800000, "myVar2": 178.5, "open_1": 229.5},
{"close_1": 174.0, "time": 1578175200000, "myVar2": 199.5, "open_1": 225.0},
{"close_1": 229.5, "time": 1578261600000, "myVar2": 216.0, "open_1": 202.5},
{"close_1": 228.0, "time": 1578348000000, "myVar2": 187.5, "open_1": 147.0},
{"close_1": 288.0, "time": 1578434400000, "myVar2": 219.75, "open_1": 151.5},
{"close_1": 337.5, "time": 1578520800000, "myVar2": 251.25, "open_1": 165.0},
{"close_1": 349.5, "time": 1578607200000, "myVar2": 292.5, "open_1": 235.5},
{"close_1": 348.0, "time": 1578693600000, "myVar2": 270.0, "open_1": 192.0},
{"close_1": 352.5, "time": 1578780000000, "myVar2": 252.0, "open_1": 151.5},
{"close_1": 300.0, "time": 1578866400000, "myVar2": 231.75, "open_1": 163.5},
{"close_1": 321.0, "time": 1578952800000, "myVar2": 267.0, "open_1": 213.0},
{"close_1": 336.0, "time": 1579039200000, "myVar2": 260.25, "open_1": 184.5},
{"close_1": 264.0, "time": 1579125600000, "myVar2": 206.25, "open_1": 148.5},
{"close_1": 258.0, "time": 1579212000000, "myVar2": 204.0, "open_1": 150.0},
{"close_1": 207.0, "time": 1579298400000, "myVar2": 153.75, "open_1": 100.5},
{"close_1": 190.5, "time": 1579384800000, "myVar2": 156.0, "open_1": 121.5},
{"close_1": 205.5, "time": 1579471200000, "myVar2": 132.0, "open_1": 58.5},
{"close_1": 190.5, "time": 1579557600000, "myVar2": 150.0, "open_1": 109.5},
{"close_1": 231.0, "time": 1579644000000, "myVar2": 174.0, "open_1": 117.0},
{"close_1": 190.5, "time": 1579730400000, "myVar2": 182.25, "open_1": 174.0},
{"close_1": 117.0, "time": 1579816800000, "myVar2": 160.5, "open_1": 204.0},
{"close_1": 91.5, "time": 1579903200000, "myVar2": 150.0, "open_1": 208.5},
{"close_1": 19.5, "time": 1579989600000, "myVar2": 131.25, "open_1": 243.0},
{"close_1": 61.5, "time": 1580076000000, "myVar2": 181.5, "open_1": 301.5},
{"close_1": 108.0, "time": 1580162400000, "myVar2": 219.75, "open_1": 331.5},
{"close_1": 130.5, "time": 1580248800000, "myVar2": 258.0, "open_1": 385.5},
{"close_1": 171.0, "time": 1580335200000, "myVar2": 243.75, "open_1": 316.5},
{"close_1": 207.0, "time": 1580421600000, "myVar2": 278.25, "open_1": 349.5},
{"close_1": 211.5, "time": 1580508000000, "myVar2": 301.5, "open_1": 391.5},
{"close_1": 195.0, "time": 1580594400000, "myVar2": 306.75, "open_1": 418.5}
]
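# A minimal inspection helper (not part of the original data file): the "time" fields in
# FAKE_DATA are epoch milliseconds, so converting them makes the rows easier to read when
# debugging. The helper name is an assumption for illustration only.
from datetime import datetime, timezone

def fake_data_dates(rows):
    """Yield (UTC date, row) pairs for rows carrying an epoch-millisecond 'time' key."""
    for row in rows:
        yield datetime.fromtimestamp(row["time"] / 1000, tz=timezone.utc).date(), row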
GAUGE_DATA = 130
GAUGE_DATA2 = [{
"category": "Power",
"value_on": 130,
"value_off": 70
}]
COLUMNCHART_DATA = [{
"country": "USA",
"year2017": 3.5,
"year2018": 4.2
}, {
"country": "UK",
"year2017": 1.7,
"year2018": 3.1
}, {
"country": "Canada",
"year2017": 2.8,
"year2018": 2.9
}, {
"country": "Japan",
"year2017": 2.6,
"year2018": 2.3
}, {
"country": "France",
"year2017": 1.4,
"year2018": 2.1
}, {
"country": "Brazil",
"year2017": 2.6,
"year2018": 4.9
}, {
"country": "Russia",
"year2017": 6.4,
"year2018": 7.2
}, {
"country": "India",
"year2017": 8,
"year2018": 7.1
}, {
"country": "China",
"year2017": 9.9,
"year2018": 10.1
}]
CL_COLUMNCHART_DATA = [{
"level": "inter",
"Greece": 10000,
"Spain": 30000,
}, {
"level": "amateur",
"Greece": 30000,
"Spain": 10000
}, {
"country": "professional",
"Greece": 300000,
"Spain": 200000
}]
BAR_RANGE_CHART_DATA = [
{
"name": "John",
"fromDate": "2018-01-01 08:00",
"toDate": "2018-01-01 10:00",
},
{
"name": "John",
"fromDate": "2018-01-01 12:00",
"toDate": "2018-01-01 15:00",
},
{
"name": "John",
"fromDate": "2018-01-01 15:30",
"toDate": "2018-01-01 21:30",
},
{
"name": "Jane",
"fromDate": "2018-01-01 09:00",
"toDate": "2018-01-01 12:00",
},
{
"name": "Jane",
"fromDate": "2018-01-01 13:00",
"toDate": "2018-01-01 17:00",
},
{
"name": "Peter",
"fromDate": "2018-01-01 11:00",
"toDate": "2018-01-01 16:00",
},
{
"name": "Peter",
"fromDate": "2018-01-01 16:00",
"toDate": "2018-01-01 19:00",
},
{
"name": "Melania",
"fromDate": "2018-01-01 16:00",
"toDate": "2018-01-01 20:00",
},
{
"name": "Melania",
"fromDate": "2018-01-01 20:30",
"toDate": "2018-01-01 24:00",
},
{
"name": "Donald",
"fromDate": "2018-01-01 13:00",
"toDate": "2018-01-01 24:00",
}
]
BAR_RANGE_CHART_DATA_2 = [
{
"region": "MENA",
"name": "TIAM",
"from": 5,
"to": 10,
},
{
"region": "Russia",
"name": "GCAM",
"from": 11,
"to": 25,
},
{
"region": "Latin America",
"name": "42",
"from": 10,
"to": 20,
},
{
"region": "India",
"name": "ALADIN",
"from": 20,
"to": 25,
},
{
"region": "Rest of Asia",
"name": "ICES",
"from": 25,
"to": 30,
},
{
"region": "Africa",
"name": "NATEM",
"from": 30,
"to": 35,
},
{
"region": "China",
"name": "MAPLE",
"from": 15,
"to": 40,
},
{
"region": "Europe",
"name": "CONTO",
"from": 40,
"to": 45,
},
{
"region": "Pacific OECD",
"name": "NEMESIS",
"from": 35,
"to": 50,
},
{
"region": "North America",
"name": "LEAP",
"from": 40,
"to": 55,
},
{
"region": "World",
"name": "FORECAST",
"from": 50,
"to": 60,
}
]
BAR_HEATMAP_DATA = [{
"category": "Research",
"value": 450
}, {
"category": "Marketing",
"value": 1200
}, {
"category": "Distribution",
"value": 1850
}, {
"category": "HR",
"value": 850
}, {
"category": "Sales",
"value": 499
}, {
"category": "Support",
"value": 871
}, {
"category": "Other",
"value": 512
}]
BAR_HEATMAP_DATA_2 = [{
"category": "GCAM",
"precision": 45
}, {
"category": "TIAM",
"precision": 12
}, {
"category": "MUSE",
"precision": 18.5
}, {
"category": "42",
"precision": 8.5
}, {
"category": "GEMINI-E3",
"precision": 49.9
}, {
"category": "ICES",
"precision": 8.7
}, {
"category": "DICE",
"precision": 51.2
},
{
"category": "E3ME",
"precision": 5.1
},
{
"category": "ALADIN",
"precision": 12
},
{
"category": "FORECAST",
"precision": 20
},
{
"category": "LEAP",
"precision": 33
},
{
"category": "EU-TIMES",
"precision": 42
},
{
"category": "NEMESIS",
"precision": 62
},
{
"category": "CONTO",
"precision": 27
},
{
"category": "MARKAL-India",
"precision": 51
},
{
"category": "MAPLE",
"precision": 24
},
{
"category": "NATEM",
"precision": 77
},
{
"category": "SISGEMA",
"precision": 88
},
{
"category": "TIMES-CAC",
"precision": 33
},
]
SANKEY_FAKE_TRAJECTORY = [
{'from': 'Product Manager', 'to': 'Senior advisory', 'value': 1, "missing_skills": "senior_adv_missing_skills" },
{'from': 'Software Architect', 'to': 'Product Manager', 'value': 100, "missing_skills": "product_m_missing_skills"},
{'from': 'Full-Stack Developer', 'to': 'Product Manager', 'value': 100, "missing_skills": "product_m_missing_skills"},
{'from': 'Server Service Expert', 'to': 'Software Architect', 'value': 10, "missing_skills": "sa_missing_skills"},
{'from': 'Back-End Developer', 'to': 'Server Service Expert', 'value': 10, "missing_skills": "sse_missing_skills"},
{'from': 'Junior programmer', 'to': 'Front-End Developer', 'value': 10, "missing_skills": "fe_d_missing_skills"},
{'from': 'Front-End Developer', 'to': 'Full-Stack Developer', 'value': 1, "missing_skills": "fsd_missing_skills"},
{'from': 'Senior Developer', 'to': 'Lead Developer', 'value': 11, "missing_skills": "l_d_missing_skills"},
{'from': 'Intern programmer', 'to': 'Junior programmer', 'value': 1, "missing_skills": "j_p_missing_skills"},
{'from': 'Senior advisory', 'to': 'End of career', 'value': 20, "missing_skills": "e_o_c_missing_skills"},
{'from': 'Junior programmer', 'to': 'Senior Developer', 'value': 90, "missing_skills": "s_d_missing_skills"},
{'from': 'Intern programmer', 'to': 'Back-End Developer', 'value': 100, "missing_skills": "b_d_missing_skills"}
]
# Ordered by hierarchy so the links render correctly on the sankey diagram
# (see the ordering check sketched after this list).
SANKEY_FAKE_TRAJECTORY2 = [
{'from': 'Intern programmer', 'to': 'Back-End Developer', 'value': 5, 'missing_skills': []},
{'from': 'Intern programmer', 'to': 'Junior programmer', 'value': 5, 'missing_skills': []},
{'from': 'Back-End Developer', 'to': 'Server Service Expert', 'value': 5, 'missing_skills': []},
{'from': 'Junior programmer', 'to': 'Front-End Developer', 'value': 5, 'missing_skills': []},
{'from': 'Junior programmer', 'to': 'Senior Developer', 'value': 5, 'missing_skills': []},
{'from': 'Server Service Expert', 'to': 'Software Architect', 'value': 5, 'missing_skills': []},
{'from': 'Front-End Developer', 'to': 'Full-Stack Developer', 'value': 5, 'missing_skills': []},
{'from': 'Senior Developer', 'to': 'Lead Developer', 'value': 5, 'missing_skills': []},
{'from': 'Software Architect', 'to': 'Product Manager', 'value': 5, 'missing_skills': []},
{'from': 'Full-Stack Developer', 'to': 'Product Manager', 'value': 5, 'missing_skills': []},
{'from': 'Product Manager', 'to': 'Senior advisory', 'value': 5, 'missing_skills': []},
{'from': 'Senior advisory', 'to': 'End of career', 'value': 5,
'missing_skills': ['Final skill for end of career']},
]
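# A minimal sketch (not part of the original file) of the ordering rule described above:
# a link's 'from' node must never appear as a 'to' in a later link, i.e. the list is in
# topological order. The helper name is illustrative only.
def is_hierarchy_ordered(links):
    """Return True if the sankey links are listed in hierarchical (topological) order."""
    for i, link in enumerate(links):
        if any(later["to"] == link["from"] for later in links[i:]):
            return False
    return True

# e.g. is_hierarchy_ordered(SANKEY_FAKE_TRAJECTORY) is False, while
# is_hierarchy_ordered(SANKEY_FAKE_TRAJECTORY2) is True.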
SANKEY_FAKE_TRAJECTORY3 = [
{'from': 'Intern programmer', 'to': 'Back-End Developer', 'value': 100, 'missing_skills': []},
{'from': 'Intern programmer', 'to': 'Junior programmer', 'value': 100, 'missing_skills': []},
{'from': 'Back-End Developer', 'to': 'Server Service Expert', 'value': 10, 'missing_skills': []},
{'from': 'Junior programmer', 'to': 'Front-End Developer', 'value': 100, 'missing_skills': []},
{'from': 'Junior programmer', 'to': 'Senior Developer', 'value': 100, 'missing_skills': []},
{'from': 'Server Service Expert', 'to': 'Software Architect', 'value': 10, 'missing_skills': []},
{'from': 'Front-End Developer', 'to': 'Full-Stack Developer', 'value': 10, 'missing_skills': []},
{'from': 'Senior Developer', 'to': 'Lead Developer', 'value': 100, 'missing_skills': []},
{'from': 'Software Architect', 'to': 'Product Manager', 'value': 10, 'missing_skills': []},
{'from': 'Full-Stack Developer', 'to': 'Product Manager', 'value': 10, 'missing_skills': []},
{'from': 'Product Manager', 'to': 'Senior advisory', 'value': 10, 'missing_skills': []},
{'from': 'Senior advisory', 'to': 'End of career', 'value': 10,
'missing_skills': ['Final skill for end of career']},
]
HEAT_MAP_DATA_FOR_MAP = [{"id": "US", "value": 18}, {"id": "MF", "value": 15}, {"id": "NU", "value": 16},
{"id": "BF", "value": 18}, {"id": "TF", "value": 13}, {"id": "GU", "value": 16},
{"id": "NC", "value": 17}, {"id": "TV", "value": 16}, {"id": "CC", "value": 16},
{"id": "MS", "value": 17}, {"id": "UA", "value": 20}, {"id": "CK", "value": 17},
{"id": "GB", "value": 24}, {"id": "HM", "value": 13}, {"id": "NZ", "value": 17},
{"id": "CX", "value": 15}, {"id": "KY", "value": 17}, {"id": "RU", "value": 18},
{"id": "VG", "value": 16}, {"id": "BB", "value": 18}, {"id": "NF", "value": 16},
{"id": "CF", "value": 18}, {"id": "GL", "value": 15}, {"id": "VC", "value": 18},
{"id": "AE", "value": 18}, {"id": "FK", "value": 16}, {"id": "LY", "value": 18},
{"id": "WF", "value": 14}, {"id": "AQ", "value": 13}, {"id": "SJ", "value": 14},
{"id": "GP", "value": 16}, {"id": "AG", "value": 17}, {"id": "TK", "value": 16},
{"id": "SB", "value": 17}, {"id": "GF", "value": 16}, {"id": "TW", "value": 17},
{"id": "SX", "value": 15}, {"id": "EH", "value": 17}, {"id": "PN", "value": 16},
{"id": "GE", "value": 19}, {"id": "VA", "value": 14}, {"id": "IE", "value": 27},
{"id": "SY", "value": 18}, {"id": "LC", "value": 18}, {"id": "GD", "value": 18},
{"id": "RE", "value": 16}, {"id": "AX", "value": 13}, {"id": "CA", "value": 18},
{"id": "TM", "value": 18}, {"id": "BM", "value": 18}, {"id": "YE", "value": 18},
{"id": "MN", "value": 17}, {"id": "BL", "value": 15}, {"id": "IM", "value": 13},
{"id": "PM", "value": 15}, {"id": "JP", "value": 18}, {"id": "CW", "value": 15},
{"id": "PF", "value": 16}, {"id": "AI", "value": 17}, {"id": "SH", "value": 16},
{"id": "BZ", "value": 17}, {"id": "CZ", "value": 26}, {"id": "BV", "value": 13},
{"id": "AS", "value": 16}, {"id": "RO", "value": 28}, {"id": "LA", "value": 17},
{"id": "FO", "value": 13}, {"id": "SS", "value": 18}, {"id": "TC", "value": 16},
{"id": "BN", "value": 17}, {"id": "AN", "value": 18}, {"id": "MY", "value": 17},
{"id": "BA", "value": 21}, {"id": "VI", "value": 16}, {"id": "DO", "value": 18},
{"id": "AW", "value": 17}, {"id": "GS", "value": 13}, {"id": "GI", "value": 15},
{"id": "IO", "value": 14}, {"id": "SK", "value": 25}, {"id": "JM", "value": 18},
{"id": "MQ", "value": 16}, {"id": "YT", "value": 16}, {"id": "KG", "value": 18},
{"id": "UM", "value": 16}, {"id": "BI", "value": 18}, {"id": "ZA", "value": 19},
{"id": "TN", "value": 18}, {"id": "JO", "value": 18}, {"id": "TZ", "value": 18},
{"id": "NO", "value": 26}, {"id": "GG", "value": 13}, {"id": "ZW", "value": 18},
{"id": "AO", "value": 18}, {"id": "KM", "value": 17}, {"id": "MG", "value": 18},
{"id": "ES", "value": 29}, {"id": "ET", "value": 18}, {"id": "LI", "value": 14},
{"id": "FJ", "value": 17}, {"id": "CM", "value": 18}, {"id": "TD", "value": 18},
{"id": "SC", "value": 17}, {"id": "CH", "value": 22}, {"id": "CU", "value": 18},
{"id": "MT", "value": 20}, {"id": "MV", "value": 17}, {"id": "PG", "value": 17},
{"id": "HK", "value": 16}, {"id": "MO", "value": 16}, {"id": "RS", "value": 23},
{"id": "LR", "value": 18}, {"id": "PA", "value": 18}, {"id": "PH", "value": 17},
{"id": "SA", "value": 17}, {"id": "GA", "value": 18}, {"id": "RW", "value": 18},
{"id": "MP", "value": 16}, {"id": "AR", "value": 17}, {"id": "ST", "value": 18},
{"id": "CV", "value": 18}, {"id": "FR", "value": 30}, {"id": "IR", "value": 17},
{"id": "KI", "value": 17}, {"id": "SM", "value": 14}, {"id": "PR", "value": 16},
{"id": "CR", "value": 18}, {"id": "BR", "value": 20}, {"id": "MZ", "value": 18},
{"id": "BW", "value": 18}, {"id": "OM", "value": 18}, {"id": "CN", "value": 18},
{"id": "GN", "value": 18}, {"id": "ZM", "value": 18}, {"id": "ER", "value": 18},
{"id": "ID", "value": 18}, {"id": "SE", "value": 26}, {"id": "MH", "value": 16},
{"id": "MD", "value": 20}, {"id": "BD", "value": 17}, {"id": "TT", "value": 18},
{"id": "NR", "value": 16}, {"id": "CO", "value": 18}, {"id": "MR", "value": 18},
{"id": "PT", "value": 25}, {"id": "AT", "value": 26}, {"id": "GW", "value": 18},
{"id": "IL", "value": 18}, {"id": "QA", "value": 18}, {"id": "SZ", "value": 18},
{"id": "VU", "value": 17}, {"id": "SN", "value": 18}, {"id": "BO", "value": 18},
{"id": "PW", "value": 16}, {"id": "CL", "value": 17}, {"id": "HU", "value": 26},
{"id": "NI", "value": 18}, {"id": "BT", "value": 17}, {"id": "GY", "value": 18},
{"id": "SL", "value": 18}, {"id": "EE", "value": 23}, {"id": "IT", "value": 29},
{"id": "BJ", "value": 18}, {"id": "CI", "value": 18}, {"id": "ML", "value": 18},
{"id": "TG", "value": 18}, {"id": "CY", "value": 20}, {"id": "KN", "value": 17},
{"id": "LU", "value": 25}, {"id": "BY", "value": 19}, {"id": "SI", "value": 24},
{"id": "HN", "value": 18}, {"id": "MC", "value": 15}, {"id": "BS", "value": 18},
{"id": "IS", "value": 19}, {"id": "BG", "value": 23}, {"id": "TR", "value": 18},
{"id": "MX", "value": 18}, {"id": "JE", "value": 13}, {"id": "HT", "value": 18},
{"id": "CD", "value": 18}, {"id": "CG", "value": 18}, {"id": "ME", "value": 22},
{"id": "VN", "value": 17}, {"id": "DM", "value": 18}, {"id": "NA", "value": 18},
{"id": "MA", "value": 18}, {"id": "AU", "value": 17}, {"id": "PY", "value": 18},
{"id": "KW", "value": 18}, {"id": "SR", "value": 18}, {"id": "BE", "value": 26},
{"id": "KR", "value": 15}, {"id": "GT", "value": 18}, {"id": "VE", "value": 18},
{"id": "DJ", "value": 18}, {"id": "EC", "value": 18}, {"id": "TH", "value": 17},
{"id": "IQ", "value": 18}, {"id": "LT", "value": 25}, {"id": "MK", "value": 22},
{"id": "GM", "value": 18}, {"id": "PS", "value": 17}, {"id": "LB", "value": 18},
{"id": "TL", "value": 16}, {"id": "TJ", "value": 18}, {"id": "SV", "value": 18},
{"id": "EG", "value": 18}, {"id": "NG", "value": 18}, {"id": "KZ", "value": 18},
{"id": "DZ", "value": 18}, {"id": "PL", "value": 25}, {"id": "GR", "value": 29},
{"id": "HR", "value": 24}, {"id": "PE", "value": 18}, {"id": "WS", "value": 17},
{"id": "AM", "value": 19}, {"id": "PK", "value": 16}, {"id": "AZ", "value": 19},
{"id": "LV", "value": 28}, {"id": "TO", "value": 17}, {"id": "UY", "value": 18},
{"id": "FI", "value": 25}, {"id": "SD", "value": 18}, {"id": "AD", "value": 15},
{"id": "MM", "value": 17}, {"id": "LS", "value": 18}, {"id": "DE", "value": 30},
{"id": "NP", "value": 17}, {"id": "KH", "value": 17}, {"id": "KE", "value": 18},
{"id": "NE", "value": 18}, {"id": "AF", "value": 17}, {"id": "DK", "value": 26},
{"id": "BH", "value": 18}, {"id": "IN", "value": 18}, {"id": "BQ", "value": 15},
{"id": "MW", "value": 18}, {"id": "GH", "value": 18}, {"id": "UZ", "value": 18},
{"id": "SG", "value": 17}, {"id": "NL", "value": 26}, {"id": "MU", "value": 18},
{"id": "GQ", "value": 18}, {"id": "LK", "value": 17}, {"id": "FM", "value": 16},
{"id": "SO", "value": 18}, {"id": "AL", "value": 22}, {"id": "UG", "value": 18},
{"id": "KP", "value": 17}, {"id": "XK", "value": 13}, {"id": "XX", "value": 13}]
HEAT_MAP_DATA = [
{
"id": "GR",
"value": 30
},
{
"id": "FR",
"value": 45
},
{
"id": "US",
"value": 32
},
{
"id": "SP",
"value": 40
},
{
"id": "IN",
"value": 39
},
{
"id": "IT",
"value": 25
},
{
"id": "GE",
"value": 33
},
{
"id": "PL",
"value": 40
},
{
"id": "CA",
"value": 24
},
{
"id": "RU",
"value": 18
},
]
PIE_CHART_DATA = [
{
"country": "Lithuania",
"oil_consumption": 501.9
},
{
"country": "Czech Republic",
"oil_consumption": 301.9
},
{
"country": "Ireland",
"oil_consumption": 201.1
},
{
"country": "Germany",
"oil_consumption": 165.8
},
{
"country": "Australia",
"oil_consumption": 139.9
},
{
"country": "Austria",
"oil_consumption": 128.3
},
{
"country": "UK",
"oil_consumption": 99
},
{
"country": "Belgium",
"oil_consumption": 60
},
{
"country": "The Netherlands",
"oil_consumption": 50
}
]
RADAR_CHART_DATA = [
{
"country": "Lithuania",
"oil_consumption": 501.9,
"energy_consumption": 700
},
{
"country": "Czech Republic",
"oil_consumption": 301.9,
"energy_consumption": 900
},
{
"country": "Ireland",
"oil_consumption": 201.1,
"energy_consumption": 750
},
{
"country": "Germany",
"oil_consumption": 165.8,
"energy_consumption": 300
},
{
"country": "Australia",
"oil_consumption": 139.9,
"energy_consumption": 200
},
{
"country": "Austria",
"oil_consumption": 128.3,
"energy_consumption": 500
},
{
"country": "UK",
"oil_consumption": 99,
"energy_consumption": 700
},
{
"country": "Belgium",
"oil_consumption": 60,
"energy_consumption": 200
},
{
"country": "The Netherlands",
"oil_consumption": 50,
"energy_consumption": 700
}
]
SANKEYCHORD_DATA = [
{"from": "A", "to": "D", "value": 10},
{"from": "B", "to": "D", "value": 8},
{"from": "B", "to": "E", "value": 4},
{"from": "C", "to": "E", "value": 3},
{"from": "D", "to": "G", "value": 5},
{"from": "D", "to": "I", "value": 2},
{"from": "D", "to": "H", "value": 3},
{"from": "E", "to": "H", "value": 6},
{"from": "G", "to": "J", "value": 5},
{"from": "I", "to": "J", "value": 1},
{"from": "H", "to": "J", "value": 9}
]
SANKEYCHORD_DATA_2 = [
{"from": "Solar", "to": "Electricity grid", "value": 4},
{"from": "Solar", "to": "Buildings", "value": 14},
{"from": "Solar", "to": "Losses/ own use", "value": 3},
{"from": "Wind", "to": "Electricity grid", "value": 12},
{"from": "Geothermal", "to": "Electricity grid", "value": 2},
{"from": "Geothermal", "to": "Losses/ own use", "value": 16},
{"from": "Hydro", "to": "Electricity grid", "value": 23},
{"from": "Nuclear", "to": "Power plants", "value": 36},
{"from": "Coal reserves", "to": "Solid", "value": 184},
{"from": "Coal reserves", "to": "Losses/ own use", "value": 38},
{"from": "Oil reserves", "to": "Liquid", "value": 278},
{"from": "Oil reserves", "to": "Losses/ own use", "value": 34},
{"from": "Gas reserves", "to": "Gas", "value": 158},
{"from": "Gas reserves", "to": "Losses/ own use", "value": 40},
{"from": "Biomass and waste", "to": "Solid", "value": 44},
{"from": "Biomass and waste", "to": "Liquid", "value": 7},
{"from": "Biomass and waste", "to": "Gas", "value": 8},
{"from": "Biomass and waste", "to": "Losses/ own use", "value": 8},
{"from": "Electricity grid", "to": "Buildings", "value": 86},
{"from": "Electricity grid", "to": "Manufacturing", "value": 47},
{"from": "Electricity grid", "to": "Transport", "value": 7},
{"from": "Electricity grid", "to": "Other", "value": 3},
{"from": "Electricity grid", "to": "Losses/ own use", "value": 12},
{"from": "Solid", "to": "Buildings", "value": 28},
{"from": "Solid", "to": "Manufacturing", "value": 76},
{"from": "Power plants", "to": "Losses/ own use", "value": 136},
{"from": "Liquid", "to": "Heat", "value": 1},
{"from": "Liquid", "to": "Buildings", "value": 12},
{"from": "Liquid", "to": "Manufacturing", "value": 66},
{"from": "Liquid", "to": "Transport", "value": 157},
{"from": "Liquid", "to": "Other", "value": 4},
{"from": "Gas", "to": "Buildings", "value": 35},
{"from": "Gas", "to": "Manufacturing", "value": 48},
{"from": "Gas", "to": "Transport", "value": 4}
]
SANKEY_DATA_3 = [
{
"id": "job1",
"position": "Programmer",
"score": 80,
"nextpositions": [
{
"id": "job2",
"position": "senior developer",
"score": 60,
"nextpositions": [
{
"id": "job3",
"position": "software architect",
"score": 30,
"nextpositions": [
{
"id": "job4",
"position": "project manager",
"score": 20,
"nextpositions": [
{
"id": "job8",
"position": "cto",
"score": 20,
"nextpositions": []
}
]
},
{
"id": "job5",
"position": "product manager",
"score": 10,
"nextpositions": []
}
]
},
{
"id": "job6",
"position": "lead developer",
"score": 30,
"nextpositions": [
{
"id": "job7",
"position": "development manager",
"score": 30,
"nextpositions": [
{
"id": "job8",
"position": "cto",
"score": 30,
"nextpositions": []
}
]
}
]
}
]
}
]
}
]
THERMOMETER = [
{
"date": "2012-07-27",
"value": 13
}, {
"date": "2012-07-28",
"value": 11
}, {
"date": "2012-07-29",
"value": 15
}, {
"date": "2012-07-30",
"value": 16
}, {
"date": "2012-07-31",
"value": 18
}, {
"date": "2012-08-01",
"value": 13
}, {
"date": "2012-08-02",
"value": 22
}, {
"date": "2012-08-03",
"value": 23
}, {
"date": "2012-08-04",
"value": 20
}, {
"date": "2012-08-05",
"value": 17
}, {
"date": "2012-08-06",
"value": 16
}, {
"date": "2012-08-07",
"value": 18
}, {
"date": "2012-08-08",
"value": 21
}, {
"date": "2012-08-09",
"value": 26
}, {
"date": "2012-08-10",
"value": 24
}, {
"date": "2012-08-11",
"value": 29
}, {
"date": "2012-08-12",
"value": 32
}, {
"date": "2012-08-13",
"value": 18
}, {
"date": "2012-08-14",
"value": 24
}, {
"date": "2012-08-15",
"value": 22
}, {
"date": "2012-08-16",
"value": 18
}, {
"date": "2012-08-17",
"value": 19
}, {
"date": "2012-08-18",
"value": 14
}, {
"date": "2012-08-19",
"value": 15
}, {
"date": "2012-08-20",
"value": 12
}, {
"date": "2012-08-21",
"value": 8
}, {
"date": "2012-08-22",
"value": 9
}, {
"date": "2012-08-23",
"value": 8
}, {
"date": "2012-08-24",
"value": 7
}, {
"date": "2012-08-25",
"value": 5
}, {
"date": "2012-08-26",
"value": 11
}, {
"date": "2012-08-27",
"value": 13
}, {
"date": "2012-08-28",
"value": 18
}, {
"date": "2012-08-29",
"value": 20
}, {
"date": "2012-08-30",
"value": 29
}, {
"date": "2012-08-31",
"value": 33
}, {
"date": "2012-09-01",
"value": 42
}, {
"date": "2012-09-02",
"value": 35
}, {
"date": "2012-09-03",
"value": 31
}, {
"date": "2012-09-04",
"value": 47
}, {
"date": "2012-09-05",
"value": 52
}, {
"date": "2012-09-06",
"value": 46
}, {
"date": "2012-09-07",
"value": 41
}, {
"date": "2012-09-08",
"value": 43
}, {
"date": "2012-09-09",
"value": 40
}, {
"date": "2012-09-10",
"value": 39
}, {
"date": "2012-09-11",
"value": 34
}, {
"date": "2012-09-12",
"value": 29
}, {
"date": "2012-09-13",
"value": 34
}, {
"date": "2012-09-14",
"value": 37
}, {
"date": "2012-09-15",
"value": 42
}, {
"date": "2012-09-16",
"value": 49
}, {
"date": "2012-09-17",
"value": 46
}, {
"date": "2012-09-18",
"value": 47
}, {
"date": "2012-09-19",
"value": 55
}, {
"date": "2012-09-20",
"value": 59
}, {
"date": "2012-09-21",
"value": 58
}, {
"date": "2012-09-22",
"value": 57
}, {
"date": "2012-09-23",
"value": 61
}, {
"date": "2012-09-24",
"value": 59
}, {
"date": "2012-09-25",
"value": 67
}, {
"date": "2012-09-26",
"value": 65
}, {
"date": "2012-09-27",
"value": 61
}, {
"date": "2012-09-28",
"value": 66
}, {
"date": "2012-09-29",
"value": 69
}, {
"date": "2012-09-30",
"value": 71
}, {
"date": "2012-10-01",
"value": 67
}, {
"date": "2012-10-02",
"value": 63
}, {
"date": "2012-10-03",
"value": 46
}, {
"date": "2012-10-04",
"value": 32
}, {
"date": "2012-10-05",
"value": 21
}, {
"date": "2012-10-06",
"value": 18
}, {
"date": "2012-10-07",
"value": 21
}, {
"date": "2012-10-08",
"value": 28
}, {
"date": "2012-10-09",
"value": 27
}, {
"date": "2012-10-10",
"value": 36
}, {
"date": "2012-10-11",
"value": 33
}, {
"date": "2012-10-12",
"value": 31
}, {
"date": "2012-10-13",
"value": 30
}, {
"date": "2012-10-14",
"value": 34
}, {
"date": "2012-10-15",
"value": 38
}, {
"date": "2012-10-16",
"value": 37
}, {
"date": "2012-10-17",
"value": 44
}, {
"date": "2012-10-18",
"value": 49
}, {
"date": "2012-10-19",
"value": 53
}, {
"date": "2012-10-20",
"value": 57
}, {
"date": "2012-10-21",
"value": 60
}, {
"date": "2012-10-22",
"value": 61
}, {
"date": "2012-10-23",
"value": 69
}, {
"date": "2012-10-24",
"value": 67
}, {
"date": "2012-10-25",
"value": 72
}, {
"date": "2012-10-26",
"value": 77
}, {
"date": "2012-10-27",
"value": 75
}, {
"date": "2012-10-28",
"value": 70
}, {
"date": "2012-10-29",
"value": 72
}, {
"date": "2012-10-30",
"value": 70
}, {
"date": "2012-10-31",
"value": 72
}, {
"date": "2012-11-01",
"value": 73
}, {
"date": "2012-11-02",
"value": 67
}, {
"date": "2012-11-03",
"value": 68
}, {
"date": "2012-11-04",
"value": 65
}, {
"date": "2012-11-05",
"value": 71
}, {
"date": "2012-11-06",
"value": 75
}, {
"date": "2012-11-07",
"value": 74
}, {
"date": "2012-11-08",
"value": 71
}, {
"date": "2012-11-09",
"value": 76
}, {
"date": "2012-11-10",
"value": 77
}, {
"date": "2012-11-11",
"value": 81
}, {
"date": "2012-11-12",
"value": 83
}, {
"date": "2012-11-13",
"value": 80
}, {
"date": "2012-11-14",
"value": 81
}, {
"date": "2012-11-15",
"value": 87
}, {
"date": "2012-11-16",
"value": 82
}, {
"date": "2012-11-17",
"value": 86
}, {
"date": "2012-11-18",
"value": 80
}, {
"date": "2012-11-19",
"value": 87
}, {
"date": "2012-11-20",
"value": 83
}, {
"date": "2012-11-21",
"value": 85
}, {
"date": "2012-11-22",
"value": 84
}, {
"date": "2012-11-23",
"value": 82
}, {
"date": "2012-11-24",
"value": 73
}, {
"date": "2012-11-25",
"value": 71
}, {
"date": "2012-11-26",
"value": 75
}, {
"date": "2012-11-27",
"value": 79
}, {
"date": "2012-11-28",
"value": 70
}, {
"date": "2012-11-29",
"value": 73
}, {
"date": "2012-11-30",
"value": 61
}, {
"date": "2012-12-01",
"value": 62
}, {
"date": "2012-12-02",
"value": 66
}, {
"date": "2012-12-03",
"value": 65
}, {
"date": "2012-12-04",
"value": 73
}, {
"date": "2012-12-05",
"value": 79
}, {
"date": "2012-12-06",
"value": 78
}, {
"date": "2012-12-07",
"value": 78
}, {
"date": "2012-12-08",
"value": 78
}, {
"date": "2012-12-09",
"value": 74
}, {
"date": "2012-12-10",
"value": 73
}, {
"date": "2012-12-11",
"value": 75
}, {
"date": "2012-12-12",
"value": 70
}, {
"date": "2012-12-13",
"value": 77
}, {
"date": "2012-12-14",
"value": 67
}, {
"date": "2012-12-15",
"value": 62
}, {
"date": "2012-12-16",
"value": 64
}, {
"date": "2012-12-17",
"value": 61
}, {
"date": "2012-12-18",
"value": 59
}, {
"date": "2012-12-19",
"value": 53
}, {
"date": "2012-12-20",
"value": 54
}, {
"date": "2012-12-21",
"value": 56
}, {
"date": "2012-12-22",
"value": 59
}, {
"date": "2012-12-23",
"value": 58
}, {
"date": "2012-12-24",
"value": 55
}, {
"date": "2012-12-25",
"value": 52
}, {
"date": "2012-12-26",
"value": 54
}, {
"date": "2012-12-27",
"value": 50
}, {
"date": "2012-12-28",
"value": 50
}, {
"date": "2012-12-29",
"value": 51
}, {
"date": "2012-12-30",
"value": 52
}, {
"date": "2012-12-31",
"value": 58
}, {
"date": "2013-01-01",
"value": 60
}, {
"date": "2013-01-02",
"value": 67
}, {
"date": "2013-01-03",
"value": 64
}, {
"date": "2013-01-04",
"value": 66
}, {
"date": "2013-01-05",
"value": 60
}, {
"date": "2013-01-06",
"value": 63
}, {
"date": "2013-01-07",
"value": 61
}, {
"date": "2013-01-08",
"value": 60
}, {
"date": "2013-01-09",
"value": 65
}, {
"date": "2013-01-10",
"value": 75
}, {
"date": "2013-01-11",
"value": 77
}, {
"date": "2013-01-12",
"value": 78
}, {
"date": "2013-01-13",
"value": 70
}, {
"date": "2013-01-14",
"value": 70
}, {
"date": "2013-01-15",
"value": 73
}, {
"date": "2013-01-16",
"value": 71
}, {
"date": "2013-01-17",
"value": 74
}, {
"date": "2013-01-18",
"value": 78
}, {
"date": "2013-01-19",
"value": 85
}, {
"date": "2013-01-20",
"value": 82
}, {
"date": "2013-01-21",
"value": 83
}, {
"date": "2013-01-22",
"value": 88
}, {
"date": "2013-01-23",
"value": 85
}, {
"date": "2013-01-24",
"value": 85
}, {
"date": "2013-01-25",
"value": 80
}, {
"date": "2013-01-26",
"value": 87
}, {
"date": "2013-01-27",
"value": 84
}, {
"date": "2013-01-28",
"value": 83
}, {
"date": "2013-01-29",
"value": 84
}, {
"date": "2013-01-30",
"value": 81
}
]
HEAT_MAP_CHART_DATA2 = [
{
"hour": "12pm",
"weekday": "Sun",
"value": "0"
},
{
"hour": "1am",
"weekday": "Sun",
"value": "1"
},
{
"hour": "2am",
"weekday": "Sun",
"value": "2"
},
{
"hour": "3am",
"weekday": "Sun",
"value": "1"
},
{
"hour": "12pm",
"weekday": "Mon",
"value": "3"
},
{
"hour": "1am",
"weekday": "Mon",
"value": "1"
},
{
"hour": "2am",
"weekday": "Mon",
"value": "2"
},
{
"hour": "3am",
"weekday": "Mon",
"value": "2"
},
{
"hour": "12pm",
"weekday": "Tue",
"value": "1"
},
{
"hour": "1am",
"weekday": "Tue",
"value": "1"
},
{
"hour": "2am",
"weekday": "Tue",
"value": "2"
},
{
"hour": "3am",
"weekday": "Tue",
"value": "2"
},
{
"hour": "12pm",
"weekday": "Wed",
"value": "1"
},
{
"hour": "1am",
"weekday": "Wed",
"value": "3"
},
{
"hour": "2am",
"weekday": "Wed",
"value": "3"
},
{
"hour": "3am",
"weekday": "Wed",
"value": "2"
},
]
HEAT_MAP_CHART_DATA = [
{
"hour": "12pm",
"weekday": "Sun",
"value": 1900
},
{
"hour": "1am",
"weekday": "Sun",
"value": 2520
},
{
"hour": "2am",
"weekday": "Sun",
"value": 2334
},
{
"hour": "3am",
"weekday": "Sun",
"value": 2230
},
{
"hour": "4am",
"weekday": "Sun",
"value": 2325
},
{
"hour": "5am",
"weekday": "Sun",
"value": 2019
},
{
"hour": "6am",
"weekday": "Sun",
"value": 2128
},
{
"hour": "7am",
"weekday": "Sun",
"value": 2246
},
{
"hour": "8am",
"weekday": "Sun",
"value": 2421
},
{
"hour": "9am",
"weekday": "Sun",
"value": 2788
},
{
"hour": "10am",
"weekday": "Sun",
"value": 2959
},
{
"hour": "11am",
"weekday": "Sun",
"value": 3018
},
{
"hour": "12am",
"weekday": "Sun",
"value": 3154
},
{
"hour": "1pm",
"weekday": "Sun",
"value": 3172
},
{
"hour": "2pm",
"weekday": "Sun",
"value": 3368
},
{
"hour": "3pm",
"weekday": "Sun",
"value": 3464
},
{
"hour": "4pm",
"weekday": "Sun",
"value": 3746
},
{
"hour": "5pm",
"weekday": "Sun",
"value": 3656
},
{
"hour": "6pm",
"weekday": "Sun",
"value": 3336
},
{
"hour": "7pm",
"weekday": "Sun",
"value": 3292
},
{
"hour": "8pm",
"weekday": "Sun",
"value": 3269
},
{
"hour": "9pm",
"weekday": "Sun",
"value": 3300
},
{
"hour": "10pm",
"weekday": "Sun",
"value": 3403
},
{
"hour": "11pm",
"weekday": "Sun",
"value": 3323
},
{
"hour": "12pm",
"weekday": "Mon",
"value": 3346
},
{
"hour": "1am",
"weekday": "Mon",
"value": 2725
},
{
"hour": "2am",
"weekday": "Mon",
"value": 3052
},
{
"hour": "3am",
"weekday": "Mon",
"value": 3876
},
{
"hour": "4am",
"weekday": "Mon",
"value": 4453
},
{
"hour": "5am",
"weekday": "Mon",
"value": 3972
},
{
"hour": "6am",
"weekday": "Mon",
"value": 4644
},
{
"hour": "7am",
"weekday": "Mon",
"value": 5715
},
{
"hour": "8am",
"weekday": "Mon",
"value": 7080
},
{
"hour": "9am",
"weekday": "Mon",
"value": 8022
},
{
"hour": "10am",
"weekday": "Mon",
"value": 8446
},
{
"hour": "11am",
"weekday": "Mon",
"value": 9313
},
{
"hour": "12am",
"weekday": "Mon",
"value": 9011
},
{
"hour": "1pm",
"weekday": "Mon",
"value": 8508
},
{
"hour": "2pm",
"weekday": "Mon",
"value": 8515
},
{
"hour": "3pm",
"weekday": "Mon",
"value": 8399
},
{
"hour": "4pm",
"weekday": "Mon",
"value": 8649
},
{
"hour": "5pm",
"weekday": "Mon",
"value": 7869
},
{
"hour": "6pm",
"weekday": "Mon",
"value": 6933
},
{
"hour": "7pm",
"weekday": "Mon",
"value": 5969
},
{
"hour": "8pm",
"weekday": "Mon",
"value": 5552
},
{
"hour": "9pm",
"weekday": "Mon",
"value": 5434
},
{
"hour": "10pm",
"weekday": "Mon",
"value": 5070
},
{
"hour": "11pm",
"weekday": "Mon",
"value": 4851
},
{
"hour": "12pm",
"weekday": "Tue",
"value": 4468
},
{
"hour": "1am",
"weekday": "Tue",
"value": 3306
},
{
"hour": "2am",
"weekday": "Tue",
"value": 3906
},
{
"hour": "3am",
"weekday": "Tue",
"value": 4413
},
{
"hour": "4am",
"weekday": "Tue",
"value": 4726
},
{
"hour": "5am",
"weekday": "Tue",
"value": 4584
},
{
"hour": "6am",
"weekday": "Tue",
"value": 5717
},
{
"hour": "7am",
"weekday": "Tue",
"value": 6504
},
{
"hour": "8am",
"weekday": "Tue",
"value": 8104
},
{
"hour": "9am",
"weekday": "Tue",
"value": 8813
},
{
"hour": "10am",
"weekday": "Tue",
"value": 9278
},
{
"hour": "11am",
"weekday": "Tue",
"value": 10425
},
{
"hour": "12am",
"weekday": "Tue",
"value": 10137
},
{
"hour": "1pm",
"weekday": "Tue",
"value": 9290
},
{
"hour": "2pm",
"weekday": "Tue",
"value": 9255
},
{
"hour": "3pm",
"weekday": "Tue",
"value": 9614
},
{
"hour": "4pm",
"weekday": "Tue",
"value": 9713
},
{
"hour": "5pm",
"weekday": "Tue",
"value": 9667
},
{
"hour": "6pm",
"weekday": "Tue",
"value": 8774
},
{
"hour": "7pm",
"weekday": "Tue",
"value": 8649
},
{
"hour": "8pm",
"weekday": "Tue",
"value": 9937
},
{
"hour": "9pm",
"weekday": "Tue",
"value": 10286
},
{
"hour": "10pm",
"weekday": "Tue",
"value": 9175
},
{
"hour": "11pm",
"weekday": "Tue",
"value": 8581
},
{
"hour": "12pm",
"weekday": "Wed",
"value": 8145
},
{
"hour": "1am",
"weekday": "Wed",
"value": 7177
},
{
"hour": "2am",
"weekday": "Wed",
"value": 5657
},
{
"hour": "3am",
"weekday": "Wed",
"value": 6802
},
{
"hour": "4am",
"weekday": "Wed",
"value": 8159
},
{
"hour": "5am",
"weekday": "Wed",
"value": 8449
},
{
"hour": "6am",
"weekday": "Wed",
"value": 9453
},
{
"hour": "7am",
"weekday": "Wed",
"value": 9947
},
{
"hour": "8am",
"weekday": "Wed",
"value": 11471
},
{
"hour": "9am",
"weekday": "Wed",
"value": 12492
},
{
"hour": "10am",
"weekday": "Wed",
"value": 9388
},
{
"hour": "11am",
"weekday": "Wed",
"value": 9928
},
{
"hour": "12am",
"weekday": "Wed",
"value": 9644
},
{
"hour": "1pm",
"weekday": "Wed",
"value": 9034
},
{
"hour": "2pm",
"weekday": "Wed",
"value": 8964
},
{
"hour": "3pm",
"weekday": "Wed",
"value": 9069
},
{
"hour": "4pm",
"weekday": "Wed",
"value": 8898
},
{
"hour": "5pm",
"weekday": "Wed",
"value": 8322
},
{
"hour": "6pm",
"weekday": "Wed",
"value": 6909
},
{
"hour": "7pm",
"weekday": "Wed",
"value": 5810
},
{
"hour": "8pm",
"weekday": "Wed",
"value": 5151
},
{
"hour": "9pm",
"weekday": "Wed",
"value": 4911
},
{
"hour": "10pm",
"weekday": "Wed",
"value": 4487
},
{
"hour": "11pm",
"weekday": "Wed",
"value": 4118
},
{
"hour": "12pm",
"weekday": "Thu",
"value": 3689
},
{
"hour": "1am",
"weekday": "Thu",
"value": 3081
},
{
"hour": "2am",
"weekday": "Thu",
"value": 6525
},
{
"hour": "3am",
"weekday": "Thu",
"value": 6228
},
{
"hour": "4am",
"weekday": "Thu",
"value": 6917
},
{
"hour": "5am",
"weekday": "Thu",
"value": 6568
},
{
"hour": "6am",
"weekday": "Thu",
"value": 6405
},
{
"hour": "7am",
"weekday": "Thu",
"value": 8106
},
{
"hour": "8am",
"weekday": "Thu",
"value": 8542
},
{
"hour": "9am",
"weekday": "Thu",
"value": 8501
},
{
"hour": "10am",
"weekday": "Thu",
"value": 8802
},
{
"hour": "11am",
"weekday": "Thu",
"value": 9420
},
{
"hour": "12am",
"weekday": "Thu",
"value": 8966
},
{
"hour": "1pm",
"weekday": "Thu",
"value": 8135
},
{
"hour": "2pm",
"weekday": "Thu",
"value": 8224
},
{
"hour": "3pm",
"weekday": "Thu",
"value": 8387
},
{
"hour": "4pm",
"weekday": "Thu",
"value": 8218
},
{
"hour": "5pm",
"weekday": "Thu",
"value": 7641
},
{
"hour": "6pm",
"weekday": "Thu",
"value": 6469
},
{
"hour": "7pm",
"weekday": "Thu",
"value": 5441
},
{
"hour": "8pm",
"weekday": "Thu",
"value": 4952
},
{
"hour": "9pm",
"weekday": "Thu",
"value": 4643
},
{
"hour": "10pm",
"weekday": "Thu",
"value": 4393
},
{
"hour": "11pm",
"weekday": "Thu",
"value": 4017
},
{
"hour": "12pm",
"weekday": "Fri",
"value": 4022
},
{
"hour": "1am",
"weekday": "Fri",
"value": 3063
},
{
"hour": "2am",
"weekday": "Fri",
"value": 3638
},
{
"hour": "3am",
"weekday": "Fri",
"value": 3968
},
{
"hour": "4am",
"weekday": "Fri",
"value": 4070
},
{
"hour": "5am",
"weekday": "Fri",
"value": 4019
},
{
"hour": "6am",
"weekday": "Fri",
"value": 4548
},
{
"hour": "7am",
"weekday": "Fri",
"value": 5465
},
{
"hour": "8am",
"weekday": "Fri",
"value": 6909
},
{
"hour": "9am",
"weekday": "Fri",
"value": 7706
},
{
"hour": "10am",
"weekday": "Fri",
"value": 7867
},
{
"hour": "11am",
"weekday": "Fri",
"value": 8615
},
{
"hour": "12am",
"weekday": "Fri",
"value": 8218
},
{
"hour": "1pm",
"weekday": "Fri",
"value": 7604
},
{
"hour": "2pm",
"weekday": "Fri",
"value": 7429
},
{
"hour": "3pm",
"weekday": "Fri",
"value": 7488
},
{
"hour": "4pm",
"weekday": "Fri",
"value": 7493
},
{
"hour": "5pm",
"weekday": "Fri",
"value": 6998
},
{
"hour": "6pm",
"weekday": "Fri",
"value": 5941
},
{
"hour": "7pm",
"weekday": "Fri",
"value": 5068
},
{
"hour": "8pm",
"weekday": "Fri",
"value": 4636
},
{
"hour": "9pm",
"weekday": "Fri",
"value": 4241
},
{
"hour": "10pm",
"weekday": "Fri",
"value": 3858
},
{
"hour": "11pm",
"weekday": "Fri",
"value": 3833
},
{
"hour": "12pm",
"weekday": "Sat",
"value": 3503
},
{
"hour": "1am",
"weekday": "Sat",
"value": 2842
},
{
"hour": "2am",
"weekday": "Sat",
"value": 2808
},
{
"hour": "3am",
"weekday": "Sat",
"value": 2399
},
{
"hour": "4am",
"weekday": "Sat",
"value": 2280
},
{
"hour": "5am",
"weekday": "Sat",
"value": 2139
},
{
"hour": "6am",
"weekday": "Sat",
"value": 2527
},
{
"hour": "7am",
"weekday": "Sat",
"value": 2940
},
{
"hour": "8am",
"weekday": "Sat",
"value": 3066
},
{
"hour": "9am",
"weekday": "Sat",
"value": 3494
},
{
"hour": "10am",
"weekday": "Sat",
"value": 3287
},
{
"hour": "11am",
"weekday": "Sat",
"value": 3416
},
{
"hour": "12am",
"weekday": "Sat",
"value": 3432
},
{
"hour": "1pm",
"weekday": "Sat",
"value": 3523
},
{
"hour": "2pm",
"weekday": "Sat",
"value": 3542
},
{
"hour": "3pm",
"weekday": "Sat",
"value": 3347
},
{
"hour": "4pm",
"weekday": "Sat",
"value": 3292
},
{
"hour": "5pm",
"weekday": "Sat",
"value": 3416
},
{
"hour": "6pm",
"weekday": "Sat",
"value": 3131
},
{
"hour": "7pm",
"weekday": "Sat",
"value": 3057
},
{
"hour": "8pm",
"weekday": "Sat",
"value": 3227
},
{
"hour": "9pm",
"weekday": "Sat",
"value": 3060
},
{
"hour": "10pm",
"weekday": "Sat",
"value": 2855
},
{
"hour": "11pm",
"weekday": "Sat",
"value": 11000
}
]
PARALLEL_COORDINATES_DATA = [
[
"Mercury",
4222.6,
57.9,
0.2408467,
0.05527,
4879
],
[
"Venus",
2802,
108.2,
0.61519726,
0.815,
12104
],
[
"Earth",
24,
149.6,
1.0000174,
1,
12756
],
[
"Mars",
24.7,
227.9,
1.8808158,
0.10745,
6792
],
[
"Jupiter",
9.9,
778.6,
11.862615,
317.83,
142984
],
[
"Saturn",
10.7,
1433.5,
29.447498,
95.159,
120536
],
[
"Uranus",
17.2,
2872.5,
84.016846,
14.5,
51118
],
[
"Neptune",
16.1,
4495.1,
164.79132,
17.204,
49528
]
]
PARALLEL_COORDINATES_DATA_2 = [
['Beef, round, bottom round, roast, separable lean only, trimmed to 1/8" fat, all grades, cooked', 'Beef Products',
28.0, 0.006, 0.037, 0.0, 0.0, 0.23, 0.0, 0.0, 5.72, 65.93, 163.0, 1.972, 2.391, 0.231, 31792.0],
['Turkey, white, rotisserie, deli cut', 'Sausages and Luncheon Meats', 13.5, 0.016, 1.2, 0.4, 0.01, 0.349, 7.7, 4.0,
3.0, 72.0, 112.0, 0.118, 0.591, 0.37, 29432.0],
['Salad dressing, buttermilk, lite', 'Fats and Oils', 1.25, 0.04, 1.12, 1.1, 0.0006, 0.132, 21.33, 3.77, 12.42,
62.04, 202.0, 1.25, 2.794, 4.213, 28256.0],
['Beef, rib, small end (ribs 10-12), separable lean and fat, trimmed to 1/8" fat, select, cooked, roasted',
'Beef Products', 22.76, 0.013, 0.064, 0.0, 0.0, 0.328, 0.0, 0.0, 25.02, 50.84, 323.0, 10.05, 10.81, 0.91, 31759.0],
['Pectin, liquid', 'Sweets', 0.0, 0.0, 0.0, 2.1, 0.0, 0.0, 2.1, 0.0, 0.0, 96.9, 11.0, 0.0, 0.0, 0.0, 34037.0],
['Beef, chuck, arm pot roast, separable lean only, trimmed to 0" fat, choice, cooked, braised', 'Beef Products',
33.36, 0.014, 0.054, 0.0, 0.0, 0.262, 0.0, 0.0, 7.67, 58.83, 212.0, 2.903, 3.267, 0.268, 31532.0],
['Nuts, chestnuts, japanese, dried', 'Nut and Seed Products', 5.25, 0.072, 0.034, 0.0, 0.0613, 0.768, 81.43, 0.0,
1.24, 9.96, 360.0, 0.183, 0.65, 0.322, 31342.0],
['English muffins, mixed-grain (includes granola)', 'Baked Products', 9.1, 0.196, 0.334, 2.8, 0.0, 0.156, 46.3,
0.81, 1.8, 40.2, 235.0, 0.23, 0.827, 0.559, 33595.0],
['French toast, prepared from recipe, made with low fat (2%) milk', 'Baked Products', 7.7, 0.1, 0.479, 0.0, 0.0003,
0.134, 25.0, 0.0, 10.8, 54.7, 229.0, 2.723, 4.524, 2.594, 33609.0],
['Sauce, cheese sauce mix, dry', 'Soups, Sauces, and Gravies', 7.68, 0.204, 3.202, 1.0, 0.0009, 0.428, 60.52, 10.26,
18.33, 3.78, 438.0, 8.44, 6.868, 0.983, 28984.0],
['Chicken, broilers or fryers, skin only, raw', 'Poultry Products', 13.33, 0.011, 0.063, 0.0, 0.0, 0.103, 0.0, 0.0,
32.35, 54.22, 349.0, 9.08, 13.54, 6.81, 28421.0],
['Beef, brisket, flat half, separable lean only, trimmed to 1/8" fat, all grades, raw', 'Beef Products', 21.57,
0.016, 0.074, 0.0, 0.0, 0.336, 0.0, 0.0, 3.84, 73.76, 127.0, 1.425, 1.611, 0.166, 31466.0],
['Soup, pea, low sodium, prepared with equal volume water', 'Soups, Sauces, and Gravies', 3.2, 0.012, 0.01, 1.9,
0.0006, 0.071, 9.88, 3.19, 1.09, 84.55, 62.0, 0.524, 0.372, 0.142, 29144.0],
['Chilchen (Red Berry Beverage) (Navajo)', 'Ethnic Foods', 0.81, 0.007, 0.015, 0.0, 0.0, 0.028, 8.68, 2.6, 0.63,
89.69, 44.0, 0.075, 0.135, 0.151, 34928.0],
['Seeds, pumpkin and squash seed kernels, dried', 'Nut and Seed Products', 30.23, 0.046, 0.007, 6.0, 0.0019, 0.809,
10.71, 1.4, 49.05, 5.23, 559.0, 8.659, 16.242, 20.976, 31403.0],
['Chicken, roasting, giblets, raw', 'Poultry Products', 18.14, 0.01, 0.077, 0.0, 0.013099999999999999, 0.227, 1.14,
0.0, 5.04, 74.73, 127.0, 1.54, 1.28, 1.26, 28469.0],
['Veal, shoulder, whole (arm and blade), separable lean only, raw', 'Lamb, Veal, and Game Products', 19.79, 0.022,
0.092, 0.0, 0.0, 0.311, 0.0, 0.0, 3.0, 76.58, 112.0, 0.9, 0.96, 0.31, 33271.0],
['Fast foods, enchirito, with cheese, beef, and beans', 'Fast Foods', 9.27, 0.113, 0.648, 0.0, 0.0024, 0.29, 17.51,
0.0, 8.33, 62.67, 178.0, 4.118, 3.377, 0.17, 34435.0],
['Beef, round, top round, steak, separable lean and fat, trimmed to 1/8" fat, all grades, raw', 'Beef Products',
22.06, 0.021, 0.06, 0.0, 0.0, 0.349, 0.0, 0.0, 7.93, 69.04, 166.0, 3.123, 3.428, 0.303, 31888.0],
['Bread, pumpernickel, toasted', 'Baked Products', 9.5, 0.074, 0.738, 7.1, 0.0, 0.228, 52.2, 0.58, 3.4, 31.8, 275.0,
0.481, 1.024, 1.359, 33387.0],
['Cereals, corn grits, white, regular and quick, unenriched, cooked with water, without salt', 'Breakfast Cereals',
1.42, 0.003, 0.002, 0.3, 0.0, 0.021, 12.87, 0.1, 0.19, 85.35, 59.0, 0.025, 0.048, 0.083, 29807.0],
['Lamb, variety meats and by-products, brain, cooked, braised', 'Lamb, Veal, and Game Products', 12.55, 0.012,
0.134, 0.0, 0.012, 0.205, 0.0, 0.0, 10.17, 75.73, 145.0, 2.6, 1.84, 1.04, 33193.0],
['Fast foods, cheeseburger; single, large patty; with condiments, vegetables and ham', 'Fast Foods', 15.55, 0.119,
0.674, 0.0, 0.0029, 0.212, 13.01, 0.0, 18.97, 50.05, 286.0, 8.317, 7.429, 1.516, 34401.0],
["CAMPBELL Soup Company, CAMPBELL'S Red and White, 25% Less Sodium Cream of Mushroom Soup, condensed",
'Soups, Sauces, and Gravies', 1.61, 0.0, 0.524, 1.6, 0.0, 0.105, 6.45, 0.81, 6.45, 84.2, 89.0, 0.806, 2.419, 3.226,
28784.0],
['Cereals ready-to-eat, wheat and malt barley flakes', 'Breakfast Cereals', 10.0, 0.039, 0.482, 8.8, 0.0, 0.34,
81.5, 17.6, 2.9, 3.2, 365.0, 0.6, 0.816, 1.05, 29695.0],
['Cream, sour, reduced fat, cultured', 'Dairy and Egg Products', 2.94, 0.104, 0.041, 0.0, 0.0009, 0.129, 4.26, 0.16,
12.0, 80.14, 135.0, 7.47, 3.466, 0.446, 27590.0],
['Pork, fresh, loin, sirloin (chops or roasts), bone-in, separable lean and fat, raw', 'Pork Products', 20.48,
0.014, 0.057, 0.0, 0.0, 0.336, 0.0, 0.0, 8.96, 70.29, 168.0, 1.851, 2.157, 0.754, 30382.0],
['Wheat flour, white (industrial), 13% protein, bleached, unenriched', 'Cereal Grains and Pasta', 13.07, 0.024,
0.002, 2.4, 0.0, 0.128, 72.2, 1.1, 1.38, 12.82, 362.0, 0.189, 0.152, 0.683, 34293.0],
['Fish, herring, Pacific, raw', 'Finfish and Shellfish Products', 16.39, 0.083, 0.074, 0.0, 0.0, 0.423, 0.0, 0.0,
13.88, 71.52, 195.0, 3.257, 6.872, 2.423, 32413.0],
['Water, non-carbonated, bottles, natural fruit flavors, sweetened with low calorie sweetener', 'Beverages', 0.0,
0.001, 0.003, 0.0, 0.0, 0.004, 0.15, 0.0, 0.0, 99.85, 1.0, 0.0, 0.0, 0.0, 32324.0],
['Babyfood, fruit, peaches, junior', 'Baby Foods', 0.94, 0.004, 0.004, 1.3, 0.0461, 0.195, 14.48, 11.5, 0.33, 83.65,
65.0, 0.02, 0.01, 0.04, 27921.0],
['Alcoholic beverage, pina colada, prepared-from-recipe', 'Beverages', 0.42, 0.008, 0.006, 0.3,
0.004900000000000001, 0.071, 22.66, 22.33, 1.88, 64.99, 174.0, 1.636, 0.082, 0.033, 32092.0],
['Pasta, homemade, made with egg, cooked', 'Cereal Grains and Pasta', 5.28, 0.01, 0.083, 0.0, 0.0, 0.021, 23.54,
0.0, 1.74, 68.71, 130.0, 0.408, 0.508, 0.521, 34217.0],
["WENDY'S, French Fries", 'Fast Foods', 3.89, 0.015, 0.172, 3.7, 0.0050999999999999995, 0.575, 39.44, 0.0, 16.23,
38.81, 319.0, 3.201, 8.518, 3.728, 34676.0],
['Beef, rib, small end (ribs 10-12), separable lean only, trimmed to 1/8" fat, select, cooked, broiled',
'Beef Products', 30.87, 0.022, 0.066, 0.0, 0.0, 0.409, 0.0, 0.0, 6.22, 62.45, 188.0, 2.369, 2.484, 0.222, 31767.0],
['Turkey, fryer-roasters, back, meat and skin, cooked, roasted', 'Poultry Products', 26.15, 0.036, 0.07, 0.0, 0.0,
0.208, 0.0, 0.0, 10.24, 63.27, 204.0, 2.99, 3.53, 2.64, 28608.0],
['Cereals ready-to-eat, GENERAL MILLS, Cinnamon Toast Crunch, reduced sugar', 'Breakfast Cereals', 4.9, 0.714,
0.718, 8.9, 0.02, 0.15, 77.4, 22.1, 10.2, 2.64, 381.0, 1.0, 6.8, 2.1, 29464.0],
['Cookies, sugar, prepared from recipe, made with margarine', 'Baked Products', 5.9, 0.073, 0.491, 1.2, 0.0001,
0.077, 60.0, 24.86, 23.4, 8.9, 472.0, 4.69, 10.189, 7.29, 33525.0],
['Fruit punch juice drink, frozen concentrate', 'Beverages', 0.3, 0.02, 0.01, 0.2, 0.0197, 0.27, 43.1, 0.0, 0.7,
55.5, 175.0, 0.087, 0.086, 0.173, 32234.0],
['GREEN GIANT, HARVEST BURGER, Original Flavor, All Vegetable Protein Patties, frozen',
'Legumes and Legume Products', 20.0, 0.113, 0.457, 6.3, 0.0, 0.48, 7.8, 0.0, 4.6, 65.0, 153.0, 1.13, 2.372, 0.295,
32706.0],
['Babyfood, cereal, mixed, with bananas, prepared with whole milk', 'Baby Foods', 3.82, 0.153, 0.048, 0.4, 0.0004,
0.178, 10.0, 5.9, 3.46, 81.81, 86.0, 1.808, 0.859, 0.342, 27805.0],
['Gelatin desserts, dry mix, reduced calorie, with aspartame, prepared with water', 'Sweets', 0.83, 0.003, 0.048,
0.0, 0.0, 0.001, 4.22, 0.0, 0.0, 94.74, 20.0, 0.0, 0.0, 0.0, 33998.0],
['Soup, chicken with rice, canned, prepared with equal volume water', 'Soups, Sauces, and Gravies', 1.45, 0.009,
0.238, 0.3, 0.0001, 0.041, 2.92, 0.08, 0.78, 93.87, 24.0, 0.185, 0.37, 0.17, 29075.0],
['Fast foods, corn on the cob with butter', 'Fast Foods', 3.06, 0.003, 0.02, 0.0, 0.0047, 0.246, 21.88, 0.0, 2.35,
72.05, 106.0, 1.125, 0.688, 0.419, 34422.0],
['Infant formula, PBM PRODUCTS, store brand, ready-to-feed (formerly WYETH-AYERST)', 'Baby Foods', 1.4, 0.041,
0.014, 0.0, 0.0056, 0.055, 6.39, 6.4, 3.5, 88.1, 63.0, 1.6, 1.4, 0.5, 28103.0],
['Alcoholic beverage, liqueur, coffee with cream, 34 proof', 'Beverages', 2.8, 0.016, 0.092, 0.0, 0.0002, 0.032,
20.9, 19.76, 15.7, 46.5, 327.0, 9.664, 4.458, 0.669, 32088.0],
['Fruit cocktail, (peach and pineapple and pear and grape and cherry), canned, juice pack, solids and liquids',
'Fruits and Fruit Juices', 0.46, 0.008, 0.004, 1.0, 0.0027, 0.095, 11.86, 10.86, 0.01, 87.44, 46.0, 0.0, 0.002,
0.004, 29947.0],
["CAMPBELL Soup Company, CAMPBELL'S CHUNKY Soups, Split Pea 'N' Ham Soup", 'Soups, Sauces, and Gravies', 4.9, 0.008,
0.318, 2.0, 0.001, 0.0, 12.24, 2.04, 1.02, 81.0, 78.0, 0.408, 0.0, 0.0, 28763.0],
['Beverage, Horchata, dry mix, unprepared, variety of brands, all with morro seeds', 'Beverages', 7.5, 0.06, 0.003,
4.0, 0.0003, 0.18, 79.05, 38.9, 7.46, 4.89, 413.0, 2.086, 3.174, 2.069, 32121.0],
['Beans, adzuki, mature seed, cooked, boiled, with salt', 'Legumes and Legume Products', 7.52, 0.028, 0.244, 7.3,
0.0, 0.532, 24.77, 0.0, 0.1, 66.29, 128.0, 0.036, 0.0, 0.0, 32592.0],
['Beans, french, mature seeds, raw', 'Legumes and Legume Products', 18.81, 0.186, 0.018, 25.2, 0.0046, 1.316, 64.11,
0.0, 2.02, 10.77, 343.0, 0.221, 0.138, 1.207, 32618.0],
['Lamb, domestic, loin, separable lean and fat, trimmed to 1/8" fat, choice, cooked, roasted',
'Lamb, Veal, and Game Products', 23.27, 0.018, 0.064, 0.0, 0.0, 0.25, 0.0, 0.0, 21.12, 54.33, 290.0, 9.08, 8.65,
1.69, 33129.0],
['Egg, whole, cooked, hard-boiled', 'Dairy and Egg Products', 12.58, 0.05, 0.124, 0.0, 0.0, 0.126, 1.12, 1.12,
10.61, 74.62, 155.0, 3.267, 4.077, 1.414, 27610.0],
['Cereals ready-to-eat, GENERAL MILLS, CHEERIOS, Banana Nut', 'Breakfast Cereals', 5.4, 0.357, 0.57, 6.1, 0.0536,
0.252, 84.7, 33.4, 4.0, 2.2, 374.0, 0.7, 1.2, 1.9, 29454.0],
['Cornmeal, self-rising, bolted, plain, enriched, yellow', 'Cereal Grains and Pasta', 8.28, 0.361, 1.247, 6.7, 0.0,
0.255, 70.28, 0.0, 3.4, 12.59, 334.0, 0.478, 0.897, 1.55, 34164.0],
['Snacks, popcorn, oil-popped, white popcorn', 'Snacks', 9.0, 0.01, 0.884, 10.0, 0.0003, 0.225, 57.2, 0.0, 28.1,
2.8, 500.0, 4.89, 8.17, 13.42, 34841.0],
['Kamut, uncooked', 'Cereal Grains and Pasta', 14.7, 0.024, 0.006, 9.1, 0.0, 0.446, 70.38, 8.19, 2.2, 10.95, 337.0,
0.192, 0.214, 0.616, 34177.0],
['Snacks, pretzels, hard, plain, made with unenriched flour, unsalted', 'Snacks', 9.1, 0.036, 0.289, 2.8, 0.0,
0.146, 79.2, 0.0, 3.5, 3.3, 381.0, 0.75, 1.36, 1.22, 34864.0],
['Fast foods, danish pastry, fruit', 'Fast Foods', 5.06, 0.023, 0.354, 0.0, 0.0017, 0.117, 47.94, 0.0, 16.95, 29.0,
356.0, 3.527, 10.74, 1.668, 34430.0],
['Chicken, broilers or fryers, meat and skin and giblets and neck, cooked, fried, flour', 'Poultry Products', 28.57,
0.017, 0.086, 0.0, 0.0005, 0.237, 3.27, 0.0, 15.27, 51.88, 272.0, 4.16, 5.99, 3.51, 28395.0],
['Cheese, monterey', 'Dairy and Egg Products', 24.48, 0.746, 0.536, 0.0, 0.0, 0.081, 0.68, 0.5, 30.28, 41.01, 373.0,
19.066, 8.751],
['Restaurant, Chinese, fried rice', 'Restaurant Foods', 4.67, 0.014, 0.396, 1.1, 0.0, 0.088, 30.99, 0.42, 2.27,
60.99, 163.0, 0.497, 0.598, 0.947, 35087.0],
['Pork, cured, ham and water product, slice, bone-in, separable lean and fat, heated, pan-broil', 'Pork Products',
19.85, 0.011, 1.188, 0.0, 0.0, 0.281, 1.41, 1.03, 7.78, 67.21, 155.0, 2.586, 3.577, 1.032, 30219.0],
['Bread, protein, toasted (includes gluten)', 'Baked Products', 13.2, 0.136, 0.601, 3.3, 0.0, 0.346, 48.1, 1.44,
2.4, 34.0, 270.0, 0.364, 0.201, 1.109, 33385.0],
['Cheese, dry white, queso seco', 'Dairy and Egg Products', 24.51, 0.661, 1.808, 0.0, 0.0, 0.116, 2.04, 0.55, 24.35,
42.17, 325.0, 13.718, 6.418],
['Spices, thyme, dried', 'Spices and Herbs', 9.11, 1.89, 0.055, 37.0, 0.05, 0.814, 63.94, 1.71, 7.43, 7.79, 276.0,
2.73, 0.47, 1.19, 27771.0],
['LITTLE CAESARS 14" Pepperoni Pizza, Large Deep Dish Crust', 'Fast Foods', 12.93, 0.201, 0.492, 1.5, 0.0, 0.173,
29.03, 3.39, 10.81, 44.98, 265.0, 4.314, 3.151, 1.756, 34535.0],
['Fish, sunfish, pumpkin seed, raw', 'Finfish and Shellfish Products', 19.4, 0.08, 0.08, 0.0, 0.001, 0.35, 0.0, 0.0,
0.7, 79.5, 89.0, 0.139, 0.117, 0.246, 32511.0],
['Soup, escarole, canned, ready-to-serve', 'Soups, Sauces, and Gravies', 0.62, 0.013, 1.558, 0.0, 0.0018, 0.107,
0.72, 0.0, 0.73, 96.89, 11.0, 0.22, 0.33, 0.15, 29120.0],
['KRAFT CHEEZ WHIZ LIGHT Pasteurized Process Cheese Product', 'Dairy and Egg Products', 16.3, 0.418, 1.705, 0.2,
0.0004, 0.297, 16.2, 8.2, 9.5, 51.5, 215.0, 6.4, 0.0, 0.0, 27633.0],
['Alcoholic beverage, wine, light', 'Beverages', 0.07, 0.009, 0.007, 0.0, 0.0, 0.088, 1.17, 1.15, 0.0, 92.23, 49.0,
0.0, 0.0, 0.0, 32102.0],
['Apricots, canned, extra light syrup pack, with skin, solids and liquids', 'Fruits and Fruit Juices', 0.6, 0.01,
0.002, 1.6, 0.004, 0.14, 12.5, 0.0, 0.1, 86.3, 49.0, 0.007, 0.043, 0.02, 29869.0],
['Beans, cranberry (roman), mature seeds, cooked, boiled, without salt', 'Legumes and Legume Products', 9.34, 0.05,
0.001, 10.0, 0.0, 0.387, 24.46, 0.0, 0.46, 64.65, 136.0, 0.119, 0.04, 0.199, 32614.0],
['Flan, caramel custard, dry mix', 'Sweets', 0.0, 0.024, 0.432, 0.0, 0.0, 0.153, 91.6, 0.0, 0.0, 7.8, 348.0, 0.0,
0.0, 0.0, 33946.0],
['USDA Commodity, salmon nuggets, cooked as purchased, unheated', 'Finfish and Shellfish Products', 11.97, 0.009,
0.167, 0.0, 0.0, 0.161, 11.85, 0.0, 10.43, 64.57, 189.0, 1.497, 4.33, 2.863, 32587.0],
['Soup, scotch broth, canned, prepared with equal volume water', 'Soups, Sauces, and Gravies', 2.03, 0.008, 0.415,
0.5, 0.0004, 0.065, 3.87, 0.26, 1.07, 91.84, 33.0, 0.455, 0.315, 0.225, 29156.0],
['Game meat, caribou, raw', 'Lamb, Veal, and Game Products', 22.63, 0.017, 0.057, 0.0, 0.0, 0.295, 0.0, 0.0, 3.36,
71.45, 127.0, 1.29, 1.01, 0.47, 32981.0],
['Buckwheat groats, roasted, cooked', 'Cereal Grains and Pasta', 3.38, 0.007, 0.004, 2.7, 0.0, 0.088, 19.94, 0.9,
0.62, 75.63, 92.0, 0.134, 0.188, 0.188, 34145.0],
['Peaches, frozen, sliced, sweetened', 'Fruits and Fruit Juices', 0.63, 0.003, 0.006, 1.8, 0.0942, 0.13, 23.98,
22.18, 0.13, 74.73, 94.0, 0.014, 0.048, 0.064, 30071.0],
['KENTUCKY FRIED CHICKEN, Popcorn Chicken', 'Fast Foods', 17.67, 0.032, 1.14, 1.0, 0.0, 0.288, 21.18, 0.0, 21.74,
35.62, 351.0, 3.954, 5.66, 10.093, 34528.0],
['Frostings, chocolate, creamy, dry mix, prepared with margarine', 'Sweets', 1.1, 0.012, 0.163, 1.9, 0.0, 0.143,
71.02, 0.0, 12.87, 14.07, 404.0, 1.74, 3.943, 2.797, 33951.0],
['Ham, minced', 'Sausages and Luncheon Meats', 16.28, 0.01, 1.245, 0.0, 0.0, 0.311, 1.84, 0.0, 20.68, 57.35, 263.0,
7.181, 9.581, 2.47, 29276.0],
['Millet, cooked', 'Cereal Grains and Pasta', 3.51, 0.003, 0.002, 1.3, 0.0, 0.062, 23.67, 0.13, 1.0, 71.41, 119.0,
0.172, 0.184, 0.508, 34191.0],
['Beans, kidney, red, mature seeds, canned', 'Legumes and Legume Products', 5.26, 0.025, 0.258, 5.4, 0.0011, 0.256,
15.54, 1.86, 0.36, 77.37, 84.0, 0.085, 0.171, 0.151, 32630.0],
['Pie, pumpkin, prepared from recipe', 'Baked Products', 4.5, 0.094, 0.225, 0.0, 0.0017, 0.186, 26.4, 0.0, 9.3,
58.5, 204.0, 3.171, 3.697, 1.81, 33733.0],
["Syrups, chocolate, HERSHEY'S Genuine Chocolate Flavored Lite Syrup", 'Sweets', 1.4, 0.011, 0.1, 0.0, 0.0, 0.187,
34.56, 28.57, 0.97, 62.16, 153.0, 0.0, 0.0, 0.0, 34113.0],
['Beef, rib, shortribs, separable lean and fat, choice, raw', 'Beef Products', 14.4, 0.009, 0.049, 0.0, 0.0, 0.232,
0.0, 0.0, 36.23, 48.29, 388.0, 15.76, 16.39, 1.32, 31743.0],
['Crackers, rye, wafers, seasoned', 'Baked Products', 9.0, 0.044, 0.887, 20.9, 0.0001, 0.454, 73.8, 0.0, 9.2, 4.0,
381.0, 1.287, 3.27, 3.608, 33551.0],
['Beef, rib, small end (ribs 10-12), separable lean and fat, trimmed to 1/8" fat, select, raw', 'Beef Products',
19.56, 0.022, 0.049, 0.0, 0.0, 0.297, 0.0, 0.0, 18.0, 62.0, 246.0, 7.264, 7.708, 0.685, 31760.0],
['Egg, whole, dried, stabilized, glucose reduced', 'Dairy and Egg Products', 48.17, 0.222, 0.548, 0.0, 0.0, 0.515,
2.38, 0.0, 43.95, 1.87, 615.0, 13.198, 17.564, 5.713, 27615.0],
['Beef, round, eye of round, roast, separable lean and fat, trimmed to 1/8" fat, select, raw', 'Beef Products',
21.3, 0.024, 0.062, 0.0, 0.0, 0.346, 0.0, 0.0, 7.57, 70.06, 159.0, 2.98, 3.271, 0.289, 31820.0],
['CAMPBELL Soup Company, PREGO Pasta, Traditional Italian Sauce, ready-to-serve', 'Soups, Sauces, and Gravies',
1.54, 0.015, 0.369, 2.3, 0.0018, 0.292, 10.0, 7.69, 1.15, 80.0, 54.0, 0.0, 0.0, 0.0, 28954.0],
['Bagels, cinnamon-raisin, toasted', 'Baked Products', 10.6, 0.02, 0.346, 2.5, 0.0006, 0.163, 59.3, 6.43, 1.8, 26.9,
294.0, 0.295, 0.188, 0.722, 33336.0],
['Blackberry juice, canned', 'Fruits and Fruit Juices', 0.3, 0.012, 0.001, 0.1, 0.011300000000000001, 0.135, 7.8,
7.7, 0.6, 90.9, 38.0, 0.018, 0.058, 0.344, 29892.0],
[
'Beef, chuck eye roast, boneless, America\'s Beef Roast, separable lean and fat, trimmed to 0" fat, select, cooked, roasted',
'Beef Products', 24.86, 0.019, 0.076, 0.0, 0.0, 0.312, 0.0, 0.0, 14.41, 61.29, 229.0, 6.192, 6.843, 1.2,
31498.0],
['Babyfood, ravioli, cheese filled, with tomato sauce', 'Baby Foods', 3.6, 0.054, 0.282, 0.1, 0.0001, 0.032, 16.3,
0.59, 2.2, 76.5, 99.0, 0.96, 0.57, 0.48, 27984.0],
["CAMPBELL Soup Company, Campbell's Pork and Beans", 'Legumes and Legume Products', 4.62, 0.031, 0.338, 5.4, 0.0,
0.0, 19.23, 6.15, 1.15, 73.7, 108.0, 0.385, 0.0, 0.0, 32664.0],
['Cockles, raw (Alaska Native)', 'Ethnic Foods', 13.5, 0.03, 0.0, 0.0, 0.0, 0.0, 4.7, 0.0, 0.7, 78.8, 79.0, 0.0,
0.0, 0.0, 34933.0],
['Soy protein concentrate, crude protein basis (N x 6.25), produced by acid wash', 'Legumes and Legume Products',
63.63, 0.363, 0.9, 5.5, 0.0, 0.45, 25.41, 0.0, 0.46, 5.8, 328.0, 0.052, 0.079, 0.201, 32861.0],
['Turkey, light or dark meat, smoked, cooked, skin and bone removed', 'Poultry Products', 29.3, 0.025, 0.996, 0.0,
0.0, 0.298, 0.0, 0.0, 5.0, 64.9, 162.0, 1.368, 1.046, 1.408, 28644.0]
]
|
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This sample uses the Google Spreadsheets data API and the Google
# Calendar data API. The script pulls a list of birthdays from a
# Google Spreadsheet and inserts them as webContent events in the
# user's Google Calendar.
#
# The script expects a certain format in the spreadsheet: Name,
# Birthday, Photo URL, and Edit URL as headers. Expected format
# of the birthday is: MM/DD. Edit URL is to be left blank by the
# user - the script uses this column to determine whether to insert
# a new event or to update an event at the URL.
#
# See the spreadsheet below for an example:
# http://spreadsheets.google.com/pub?key=pfMX-JDVnx47J0DxqssIQHg
#
__author__ = 'api.stephaniel@google.com (Stephanie Liu)'
from __future__ import print_function
try:
  from xml.etree import ElementTree  # for Python 2.5 users
except ImportError:
  from elementtree import ElementTree
import gdata.spreadsheet.service
import gdata.calendar.service
import gdata.calendar
import gdata.service
import atom.service
import gdata.spreadsheet
import atom
import string
import time
import datetime
import getopt
import getpass
import sys
class BirthdaySample:
# CONSTANTS: Expected column headers: name, birthday, photourl, editurl &
# default calendar reminder set to 2 days
NAME = "name"
BIRTHDAY = "birthday"
PHOTO_URL = "photourl"
EDIT_URL = "editurl"
REMINDER = 60 * 24 * 2 # minutes
def __init__(self, email, password):
""" Initializes spreadsheet and calendar clients.
Creates SpreadsheetsService and CalendarService objects and
authenticates to each with ClientLogin. For more information
about ClientLogin authentication:
http://code.google.com/apis/accounts/AuthForInstalledApps.html
Args:
email: string
password: string
"""
self.s_client = gdata.spreadsheet.service.SpreadsheetsService()
self.s_client.email = email
self.s_client.password = password
self.s_client.source = 'exampleCo-birthdaySample-1'
self.s_client.ProgrammaticLogin()
self.c_client = gdata.calendar.service.CalendarService()
self.c_client.email = email
self.c_client.password = password
self.c_client.source = 'exampleCo-birthdaySample-1'
self.c_client.ProgrammaticLogin()
def _PrintFeed(self, feed):
""" Prints out Spreadsheet feeds in human readable format.
Generic function taken from spreadsheetsExample.py.
Args:
feed: SpreadsheetsCellsFeed, SpreadsheetsListFeed,
SpreadsheetsWorksheetsFeed, or SpreadsheetsSpreadsheetsFeed
"""
for i, entry in enumerate(feed.entry):
if isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed):
print('%s %s\n' % (entry.title.text, entry.content.text))
elif isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed):
print('%s %s %s\n' % (i, entry.title.text, entry.content.text))
else:
print('%s %s\n' % (i, entry.title.text))
def _PromptForSpreadsheet(self):
""" Prompts user to select spreadsheet.
Gets and displays titles of all spreadsheets for user to
select. Generic function taken from spreadsheetsExample.py.
Args:
none
Returns:
spreadsheet ID that the user selected: string
"""
feed = self.s_client.GetSpreadsheetsFeed()
self._PrintFeed(feed)
    selection = input('\nSelection: ')
    # extract and return the spreadsheet ID
    return feed.entry[int(selection)].id.text.rsplit('/', 1)[1]
def _PromptForWorksheet(self, key):
""" Prompts user to select desired worksheet.
Gets and displays titles of all worksheets for user to
select. Generic function taken from spreadsheetsExample.py.
Args:
key: string
Returns:
the worksheet ID that the user selected: string
"""
feed = self.s_client.GetWorksheetsFeed(key)
self._PrintFeed(feed)
    selection = input('\nSelection: ')
    # extract and return the worksheet ID
    return feed.entry[int(selection)].id.text.rsplit('/', 1)[1]
def _AddReminder(self, event, minutes):
""" Adds a reminder to a calendar event.
This function sets the reminder attribute of the CalendarEventEntry.
The script sets it to 2 days by default, and this value is not
settable by the user. However, it can easily be changed to take this
option.
Args:
event: CalendarEventEntry
minutes: int
Returns:
the updated event: CalendarEventEntry
"""
for a_when in event.when:
if len(a_when.reminder) > 0:
a_when.reminder[0].minutes = minutes
else:
a_when.reminder.append(gdata.calendar.Reminder(minutes=minutes))
return self.c_client.UpdateEvent(event.GetEditLink().href, event)
def _CreateBirthdayWebContentEvent(self, name, birthday, photo_url):
""" Create the birthday web content event.
This function creates and populates a CalendarEventEntry. webContent
specific attributes are set. To learn more about the webContent
format:
http://www.google.com/support/calendar/bin/answer.py?answer=48528
Args:
name: string
birthday: string - expected format (MM/DD)
photo_url: string
Returns:
the webContent CalendarEventEntry
"""
title = "%s's Birthday!" % name
content = "It's %s's Birthday!" % name
    month = int(birthday.split("/")[0])
    day = int(birthday.split("/")[1])
    # Get current year
    year = int(time.ctime()[-4:])
# Calculate the "end date" for the all day event
start_time = datetime.date(year, month, day)
one_day = datetime.timedelta(days=1)
end_time = start_time + one_day
start_time_str = start_time.strftime("%Y-%m-%d")
end_time_str = end_time.strftime("%Y-%m-%d")
# Create yearly recurrence rule
recurrence_data = ("DTSTART;VALUE=DATE:%s\r\n"
"DTEND;VALUE=DATE:%s\r\n"
"RRULE:FREQ=YEARLY;WKST=SU\r\n" %
(start_time.strftime("%Y%m%d"), end_time.strftime("%Y%m%d")))
web_rel = "http://schemas.google.com/gCal/2005/webContent"
icon_href = "http://www.perstephanie.com/images/birthdayicon.gif"
icon_type = "image/gif"
extension_text = (
'gCal:webContent xmlns:gCal="http://schemas.google.com/gCal/2005"'
' url="%s" width="300" height="225"' % (photo_url))
event = gdata.calendar.CalendarEventEntry()
event.title = atom.Title(text=title)
event.content = atom.Content(text=content)
event.recurrence = gdata.calendar.Recurrence(text=recurrence_data)
event.when.append(gdata.calendar.When(start_time=start_time_str,
end_time=end_time_str))
# Adding the webContent specific XML
event.link.append(atom.Link(rel=web_rel, title=title, href=icon_href,
link_type=icon_type))
event.link[0].extension_elements.append(
atom.ExtensionElement(extension_text))
return event
def _InsertBirthdayWebContentEvent(self, event):
""" Insert event into the authenticated user's calendar.
Args:
event: CalendarEventEntry
Returns:
the newly created CalendarEventEntry
"""
edit_uri = '/calendar/feeds/default/private/full'
return self.c_client.InsertEvent(event, edit_uri)
def Run(self):
""" Run sample.
TODO: add exception handling
Args:
none
"""
key_id = self._PromptForSpreadsheet()
wksht_id = self._PromptForWorksheet(key_id)
feed = self.s_client.GetListFeed(key_id, wksht_id)
found_name = False
found_birthday = False
found_photourl = False
found_editurl = False
# Check to make sure all headers are present
# Need to find at least one instance of name, birthday, photourl
# editurl
if len(feed.entry) > 0:
for name, custom in feed.entry[0].custom.items():
if custom.column == self.NAME:
found_name = True
elif custom.column == self.BIRTHDAY:
found_birthday = True
elif custom.column == self.PHOTO_URL:
found_photourl = True
elif custom.column == self.EDIT_URL:
found_editurl = True
    if not (found_name and found_birthday and found_photourl and found_editurl):
print(("ERROR - Unexpected number of column headers. Should have: %s,"
" %s, %s, and %s." % (self.NAME, self.BIRTHDAY, self.PHOTO_URL,
self.EDIT_URL)))
sys.exit(1)
# For every row in the spreadsheet, grab all the data and either insert
# a new event into the calendar, or update the existing event
# Create dict to represent the row data to update edit link back to
# Spreadsheet
for entry in feed.entry:
d = {}
input_valid = True
for name, custom in entry.custom.items():
d[custom.column] = custom.text
month = int(d[self.BIRTHDAY].split("/")[0])
day = int(d[self.BIRTHDAY].split("/")[1])
# Some input checking. Script will allow the insert to continue with
# a missing name value.
if d[self.NAME] is None:
d[self.NAME] = " "
if d[self.PHOTO_URL] is None:
input_valid = False
if d[self.BIRTHDAY] is None:
input_valid = False
elif not 1 <= month <= 12 or not 1 <= day <= 31:
input_valid = False
if d[self.EDIT_URL] is None and input_valid:
event = self._CreateBirthdayWebContentEvent(d[self.NAME],
d[self.BIRTHDAY], d[self.PHOTO_URL])
event = self._InsertBirthdayWebContentEvent(event)
event = self._AddReminder(event, self.REMINDER)
print("Added %s's birthday!" % d[self.NAME])
elif input_valid: # Event already exists
edit_link = d[self.EDIT_URL]
event = self._CreateBirthdayWebContentEvent(d[self.NAME],
d[self.BIRTHDAY], d[self.PHOTO_URL])
event = self.c_client.UpdateEvent(edit_link, event)
event = self._AddReminder(event, self.REMINDER)
print("Updated %s's birthday!" % d[self.NAME])
if input_valid:
d[self.EDIT_URL] = event.GetEditLink().href
self.s_client.UpdateRow(entry, d)
else:
print("Warning - Skipping row, missing valid input.")
def main():
email = input("Please enter your email: ")
password = getpass.getpass("Please enter your password: ")
sample = BirthdaySample(email, password)
sample.Run()
if __name__ == '__main__':
main()
|
"""This module handles the encryption of decryption of data."""
import re
class Crypter:
"""This class generates the encryption key, calls the correct function, and returns the text.
Arguments:
- password: hashed (scrambled) version of the password, used to generate the encryption key
- text: in plaintext (for encryption) or encrypted (for decryption) depending on the request
Other variables:
- encryption_key: directly instructs the program how to encrypt the data (hashed and salted)
Returns:
- the encrypted (after encryption) or plaintext data (after decryption)."""
def __init__(self, password, text):
"""Initializes the variables."""
self.password = password
self.text = text
self._encryption_key = None
@property
def encryption_key(self): # Stored as property to allow other areas to call key
"""Returns the encryption key."""
return self._encryption_key
@encryption_key.setter
def encryption_key(self, value):
"""This function generates the encryption key."""
if value:
nums = [] # Individual numbers in encryption key
salt = [] # Salted numbers to be added to e
print("Generating encryption key...")
for i in self.password[0:4]: # 1st-4th characters of the password
if re.match(r'[Nr(Dc.V]', i):
nums.append(0)
elif re.match(r'[QnJeZak]', i):
nums.append(1)
elif re.match(r'[i7*zHuy]', i):
nums.append(2)
elif re.match(r'[SF[jhR3x]', i):
nums.append(3)
elif re.match(r'[pYo#5wU]', i):
nums.append(4)
elif re.match(r'[bT0gKA2]', i):
nums.append(5)
elif re.match(r'[@XvI8s1]', i):
nums.append(6)
elif re.match(r'[)OdlLqC]', i):
nums.append(7)
elif re.match(r'[/,W4tPB9]', i):
nums.append(8)
else:
nums.append(9)
for i in self.password[4:8]: # 5th-8th characters of the password
if re.match(r'[Aiy5oXt]', i):
nums.append(0)
elif re.match(r'[.ceDkUQ1]', i):
nums.append(1)
elif re.match(r'[HvaNxJ@]', i):
nums.append(2)
elif re.match(r'[qB\]g*L6]', i):
nums.append(3)
elif re.match(r'[Gs0ZOdbp]', i):
nums.append(4)
elif re.match(r'[lw,92nM]', i):
nums.append(5)
elif re.match(r'[ETf/z7(I]', i):
nums.append(6)
elif re.match(r'[uCFSPYh]', i):
nums.append(7)
elif re.match(r'[3R#!8Km]', i):
nums.append(8)
else:
nums.append(9)
for i in self.password[8:12]: # 9th-12th characters of the password
if re.match(r'[Fw9@#oS]', i):
nums.append(0)
elif re.match(r'[P4hXqaJ]', i):
nums.append(1)
elif re.match(r'[]CgHzBR]', i):
nums.append(2)
elif re.match(r'[7,eQtLm]', i):
nums.append(3)
elif re.match(r'[Dp1U83(]', i):
nums.append(4)
elif re.match(r'[csfT.kZi]', i):
nums.append(5)
elif re.match(r'[MYn5/vW]', i):
nums.append(6)
elif re.match(r'[Ky!NGu0V]', i):
nums.append(7)
elif re.match(r'[O[)IlbE]', i):
nums.append(8)
else:
nums.append(9)
# Begin salting, first quartet
if (nums[3] - nums[1]) < (nums[2] + nums[0]):
if nums[2] > nums[3]:
if (nums[0] + nums[1] + nums[2] + nums[3]) >= 12:
salt.append(0)
else:
salt.append(6)
elif nums[1] == 4:
salt.append(7)
else:
salt.append(1)
elif ((nums[0] + nums[3]) % 2) == 0:
salt.append(5)
else:
salt.append(8)
# Begin salting, second quartet
if (nums[7] % 3) == 0:
if (nums[5] - nums[4]) > nums[6]:
salt.append(2)
elif ((nums[7] * nums[5])+nums[6]) > (nums[6] * 4):
salt.append(4)
elif (nums[4] + 5) > (nums[7] + 2):
salt.append(9)
else:
salt.append(7)
elif (nums[5] + 2) >= 7:
if nums[4] < 8:
salt.append(3)
else:
salt.append(0)
else:
salt.append(6)
# Begin salting, third quartet
if (nums[8] - nums[11] + nums[10]) > nums[9]:
if (((nums[10] + nums[9]) or (nums[10] - nums[11])) % 2) == 0:
salt.append(7)
elif nums[10] == (nums[8] or nums[9] or nums[11]):
salt.append(2)
else:
salt.append(4)
elif (nums[9] <= nums[11]) or (nums[10] <= nums[11]):
if nums[10] == (2 or 5 or 7 or 8):
salt.append(1)
elif nums[8] * 2 >= 9:
salt.append(5)
else:
salt.append(3)
else:
if nums[8] < 6:
salt.append(9)
else:
salt.append(8)
# Begin salting, all numbers
salt.append((nums[4] - nums[0] + (nums[7] + (nums[8] - nums[5]))
* nums[1] + ((nums[11] + nums[9] + nums[6]) - (nums[2] + nums[0]) * 5)
- nums[3] + nums[10]) % 10)
# Salting the encryption key
nums.insert(2, salt[0])
nums.insert(9, salt[1])
nums.insert(11, salt[2])
nums.insert(7, salt[3])
for i in range(len(nums)): # int to str converter
nums[i] = str(nums[i])
self._encryption_key = ''.join(nums) # The encryption key
del nums
del salt
print('Encryption key generated!')
else:
pass # Request denied by program
def encrypter(self):
"""This encrypts the text."""
key = lambda num: int(self._encryption_key[num]) # Return the numeric value from the key
text = self.text
for pair in range(len(text)):
# All equations with multiplication add a value to prevent multiplying by 0
text[pair] = int(str(text[pair])[:4] + self._encryption_key[7]
+ str(text[pair])[4:]) # Salting
text[pair] *= int(self._encryption_key[15]
+ self._encryption_key[1]) + 3 # ex.: *int('3'+'0') -> *30
text[pair] += 200 * (key(8) - key(3))
text[pair] -= 15
text[pair] -= (key(6) * key(12)) + key(0)
text[pair] *= (13 * key(13)) + 2
text[pair] -= key(2) - key(4) - key(5) + key(11)
text[pair] += key(10) * key(9)
text[pair] *= (key(14) ** 2) + 1
text = [str(p) for p in text] # Convert list to string and join
new_text = '/'.join(text)
return new_text # Return as string for writing to file
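# --- Example usage sketch (not part of the original module) ---
# The password and numeric text blocks below are illustrative only; the key
# generation assumes a (pre-hashed) password of at least 12 characters.
if __name__ == '__main__':
    crypter = Crypter('aB3dE5gH1jK9', [1234567, 7654321])
    crypter.encryption_key = True   # any truthy value triggers key generation
    print(crypter.encrypter())      # prints the '/'-joined encrypted blocks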
|
import logging
import os
import subprocess
_ffmpeg_valid_path = None
_ffprobe_valid_path = None
def get_ffmpeg_path(ffmpeg_path=None):
"""
    Series of checks to look up a valid ffmpeg path. The result is cached to
    avoid repeating those checks on every call.
"""
global _ffmpeg_valid_path
if _ffmpeg_valid_path:
return _ffmpeg_valid_path
if ffmpeg_path:
if os.path.isdir(ffmpeg_path):
ffmpeg_path = os.path.join(ffmpeg_path, 'ffmpeg.exe')
if not os.path.exists(ffmpeg_path):
msg = '"%s" does not exist. Try with "ffmpeg" command.'
logging.warning(msg % ffmpeg_path)
ffmpeg_path = ffmpeg_path or "ffmpeg"
try:
        subprocess.check_call([ffmpeg_path, '-version'])
    except (subprocess.CalledProcessError, OSError):
raise Exception('FFmpeg not found.')
_ffmpeg_valid_path = ffmpeg_path
return _ffmpeg_valid_path
def get_ffprobe_path(ffprobe_path=None):
"""
Same as 'get_ffmpeg_path' for ffprobe
"""
global _ffprobe_valid_path
if _ffprobe_valid_path:
return _ffprobe_valid_path
if ffprobe_path:
if ffprobe_path.lower().endswith('ffmpeg.exe'):
ffprobe_path = os.path.dirname(ffprobe_path)
if os.path.isdir(ffprobe_path):
ffprobe_path = os.path.join(ffprobe_path, 'ffprobe.exe')
elif _ffmpeg_valid_path:
ffprobe_path = os.path.dirname(_ffmpeg_valid_path)
ffprobe_path = os.path.join(ffprobe_path, 'ffprobe.exe')
if ffprobe_path and not os.path.exists(ffprobe_path):
msg = '"%s" does not exist. Try with "ffprobe" command.'
logging.warning(msg % ffprobe_path)
ffprobe_path = ffprobe_path or "ffprobe"
try:
        subprocess.check_call([ffprobe_path, '-version'])
    except (subprocess.CalledProcessError, OSError):
        raise Exception('FFprobe not found.')
_ffprobe_valid_path = ffprobe_path
return _ffprobe_valid_path
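# --- Example usage sketch ---
# Assumes ffmpeg/ffprobe are installed; the directory below is illustrative.
if __name__ == '__main__':
    print(get_ffmpeg_path())                    # falls back to the "ffmpeg" command on PATH
    print(get_ffprobe_path('C:/tools/ffmpeg'))  # or point it at an install directory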
|
__author__ = "samrohn77@gmail.com"
from bs4 import BeautifulSoup
import requests
import json
import datetime
import time
import settings
import traceback
import MySQLdb
class Database:
def __init__(self):
self.host = settings.host
self.user_name = settings.user_name
self.password = settings.password
self.db = settings.database
self.connection = MySQLdb.connect(self.host, self.user_name,
self.password, self.db)
self.cursor = self.connection.cursor()
def insert(self, query):
try:
self.cursor.execute(query)
self.connection.commit()
except Exception as e:
            print(query)
            traceback.print_exc()
self.connection.rollback()
def update(self, query):
try:
self.cursor.execute(query)
self.connection.commit()
except Exception as e:
            print(query)
            traceback.print_exc()
self.connection.rollback()
def query(self, query):
cursor = self.connection.cursor( MySQLdb.cursors.DictCursor )
cursor.execute(query)
return cursor
def __del__(self):
self.connection.close()
if __name__ == "__main__":
db = Database()
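    # Example usage sketch (table/column names below are illustrative only):
    #   for row in db.query("SELECT id, name FROM listings LIMIT 5"):
    #       print(row["name"])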
|
import os
import io
import yaml
import shutil
import tarfile
import warnings
import argparse
import numpy as np
import pandas as pd
from enum import Enum
from pathlib import Path
from functools import partial
from typing import Union, List, Callable
# CONSTANTS
ARCHIVES_FMT = ['.zip', '.tar', '.gz']
# Global utils
def argmax(listT:List[Union[int, float]]) -> int:
return listT.index(max(listT))
def arglast(listT:List[Union[int, float]]) -> int:
return lastk(listT, 1)
def topk(listT:List[Union[int, float]], k:int=5) -> List[int]:
return sorted(range(len(listT)), key=lambda i: listT[i])[-k:]
def lastk(listT:List[Union[int, float]], k:int=5) -> int:
return len(listT) - k
def bestoflastk(listT:List[Union[int, float]], k:int=25) -> int:
listT = listT[-k:]
return listT.index(max(listT))
def kbestoflastl(listT:List[Union[int, float]]) -> List[int]:
pass #TODO generalized version of everything
reductions = ['argmax', 'arglast', 'topk', 'lastk']
# Groupby Utils
groupbies = ['as_is', 'in_or_else_group']
class Groupir():
r"""
How groups are formed
    Note 1 : hideous piece of code but works, to review if complexity increases
Note 2 : https://www.youtube.com/watch?v=MydtkuxcKw4
"""
def __init__(self, grouptype, kwargs=None):
self.type = grouptype
assert self.type in groupbies
if self.type == 'in_or_else_group':
assert 'grouped_values' in kwargs
self.groups = kwargs.get('grouped_values')
            if not isinstance(self.groups, list):
                self.groups = [self.groups]
def groupby(self, value):
if self.type == 'as_is':
return value
elif self.type == 'in_or_else_group':
return value if value in self.groups else 'else'
else:
            raise NotImplementedError(f'The type {self.type} does not exist')
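# Example sketch: Groupir('in_or_else_group', {'grouped_values': [0.1, 0.5]}).groupby(0.7)
# returns 'else', while .groupby(0.1) returns 0.1 unchanged (values are illustrative).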
# OS utils
def fast_scandir(dirname):
subfolders= [f.path for f in os.scandir(dirname) if f.is_dir() and not 'checkpoint' in str(f)]
for dirname in list(subfolders):
subfolders.extend(fast_scandir(dirname))
return subfolders
# Tensorboard utils
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
def get_scalars(scalars: List[str], exp_path:str):
""" Adapted from https://github.com/Spenhouet/tensorboard-aggregator/blob/master/aggregator.py """
scalar_accumulators = [EventAccumulator(exp_path).Reload().scalars for dname in os.listdir(exp_path)]
scalar_accumulators = [scalar_accumulator for scalar_accumulator in scalar_accumulators if scalar_accumulator.Keys()]
# Get and validate all scalar keys
all_keys = [tuple(scalar_accumulator.Keys()) for scalar_accumulator in scalar_accumulators]
if len(all_keys) == 0: return None
assert len(set(all_keys)) == 1, "All runs need to have the same scalar keys. There are mismatches in {}".format(all_keys)
keys = all_keys[0]
all_scalar_events_per_key = [[scalar_accumulator.Items(key) for scalar_accumulator in scalar_accumulators] for key in scalars]
values_per_key = [[[scalar_event.value for scalar_event in scalar_events] for scalar_events in all_scalar_events]
for all_scalar_events in all_scalar_events_per_key]
all_per_key = dict(zip(scalars, values_per_key))
return all_per_key
# Filters classes
from abc import ABC, abstractmethod
class Filter(ABC):
def __init__(self, key, value):
self.key = key
self.value = value
super().__init__()
@abstractmethod
def check(self):
pass
def __repr__(self):
return f'{type(self)} with key {self.key} and value {self.value}'
def explain(self):
print(f"--> The key {self.key} value is not {self.value} in this experiment.")
class AtomicFilter(Filter):
def check(self, other:Union[str, int, float]):
if not self.value == other:
self.explain()
return False
return True
class RangeFilter(Filter):
r""" Assert values are in a range (low <= value <= high)
Args:
value: a list of size 2 with index 0 the lower bound and 1 the higher bound of the range
"""
def __init__(self, key, value):
assert isinstance(value, list) and len(value) == 2 and value[1] > value[0]
super().__init__(key, value)
def check(self, other:Union[int, float]):
if not self.value[1] >= other or not self.value[0] <= other:
self.explain()
return False
return True
class ORFilterBank(Filter):
r"""
This is supposed to take a list of atomic-like filters and return true if one of these is true
"""
def check(self, other:Union[int, float, str]):
raise NotImplementedError("The ORFilter has not yet been implemented.")
class ANDFilterBank(Filter):
r"""
This is supposed to take a list of atomic-like filters and return true if all of these are true
"""
def check(self, other:Union[int, float, str]):
raise NotImplementedError("The ANDFilter has not yet been implemented.")
def report(vars_of_interest: List[str],
experiment_key_metric: str,
groupby: str,
experiment_filters: Union[List[Filter], Filter],
reduction_strategy: str='arglast',
log_dir:str='./',
groupby_kwargs:dict=None,
reduction_kwargs=None):
r"""
Produce a .csv report with all the `vars_of_interest` grouped by `groupby`
Args:
vars_of_interest: list of strings with the name of variables as they are logged in Tensorboard
experiment_key_metric: a string with the name of variable which the report is about (e.g. top1 accuracy). Is used to reduce the data.
groupby: by which hyperparameter are the experiments grouped
experiment_filters: list or single instance of Filter. Experiments whose hyperparameters do not comply with these filters won't be kept in the report.
reduction_strategy: how to reduce the data to a single datapoint. One of `reductions` list.
log_dir: where are the tensorboard logs stored
"""
# Ensure the experiment key metric is not in vars of interest
if experiment_key_metric in vars_of_interest: vars_of_interest.remove(experiment_key_metric)
# Get a list of all experiments, prepare list of results
experiments = fast_scandir(log_dir)#[f.path for f in os.scandir(log_dir) if f.is_dir()]
results = []
idx_tar = 0
groupby_type = groupby_kwargs.pop('type') if groupby_kwargs is not None else 'as_is'
groupir = Groupir(grouptype=groupby_type,
kwargs=groupby_kwargs)
for exp in experiments:
print(f"-> Processing experiment {exp}")
# Does the experiment folder adhere to lightning convention ?
hparams_path = f'{exp}/hparams.yaml'
if not os.path.isfile(hparams_path):
warnings.warn(f"The experiment {exp} does not have an hparams.yaml file. Skipping it.")
continue
# Parse hparams.yaml, check if `experiment_filter` are verified
hparams = yaml.load(io.open(hparams_path, 'r'), Loader=yaml.FullLoader)
assert groupby in hparams, f"{groupby} is not an existing hparam"
if is_experiment_filtered(hparams, experiment_filters):
print(f"---> The experiment {exp} is thus skipped.")
continue
# Parse tensorboard events and get the `vars_of_interest`+`hparams` into results as a dict
res = get_scalars(vars_of_interest+[experiment_key_metric], exp)
if res is not None:
# Now reduce to a value per experiment
idx_tar = apply_reduction_strategy(reduction_strategy, res[experiment_key_metric][0], reduction_kwargs) #TODO getting the 0th idx is not clean
res_dict = {
f'{var}_{groupby}{groupir.groupby(hparams[groupby])}': [res[var][0][idx_tar]] for var in vars_of_interest+[experiment_key_metric]
}
results.append(pd.DataFrame(res_dict))
else:
print(f"---> The experiment was empty :c")
# Produce .csv
#TODO are the NaN from concat an issue ?
with open(f'./report_per_{groupby}.csv', 'w') as f:
for filt in experiment_filters: f.write(f'# {filt}\n')
f.write(f'# {idx_tar} index kept\n')
pd.concat(results, ignore_index=True).to_csv(f'./report_per_{groupby}.csv', mode='a', index=False)
def is_experiment_filtered(hparams:dict,
experiment_filters:List[Filter]) -> bool:
for filt in experiment_filters:
# Does the experiment has the filter key ?
if not filt.key in hparams:
warnings.warn(f"--> The hparams.yaml file does not have this hyperparameter {filt.key}.")
return True
if not filt.check(hparams[filt.key]):
return True
return False
def apply_reduction_strategy(red:str, key_metric:list, kwargs=None):
return globals()[red](key_metric) if kwargs is None else globals()[red](key_metric, kwargs)
def parse_params(path:str) -> List[Filter]:
r"""
Parse a .yaml file comprising dict for each filter with key/value entries
"""
filts, metrics, groupby_kwargs = [], [], None
filts_raw = yaml.load(io.open(path, 'r'), Loader=yaml.FullLoader)
for filterDict in filts_raw:
if filterDict == 'Metrics':
metrics.extend(filts_raw[filterDict])
elif filterDict == 'Groupby':
groupby_kwargs = filts_raw[filterDict]
else:
nf = [globals()[filterDict](k, v) for k, v in filts_raw[filterDict].items()]
filts.extend(nf)
assert len(metrics) != 0 and len(filts) != 0
return filts, metrics, groupby_kwargs
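# Example params .yaml sketch (keys and values below are illustrative, not taken
# from an actual config):
#   AtomicFilter:
#     optimizer: sgd
#   RangeFilter:
#     lr: [0.001, 0.1]
#   Metrics:
#     - Loss/val_loss
#   Groupby:
#     type: in_or_else_group
#     grouped_values: [0.1, 0.5]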
def main(args):
r"""
Main process
"""
F_archive = False
p = Path(args.logdir)
if p.suffix in ARCHIVES_FMT:
if os.path.isdir(p.parent/ 'temp'): shutil.rmtree(p.parent/ 'temp') # ensure folder is empty
shutil.unpack_archive(p, p.parent/ 'temp') # unpack the archive in the empty folder ! c:
F_archive = True
args.logdir = p.parent / 'temp'
print(f'Successfully unpacked the archive at {args.logdir}')
filts, metrics, groupby_kwargs = parse_params(args.yaml)
report(vars_of_interest=metrics,
experiment_key_metric=args.target,
groupby=args.groupby,
experiment_filters=filts,
reduction_strategy=args.reduction,
groupby_kwargs=groupby_kwargs,
reduction_kwargs=args.reduction_kwargs,
log_dir=args.logdir)
if F_archive:
shutil.rmtree(args.logdir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--params-yaml', type=str, help='Path to your yaml.config file', default='example_filters.yaml', dest='yaml')
parser.add_argument('--logdir', type=str, help='Path to your tensorboard log dir (can be an archive file)', default='/export/tmp/henwood/lightning_logs')
parser.add_argument('--groupby', type=str, help='Hparams to group experiments', default='alpha')
parser.add_argument('--target', type=str, help='Experiment key metric, will be used to reduce the exp. to a single value', default="Acc/val_acc1")
parser.add_argument('--reduction', choices=reductions, type=str, help='How is reduced the experiment to a single value', default='arglast')
parser.add_argument('--reduction-kwargs', help='Extra parameter passed to Reduction')
# parser.add_argument('--metrics', nargs='+', help='All the variables to include in the report', default=['Cons/act_cons', 'Cons/cls_weight_cons'])
args = parser.parse_args()
main(args)
# filts = parse_filter(args.filters)
# report(vars_of_interest=args.metrics,
# experiment_key_metric=args.target,
# groupby=args.groupby,
# experiment_filters=filts,
# reduction_strategy=args.reduction,
# reduction_kwargs=args.reduction_kwargs,
# log_dir=args.logdir)
# /export/tmp/henwood/archive_logs/faulty_weights_exp.tar.gz
|
from .augmentation import *
from .compose import Compose
from .discard import FaceDiscarder
from .interpolate import ConditionalInterpolate, Interpolate
from .pad import Padding
from .rotate import Rotate
__all__ = [
"Compose",
"FaceDiscarder",
"ConditionalInterpolate",
"Interpolate",
"Padding",
"Rotate",
]
|
import numpy as np
import matplotlib.pyplot as plt
from signals import Signal, G_xx # custom module
def spectrogram(signal, binWidth, overlap):
"""
Generates the spectrogram of an input signal.
@param signal The input signal object
@return specs The values of the spectrogram
@return f The frequency spectrum
@return t The time domain
"""
    if not hasattr(signal, 'name'):
        raise AttributeError('input is not a Signal object')
f = np.linspace(0, binWidth // 2 * signal.sampleRate // binWidth, binWidth // 2)
t = np.linspace(0, signal.length / signal.sampleRate, signal.length // binWidth * overlap)
starts = np.arange(0, signal.length, binWidth // overlap)
starts = np.append(starts, signal.length)
specs = np.zeros((binWidth // 2, np.shape(t)[0]))
for step in range(np.shape(starts)[0] - overlap - 1):
subsignal = Signal(sampleRate=signal.sampleRate,
length=starts[step + overlap] - starts[step],
values=signal.values[starts[step]:starts[step + overlap]])
specs[:, step] = G_xx(subsignal)
return specs, f, t
if __name__ == '__main__':
bird = Signal('Outside', 'beepbeep.wav')
bird.generateValsFromFile()
# plt.plot(bird.values)
# plt.show()
# plt.xlabel('Frequency (Hz)')
# x = np.linspace(0, bird.length // 2 * bird.freqRes, bird.length // 2)
# plt.ylabel('Intensity (some sort of 10*log10 thing)')
# y = G_xx(bird)
# plt.xlim(0, 5000)
# plt.plot(x, y)
# plt.show()
plt.figure(figsize=(7, 5))
plt.title(bird.name)
specs, f, t = spectrogram(bird, 1000, 200)
specs = specs[:, ::20]
t = t[::20]
print('Heatmap size:', np.shape(specs))
t, f = np.meshgrid(t, f)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.ylim(0, 6000)
plt.pcolormesh(t, f, specs, vmin=-20)
# plt.colorbar()
plt.savefig('../../figures/spec.png', dpi=300, bbox_inches='tight')
|
# -*- coding: utf-8 -*-
#
# Testing for access control list authorization.
#
# ------------------------------------------------
# imports
# -------
from sqlalchemy import and_
from flask import g
from .fixtures import authorize, Article, ArticleFactory
# helpers
# -------
def query(name, check):
return Article.query.filter(and_(
Article.name.contains(name),
Article.authorized(check)
)).all()
# session
# -------
class TestOtherPermissions(object):
def test_other_delete(self, client, reader, editor):
# other open read permissions
g.user = None
article = ArticleFactory.create(
name='Other Delete Open Article',
owner=editor,
group=editor.groups[0]
).set_permissions('001')
g.user = reader
assert authorize.delete(article)
assert query(article.name, 'delete')
# other closed read permissions
g.user = None
article = ArticleFactory.create(
name='Other Delete Closed Article',
owner=editor,
group=editor.groups[0]
).set_permissions('770')
g.user = reader
assert not authorize.delete(article)
assert not query(article.name, 'delete')
return
def test_other_read(self, client, reader, editor):
# other open read permissions
g.user = None
article = ArticleFactory.create(
name='Other Read Open Article',
owner=editor,
group=editor.groups[0]
).set_permissions('002')
g.user = reader
assert authorize.read(article)
assert query(article.name, 'read')
# other closed read permissions
g.user = None
article = ArticleFactory.create(
name='Other Read Closed Article',
owner=editor,
group=editor.groups[0]
).set_permissions('660')
g.user = reader
assert not authorize.read(article)
assert not query(article.name, 'read')
return
def test_other_update(self, reader, editor):
# other open update permissions
g.user = None
article = ArticleFactory.create(
name='Other Write Open Article',
owner=editor,
group=editor.groups[0]
).set_permissions('004')
g.user = reader
assert authorize.update(article)
assert query(article.name, 'update')
# other closed update permissions
g.user = None
article = ArticleFactory.create(
name='Other Write Closed Article',
owner=editor,
group=editor.groups[0]
).set_permissions('662')
g.user = reader
assert not authorize.update(article)
assert not query(article.name, 'update')
return
def test_other_custom(self, reader, editor):
# other open custom permissions
g.user = None
article = ArticleFactory.create(
name='Other Custom Open Article',
owner=editor,
group=editor.groups[0]
).set_permissions(other=['custom'])
g.user = reader
assert authorize.custom(article)
assert query(article.name, 'custom')
# other closed custom permissions
g.user = None
article = ArticleFactory.create(
name='Other Custom Closed Article',
owner=editor,
group=editor.groups[0]
)
g.user = reader
assert not authorize.custom(article)
assert not query(article.name, 'custom')
return
class TestOwnerPermissions(object):
def test_owner_delete(self, client, reader, editor):
g.user = reader
# other open read permissions
article = ArticleFactory.create(
name='Owner Delete Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions('100')
assert authorize.delete(article)
assert query(article.name, 'delete')
# other closed read permissions
article = ArticleFactory.create(
name='Owner Delete Closed Article',
owner=reader,
group=editor.groups[0]
).set_permissions('070')
assert not authorize.delete(article)
assert not query(article.name, 'delete')
return
def test_owner_read(self, client, reader, editor):
g.user = reader
# other open read permissions
article = ArticleFactory.create(
name='Owner Read Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions('200')
assert authorize.read(article)
assert query(article.name, 'read')
# other closed read permissions
article = ArticleFactory.create(
name='Owner Read Closed Article',
owner=reader,
group=editor.groups[0]
).set_permissions('170')
assert not authorize.read(article)
assert not query(article.name, 'read')
return
def test_owner_update(self, reader, editor):
g.user = reader
# other open update permissions
article = ArticleFactory.create(
name='Owner Write Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions('400')
assert authorize.update(article)
assert query(article.name, 'update')
# other closed update permissions
article = ArticleFactory.create(
name='Owner Write Closed Article',
owner=reader,
group=editor.groups[0]
).set_permissions('270')
assert not authorize.update(article)
assert not query(article.name, 'update')
return
def test_owner_custom(self, reader, editor):
g.user = reader
# other open update permissions
article = ArticleFactory.create(
name='Owner Custom Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions(owner=['custom'])
assert authorize.custom(article)
assert query(article.name, 'custom')
# other closed update permissions
article = ArticleFactory.create(
name='Owner Custom Closed Article',
owner=reader,
group=editor.groups[0]
)
assert not authorize.custom(article)
assert not query(article.name, 'custom')
return
class TestGroupPermissions(object):
def test_group_delete(self, client, reader, editor):
g.user = editor
# other open read permissions
article = ArticleFactory.create(
name='Group Delete Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions('010')
assert authorize.delete(article)
assert query(article.name, 'delete')
# other closed read permissions
article = ArticleFactory.create(
name='Group Delete Closed Article',
owner=reader,
group=editor.groups[0]
).set_permissions('700')
assert not authorize.delete(article)
assert not query(article.name, 'delete')
return
def test_group_read(self, client, reader, editor):
g.user = editor
# other open read permissions
article = ArticleFactory.create(
name='Group Read Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions('020')
assert authorize.read(article)
assert query(article.name, 'read')
# other closed read permissions
article = ArticleFactory.create(
name='Group Read Closed Article',
owner=reader,
group=editor.groups[0]
).set_permissions('710')
assert not authorize.read(article)
assert not query(article.name, 'read')
return
def test_group_update(self, reader, editor):
g.user = editor
# other open update permissions
article = ArticleFactory.create(
name='Group Write Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions('040')
assert authorize.update(article)
assert query(article.name, 'update')
# other closed update permissions
article = ArticleFactory.create(
name='Group Write Closed Article',
owner=reader,
group=editor.groups[0]
).set_permissions('720')
assert not authorize.update(article)
assert not query(article.name, 'update')
return
def test_group_custom(self, reader, editor):
g.user = editor
# other open update permissions
article = ArticleFactory.create(
name='Group Write Open Article',
owner=reader,
group=editor.groups[0]
).set_permissions(group=['custom'])
assert authorize.custom(article)
assert query(article.name, 'custom')
# other closed update permissions
article = ArticleFactory.create(
name='Group Write Closed Article',
owner=reader,
group=editor.groups[0]
)
assert not authorize.custom(article)
assert not query(article.name, 'custom')
return
|
from .json_queue import *
|
from sqlalchemy.orm import backref
from .import db, login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
__tablename__='users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255),nullable=False, unique=True)
email = db.Column(db.String(255), nullable=False, unique=True)
biog = db.Column(db.String(255))
pass_secure = db.Column(db.String(255))
dp_path = db.Column(db.String())
pitch = db.relationship('Pitch', backref='user', lazy=True)
comment = db.relationship('Comment',backref='user')
upvote = db.relationship('Upvote', backref='user', lazy='dynamic')
downvote = db.relationship('Downvote', backref='user', lazy='dynamic')
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
class Pitch(db.Model):
__tablename__='pitches'
id = db.Column(db.Integer, primary_key=True)
category = db.Column(db.String(255))
content = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_time = db.Column(db.DateTime, default=datetime.utcnow)  # pass the callable so each row gets its own timestamp
comments = db.relationship('Comment', backref='pitch', lazy=True)
upvote = db.relationship('Upvote', backref='pitch', lazy='dynamic')
downvote = db.relationship('Downvote', backref='pitch', lazy='dynamic')
def save_pitch(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_category(cls, category):
pitches = Pitch.query.filter_by(category=category).all()
return pitches
def __repr__(self):
return f'Pitch{self.content}'
class Upvote(db.Model):
__tablename__ = 'upvotes'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer,db.ForeignKey('users.id'),nullable = False)
pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'),nullable = False)
def save(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_upvotes(cls,pitch_id):
upvote = Upvote.query.filter_by(pitch_id=pitch_id).all()
return upvote
def __repr__(self):
return f'{self.user_id}:{self.pitch_id}'
class Downvote(db.Model):
__tablename__ = 'downvotes'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer,db.ForeignKey('users.id'),nullable = False)
pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'),nullable = False)
def save(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_downvotes(cls,pitch_id):
downvote = Downvote.query.filter_by(pitch_id=pitch_id).all()
return downvote
def __repr__(self):
return f'{self.user_id}:{self.pitch_id}'
class Comment(db.Model):
__tablename__='comments'
id = db.Column(db.Integer, primary_key=True)
pitch_id = db.Column(db.Integer, db.ForeignKey('pitches.id'))
comment = db.Column(db.String(), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comments(cls,pitch_id):
comments = Comment.query.filter_by(pitch_id=pitch_id)
return comments
def __repr__(self):
return f'Comment:{self.comment}'
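# Example usage sketch (assumes an application context and an initialized db;
# the ids below are illustrative):
#   pitch = Pitch(category='tech', content='My elevator pitch', user_id=1)
#   pitch.save_pitch()
#   Comment(pitch_id=pitch.id, comment='Nice idea!', user_id=2).save_comment()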
|
# Generated by Django 2.0.2 on 2018-03-10 14:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracks', '0002_auto_20180310_1724'),
]
operations = [
migrations.AlterField(
model_name='tracks',
name='track_name',
field=models.CharField(blank=True, max_length=250),
),
]
|
from manimlib.imports import *
class CompareACDC(Scene):
def construct(self):
self.show_titles()
self.show_dc_examples()
self.show_ac_examples()
self.wait(2)
def show_titles(self):
# SS6.1
self.DC_title = TextMobject("\\underline{DC}")\
.scale(2.5)\
.to_edge(UP)\
.shift(FRAME_WIDTH*0.25*LEFT)
self.play(
Write(
self.DC_title
)
)
# SS6.2
self.AC_title = TextMobject("\\underline{AC}")\
.scale(2.5)\
.to_edge(UP)\
.shift(FRAME_WIDTH*0.25*RIGHT)
self.play(
Write(
self.AC_title
)
)
def show_dc_examples(self):
# SS6.3
phone_image=ImageMobject("images/ep1/CompareACDC/cell-phone.png") \
.scale(2.5) \
.next_to(self.DC_title, direction=DOWN, buff=0.5) \
.shift(3 * LEFT)
self.play(
FadeInFrom(
phone_image,
direction=LEFT
)
)
# SS6.4
computer=ImageMobject("images/ep1/CompareACDC/computer.jpeg") \
.scale(2) \
.next_to(phone_image, direction=RIGHT, buff=0) \
.shift(1.2 * LEFT)
self.play(
FadeIn(
computer
)
)
def show_ac_examples(self):
outlet_US = ImageMobject("images/ep1/CompareACDC/outlet-US.jpg")\
.scale(1.8)\
.next_to(self.AC_title,direction=DOWN,buff=0.3)\
.shift(1*LEFT)
self.play(
FadeInFrom(
outlet_US,
direction=RIGHT
)
)
outlet_EU = ImageMobject("images/ep1/CompareACDC/outlet-EU.jpg")\
.scale(1.7)\
.next_to(outlet_US,direction=RIGHT,buff=0.1)
self.play(
FadeInFrom(
outlet_EU,
direction=RIGHT
)
)
utility_pole = ImageMobject("images/ep1/CompareACDC/utility_pole.jpg")\
.scale(2)\
.next_to(self.AC_title,direction=DOWN,buff=4.25)
self.play(
FadeInFrom(
utility_pole,
direction=RIGHT
)
)
|
from selenium.webdriver.chrome.options import Options
def pytest_setup_options():
"""called before webdriver is initialized"""
options = Options()
options.add_argument("--headless")
options.add_argument("--disable-gpu")
return options
|
# Copyright 2021 MosaicML. All Rights Reserved.
import numpy as np
import pytest
import torch
from composer.algorithms import ChannelsLastHparams
from composer.core.event import Event
from composer.core.state import State
from composer.core.types import DataLoader, Model, Precision, Tensor
def _has_singleton_dimension(tensor: Tensor) -> bool:
return any(s == 1 for s in tensor.shape)
def _infer_memory_format(tensor: Tensor) -> str:
if _has_singleton_dimension(tensor):
raise ValueError(f'Tensor of shape {tensor.shape} has singleton dimensions, '
                         'memory format cannot be inferred from strides.')
base_order = list('nchw') # type: ignore
strides = tensor.stride()
if isinstance(strides, tuple) and len(strides) == 4:
order = np.argsort(strides)
# smallest stride should be last in format, so reverse order
memory_format = ''.join([base_order[o] for o in reversed(order)])
return memory_format
else:
raise ValueError(f'Tensor must be 4-dim, got shape {tensor.shape}')
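# For reference: a contiguous NCHW tensor of shape (N, C, H, W) has strides
# (C*H*W, H*W, W, 1), so the reversed argsort of its strides spells 'nchw',
# while a channels-last tensor (strides (H*W*C, 1, W*C, C)) spells 'nhwc'.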
@pytest.fixture()
def state(simple_conv_model: Model, dummy_train_dataloader: DataLoader, dummy_val_dataloader: DataLoader):
return State(
model=simple_conv_model,
train_batch_size=100,
eval_batch_size=100,
precision=Precision.FP32,
grad_accum=1,
max_epochs=10,
train_dataloader=dummy_train_dataloader,
eval_dataloader=dummy_val_dataloader,
)
def test_channels_last_algorithm(state, dummy_logger):
channels_last = ChannelsLastHparams().initialize_object()
assert state.model is not None
assert _infer_memory_format(state.model.module.conv1.weight) == 'nchw'
channels_last.apply(Event.TRAINING_START, state, dummy_logger)
assert _infer_memory_format(state.model.module.conv1.weight) == 'nhwc'
"""
Test helper utility _infer_memory_format
"""
@pytest.fixture(params=[True, False])
def tensor(request) -> Tensor:
strided = request.param
tensor = torch.randn((16, 32, 32, 64))
if strided:
tensor = tensor[::2, ::2, ::2, ::2]
return tensor
def test_infer_memory_format_nhwc(tensor):
tensor = tensor.to(memory_format=torch.channels_last)
assert _infer_memory_format(tensor) == 'nhwc'
def test_infer_memory_format_nchw(tensor):
tensor = tensor.to(memory_format=torch.contiguous_format)
assert _infer_memory_format(tensor) == 'nchw'
def test_infer_memory_format_wcnh(tensor):
tensor = tensor.to(memory_format=torch.contiguous_format)
tensor = tensor.permute(2, 1, 3, 0)
assert _infer_memory_format(tensor) == 'wcnh'
def test_infer_memory_format_incorrect_ndims():
tensor = torch.randn((16, 32, 32))
with pytest.raises(ValueError):
_infer_memory_format(tensor)
def test_infer_memory_format_singleton():
tensor = torch.randn((16, 32, 1, 64))
with pytest.raises(ValueError):
_infer_memory_format(tensor)
|
from setuptools import setup
from Cython.Build import cythonize
print("Compilando encoder.pyx\n\n")
setup(ext_modules = cythonize("encoder.pyx"),)
print("\n\nCompilando conversor.pyx\n\n")
setup(ext_modules = cythonize("conversor.pyx"),)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from icecream import install
from icecream import ic
install()
ic.configureOutput(includeContext=True)
from B import foo
if __name__ == "__main__":
ic("Hello World")
foo()
|
import sys
import typing
import numpy as np
from scipy import signal
class ModConvolve():
def __call__(
self,
f: np.ndarray,
g: np.ndarray,
) -> np.ndarray:
mod = self.__mod
N: typing.Final[int] = 10
BASE: typing.Final[int] = 1 << N
f, f0 = np.divmod(f, BASE)
f2, f1 = np.divmod(f, BASE)
g, g0 = np.divmod(g, BASE)
g2, g1 = np.divmod(g, BASE)
h0 = self.__conv(f0, g0)
ha = self.__conv(f1, g1)
h4 = self.__conv(f2, g2)
h1 = self.__conv(f0 + f1, g0 + g1) - h0 - ha
h3 = self.__conv(f1 + f2, g1 + g2) - ha - h4
h2 = self.__conv(f0 + f2, g0 + g2) - h0 - h4 + ha
h = (h4 << N * 2) + (h3 << N) + h2
h = (h % mod << N * 2) + (h1 << N) + h0
return h % mod
def __conv(
self,
f: np.ndarray,
g: np.ndarray,
) -> np.ndarray:
h = signal.fftconvolve(f, g)
return np.rint(h).astype(np.int64) % self.__mod
def __init__(
self,
mod: int,
    ) -> None:
self.__mod = mod
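# Note: ModConvolve.__call__ splits every coefficient into three 10-bit limbs
# (x = x2*2^20 + x1*2^10 + x0) and recombines six float-FFT convolutions
# Karatsuba-style, so intermediate products stay small enough for the FFT to
# round exactly before the final reduction mod 998244353.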
mod = 998_244_353
def main() -> None:
n, m = map(int, input().split())
a = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
b = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
convolve = ModConvolve(mod)
print(*convolve(a, b))
main()
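# Worked example (illustrative input): for
#   2 3
#   1 2
#   3 4 5
# the program prints "3 10 13 10", i.e. the convolution of [1, 2] and [3, 4, 5]
# reduced modulo 998244353.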
|
"""Unit test package for class_enrollment_simulations."""
|
"""
File: sierpinski.py
Name:
---------------------------
This file recursively prints the Sierpinski triangle on GWindow.
The Sierpinski triangle is a fractal described in 1915 by Waclaw Sierpinski.
It is a self similar structure that occurs at different levels of iterations.
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GLine
from campy.gui.events.timer import pause
# Constants
ORDER = 6 # Controls the order of Sierpinski Triangle
LENGTH = 600 # The length of order 1 Sierpinski Triangle
UPPER_LEFT_X = 150 # The upper left x coordinate of order 1 Sierpinski Triangle
UPPER_LEFT_Y = 100 # The upper left y coordinate of order 1 Sierpinski Triangle
WINDOW_WIDTH = 950 # The width of the GWindow
WINDOW_HEIGHT = 700 # The height of the GWindow
# Global Variable
window = GWindow(width=WINDOW_WIDTH, height=WINDOW_HEIGHT) # The canvas to draw Sierpinski Triangle
def main():
"""
TODO: The program will draw Sierpinski triangles.
"""
sierpinski_triangle(ORDER, LENGTH, UPPER_LEFT_X, UPPER_LEFT_Y)
def sierpinski_triangle(order, length, upper_left_x, upper_left_y):
"""
:param order: the levels of the picture.
:param length: the triangle's length
:param upper_left_x: the point of x-axis at the upper point
:param upper_left_y: the point of y-axis at the upper point
"""
if order == 0:
pass
else:
add_triangle(length, upper_left_x, upper_left_y)
length /= 2
order -= 1
sierpinski_triangle(order, length, upper_left_x, upper_left_y)
sierpinski_triangle(order, length, upper_left_x + length, upper_left_y)
        sierpinski_triangle(order, length, upper_left_x + length / 2, upper_left_y + length * 0.866)
def add_triangle(length, upper_left_x, upper_left_y):
"""
The function will draw a triangle.
"""
middle_x = upper_left_x + length / 2
    middle_y = upper_left_y + length * 0.866
upper_right_x = upper_left_x + length
upper_right_y = upper_left_y
left_line = GLine(upper_left_x, upper_left_y, middle_x, middle_y)
upper_line = GLine(upper_left_x, upper_left_y, upper_right_x, upper_right_y)
right_line = GLine(upper_right_x, upper_right_y, middle_x, middle_y)
window.add(left_line)
window.add(upper_line)
window.add(right_line)
if __name__ == '__main__':
main()
|
## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
from CvPythonExtensions import *
import CvUtil
import Popup as PyPopup
import PyHelpers
PyPlayer = PyHelpers.PyPlayer
PyCity = PyHelpers.PyCity
gc = CyGlobalContext()
local = CyTranslator()
def isWBPopup(context):
"helper for determining if context is a WBPopup"
return (context >= CvUtil.PopupTypeWBContextStart and context <= CvUtil.PopupTypeWBContextEnd)
class CvWBPopups:
"World Builder App Popups"
    # These are popup types, not popup context IDs
WBPopupEditCity = WorldBuilderPopupTypes.WBPOPUP_CITY
WBPopupEditUnit = WorldBuilderPopupTypes.WBPOPUP_UNIT
WBPopupEditPlot = WorldBuilderPopupTypes.WBPOPUP_PLOT
WBPopupEditPlayer = WorldBuilderPopupTypes.WBPOPUP_PLAYER
def __init__(self):
"set global instance items"
self.WBPopupDictionary = {
self.WBPopupEditCity : { 'Init' : self.initEditCity, 'Apply' : self.applyEditCity },
}
def initWB(self, argsList):
"called from Interface - use the PopupDictionary for quick access"
context = argsList[0]
entry = self.WBPopupDictionary.get(context)
if (entry):
init = entry.get('Init')
return init(argsList)
return 0
def applyWB(self, argsList):
"Apply the appropriate Popup using the PopupDictionary"
context = argsList[0]
entry = self.WBPopupDictionary.get(context-CvUtil.PopupTypeWBContextStart+WorldBuilderPopupTypes.WBPOPUP_START)
if (entry):
apply = entry.get('Apply')
return apply(argsList)
return 0
########################################################################################
def initEditCity(self, argsList):
' Initializes World Builder City Edit Popup '
px,py = argsList
pCity = CyMap().plot(px,py).getPlotCity()
iOwner = pCity.getOwner()
iID = pCity.getID()
userData = (iOwner, iID)
pCity = PyCity(iOwner, iID)
# create popup
popup = PyPopup.PyPopup( CvUtil.EventEditCity, EventContextTypes.EVENTCONTEXT_ALL )
popup.setSize(400,600)
popup.setUserData( userData )
popup.setHeaderString(local.getText("TXT_KEY_WB_HEADER_CITY_EDIT", ()))
# City Name - Editbox 0
popup.createEditBox( pCity.getName(), 0 )
# Population - Editbox 1
popup.setBodyString(local.getText("TXT_KEY_WB_CITY_EDIT_POP", ()))
popup.createEditBox( '0', 1 )
# Culture - Editbox 2
popup.setBodyString(local.getText("TXT_KEY_WB_CITY_EDIT_CULTURE", (pCity.getCulture(),)))
popup.createEditBox( '0', 2)
# Buildings - Listboxes Group
popup.createListBox(0)
iNumBuildings = gc.getNumBuildingInfos()
lBuildings = []
for i in range( iNumBuildings ):
# ('Library', iIndex)
lBuildings.append( (str(gc.getBuildingInfo(i).getDescription()), i) )
lBuildings.sort()
popup.addListBoxString( local.getText("TXT_KEY_WB_CITY_NOTHING", ()), -1)
popup.addListBoxString( local.getText("TXT_KEY_WB_CITY_ALL", ()), iNumBuildings) #for adding/removing every building
for i in range( len(lBuildings) ):
entry = lBuildings[i]
popup.addListBoxString(entry[0], entry[1])
# Add/Remove - Pulldown Group 0
popup.setBodyString(local.getText("TXT_KEY_WB_CITY_ADD_REMOVE", ()))
popup.createPullDown(0)
popup.addPullDownString( local.getText("TXT_KEY_WB_CITY_ADD", ()), 1)
popup.addPullDownString( local.getText("TXT_KEY_WB_CITY_NOCHANGE", ()), 0) #for clean exit
popup.addPullDownString( local.getText("TXT_KEY_WB_CITY_REMOVE", ()), 2)
# Launch Popup
popup.launch()
return 0
def applyEditCity( self, argsList):
' Apply World Builder City Edit'
popupReturn, userData = argsList
iOwner, iID = userData
pCity = PyCity( iOwner, iID )
if pCity.isNone():
return 0
# EDITABLE: Name, Population, Culture, Buildings
# Name
newName = str(popupReturn.getEditBoxString( 0 ))
currName = pCity.getName()
if ( newName != currName ):
pCity.setName( newName, False )
# Population
PopModifier = int(popupReturn.getEditBoxString( 1 ))
if PopModifier:
pCity.changePopulation( PopModifier )
iCultureModifier = int(popupReturn.getEditBoxString( 2 ))
if iCultureModifier:
pCity.setCulture( iCultureModifier )
# Buildings
iNumBuildings = gc.getNumBuildingInfos() #get total # of units from Game
if ( popupReturn.getSelectedListBoxValue(0)>=0 and popupReturn.getSelectedPullDownValue(0)>0 ):
BuildingIdx = popupReturn.getSelectedListBoxValue(0)
if (BuildingIdx == iNumBuildings+1):
return 0
AddRemoveIdx = popupReturn.getSelectedPullDownValue(0)
CvUtil.AdjustBuilding(AddRemoveIdx==1, BuildingIdx==iNumBuildings, BuildingIdx, pCity)
return 0
|
import logging
from src.backup.backup_process import BackupProcess
from src.backup.scheduler.task_creator import TaskCreator
from src.commons.big_query.big_query import BigQuery
from src.commons.big_query.big_query_table_metadata import BigQueryTableMetadata
from src.commons.tasks import Tasks
class TableBackup(object):
@staticmethod
def start(table_reference):
big_query = BigQuery()
big_query_table_metadata = BigQueryTableMetadata.get_table_by_reference(
table_reference)
if big_query_table_metadata.is_daily_partitioned() and \
not big_query_table_metadata.is_partition():
logging.info(u'Table %s:%s.%s is partitioned',
table_reference.get_project_id(),
table_reference.get_dataset_id(),
table_reference.get_table_id())
TableBackup._schedule_partitioned_table_backup_scheduler_task(
table_reference)
else:
BackupProcess(table_reference=table_reference,
big_query=big_query,
big_query_table_metadata=big_query_table_metadata).start()
@staticmethod
def _schedule_partitioned_table_backup_scheduler_task(table_reference):
Tasks.schedule('backup-scheduler',
TaskCreator.create_partitioned_table_backup_scheduler_task(
project_id=table_reference.get_project_id(),
dataset_id=table_reference.get_dataset_id(),
table_id=table_reference.get_table_id()))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 09:33:41 2021
@author: Erlend Tøssebro
"""
import matplotlib.pyplot as plt
import random
karakterer = ["A", "B", "C", "D", "E", "F"]
antall = [5, 15, 40, 24, 18, 26]
# Subplot parameters: number of rows, number of columns, which plot this is.
# For the third argument, numbering starts at 1 and runs horizontally first,
# then vertically.
plt.subplot(1, 3, 1)
plt.bar(karakterer, antall, color=("red", "orange", "yellow", "green", "blue", "magenta"))
plt.title("Karakterfordeling, stolper")
plt.subplot(1, 3, 2)
plt.pie(antall, labels=karakterer)
plt.title("Karakterfordeling, kakediagram")
plt.subplot(1, 3, 3)
verdier = []
for i in range(20000):
verdi = random.random() + random.random() + random.random()
verdier.append(verdi)
plt.hist(verdier, 40)
plt.title("3 tilfeldige tall")
plt.show()
|
# pylint: disable=missing-docstring, redefined-outer-name
import os
from typing import Generator, List
import pytest
from fideslang.models import System, SystemMetadata
from py._path.local import LocalPath
from fidesctl.connectors.models import OktaConfig
from fidesctl.core import api
from fidesctl.core import system as _system
from fidesctl.core.config import FidesctlConfig
def create_server_systems(test_config: FidesctlConfig, systems: List[System]) -> None:
for system in systems:
api.create(
url=test_config.cli.server_url,
resource_type="system",
json_resource=system.json(exclude_none=True),
headers=test_config.user.request_headers,
)
def delete_server_systems(test_config: FidesctlConfig, systems: List[System]) -> None:
for system in systems:
api.delete(
url=test_config.cli.server_url,
resource_type="system",
resource_id=system.fides_key,
headers=test_config.user.request_headers,
)
@pytest.fixture(scope="function")
def create_test_server_systems(
test_config: FidesctlConfig, redshift_systems: List[System]
) -> Generator:
systems = redshift_systems
delete_server_systems(test_config, systems)
create_server_systems(test_config, systems)
yield
delete_server_systems(test_config, systems)
@pytest.fixture(scope="function")
def create_external_server_systems(test_config: FidesctlConfig) -> Generator:
systems = _system.generate_redshift_systems(
organization_key="default_organization",
aws_config={},
) + _system.generate_rds_systems(
organization_key="default_organization",
aws_config={},
)
delete_server_systems(test_config, systems)
create_server_systems(test_config, systems)
yield
delete_server_systems(test_config, systems)
@pytest.fixture()
def redshift_describe_clusters() -> Generator:
describe_clusters = {
"Clusters": [
{
"ClusterIdentifier": "redshift-cluster-1",
"Endpoint": {
"Address": "redshift-cluster-1.c2angfh5kpo4.us-east-1.redshift.amazonaws.com",
"Port": 5439,
},
"ClusterNamespaceArn": "arn:aws:redshift:us-east-1:910934740016:namespace:057d5b0e-7eaa-4012-909c-3957c7149176",
},
{
"ClusterIdentifier": "redshift-cluster-2",
"Endpoint": {
"Address": "redshift-cluster-2.c2angfh5kpo4.us-east-1.redshift.amazonaws.com",
"Port": 5439,
},
"ClusterNamespaceArn": "arn:aws:redshift:us-east-1:910934740016:namespace:057d5b0e-7eaa-4012-909c-3957c7149177",
},
]
}
yield describe_clusters
@pytest.fixture()
def redshift_systems() -> Generator:
redshift_systems = [
System(
fides_key="redshift-cluster-1",
organization_fides_key="default_organization",
name="redshift-cluster-1",
description="Fides Generated Description for Redshift Cluster: redshift-cluster-1",
fidesctl_meta=SystemMetadata(
endpoint_address="redshift-cluster-1.c2angfh5kpo4.us-east-1.redshift.amazonaws.com",
endpoint_port="5439",
resource_id="arn:aws:redshift:us-east-1:910934740016:namespace:057d5b0e-7eaa-4012-909c-3957c7149176",
),
system_type="redshift_cluster",
privacy_declarations=[],
),
System(
fides_key="redshift-cluster-2",
organization_fides_key="default_organization",
name="redshift-cluster-2",
description="Fides Generated Description for Redshift Cluster: redshift-cluster-2",
fidesctl_meta=SystemMetadata(
endpoint_address="redshift-cluster-2.c2angfh5kpo4.us-east-1.redshift.amazonaws.com",
endpoint_port="5439",
resource_id="arn:aws:redshift:us-east-1:910934740016:namespace:057d5b0e-7eaa-4012-909c-3957c7149177",
),
system_type="redshift_cluster",
privacy_declarations=[],
),
]
yield redshift_systems
@pytest.fixture()
def rds_systems() -> Generator:
rds_systems = [
System(
fides_key="database-2",
organization_fides_key="default_organization",
name="database-2",
description="Fides Generated Description for RDS Cluster: database-2",
fidesctl_meta=SystemMetadata(
endpoint_address="database-2.cluster-ckrdpkkb4ukm.us-east-1.rds.amazonaws.com",
endpoint_port="3306",
resource_id="arn:aws:rds:us-east-1:910934740016:cluster:database-2",
),
system_type="rds_cluster",
privacy_declarations=[],
),
System(
fides_key="database-1",
organization_fides_key="default_organization",
name="database-1",
description="Fides Generated Description for RDS Instance: database-1",
fidesctl_meta=SystemMetadata(
endpoint_address="database-1.ckrdpkkb4ukm.us-east-1.rds.amazonaws.com",
endpoint_port="3306",
resource_id="arn:aws:rds:us-east-1:910934740016:db:database-1",
),
system_type="rds_instance",
privacy_declarations=[],
),
]
yield rds_systems
@pytest.fixture()
def rds_describe_clusters() -> Generator:
describe_clusters = {
"DBClusters": [
{
"DBClusterIdentifier": "database-2",
"Endpoint": "database-2.cluster-ckrdpkkb4ukm.us-east-1.rds.amazonaws.com",
"Port": 3306,
"DBClusterArn": "arn:aws:rds:us-east-1:910934740016:cluster:database-2",
},
]
}
yield describe_clusters
@pytest.fixture()
def rds_describe_instances() -> Generator:
describe_instances = {
"DBInstances": [
{
"DBInstanceIdentifier": "database-1",
"Endpoint": {
"Address": "database-1.ckrdpkkb4ukm.us-east-1.rds.amazonaws.com",
"Port": 3306,
},
"DBInstanceArn": "arn:aws:rds:us-east-1:910934740016:db:database-1",
},
]
}
yield describe_instances
@pytest.mark.unit
def test_get_system_resource_ids(redshift_systems: List[System]) -> None:
expected_result = [
"arn:aws:redshift:us-east-1:910934740016:namespace:057d5b0e-7eaa-4012-909c-3957c7149176",
"arn:aws:redshift:us-east-1:910934740016:namespace:057d5b0e-7eaa-4012-909c-3957c7149177",
]
actual_result = _system.get_system_resource_ids(redshift_systems)
assert actual_result == expected_result
@pytest.mark.unit
def test_find_missing_systems(
redshift_systems: List[System], rds_systems: List[System]
) -> None:
source_systems = rds_systems + redshift_systems
existing_systems = redshift_systems
actual_result = _system.find_missing_systems(
source_systems=source_systems, existing_systems=existing_systems
)
assert actual_result == rds_systems
@pytest.mark.integration
def test_get_all_server_systems(
test_config: FidesctlConfig, create_test_server_systems: Generator
) -> None:
actual_result = _system.get_all_server_systems(
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
exclude_systems=[],
)
assert actual_result
@pytest.mark.external
def test_scan_system_aws_passes(
test_config: FidesctlConfig, create_external_server_systems: Generator
) -> None:
_system.scan_system_aws(
coverage_threshold=100,
manifest_dir="",
organization_key="default_organization",
aws_config=None,
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
)
@pytest.mark.external
def test_generate_system_aws(tmpdir: LocalPath, test_config: FidesctlConfig) -> None:
actual_result = _system.generate_system_aws(
file_name=f"{tmpdir}/test_file.yml",
include_null=False,
organization_key="default_organization",
aws_config=None,
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
)
assert actual_result
OKTA_ORG_URL = "https://dev-78908748.okta.com"
@pytest.mark.external
def test_generate_system_okta(tmpdir: LocalPath, test_config: FidesctlConfig) -> None:
actual_result = _system.generate_system_okta(
file_name=f"{tmpdir}/test_file.yml",
include_null=False,
okta_config=OktaConfig(
orgUrl=OKTA_ORG_URL,
token=os.environ["OKTA_CLIENT_TOKEN"],
),
)
assert actual_result
@pytest.mark.external
def test_scan_system_okta_success(
tmpdir: LocalPath, test_config: FidesctlConfig
) -> None:
file_name = f"{tmpdir}/test_file.yml"
_system.generate_system_okta(
file_name=file_name,
include_null=False,
okta_config=OktaConfig(
orgUrl=OKTA_ORG_URL,
token=os.environ["OKTA_CLIENT_TOKEN"],
),
)
_system.scan_system_okta(
manifest_dir=file_name,
okta_config=OktaConfig(
orgUrl=OKTA_ORG_URL,
token=os.environ["OKTA_CLIENT_TOKEN"],
),
coverage_threshold=100,
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
)
assert True
@pytest.mark.external
def test_scan_system_okta_fail(tmpdir: LocalPath, test_config: FidesctlConfig) -> None:
with pytest.raises(SystemExit):
_system.scan_system_okta(
manifest_dir="",
okta_config=OktaConfig(
orgUrl=OKTA_ORG_URL,
token=os.environ["OKTA_CLIENT_TOKEN"],
),
coverage_threshold=100,
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
)
|
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils import timezone
from django.utils.translation import ugettext as _
from rr.forms.redirecturi import RedirectUriForm
from rr.models.redirecturi import RedirectUri
from rr.utils.serviceprovider import get_service_provider
logger = logging.getLogger(__name__)
@login_required
def redirecturi_list(request, pk):
"""
Displays a list of :model:`rr.RedirectUri` linked to
:model:`rr.ServiceProvider`.
Includes a ModelForm for adding :model:`rr.RedirectUri` to
:model:`rr.ServiceProvider`.
**Context**
``object_list``
List of :model:`rr.RedirectUri`.
``form``
ModelForm for creating a :model:`rr.RedirectUri`
``object``
An instance of :model:`rr.ServiceProvider`.
**Template:**
:template:`rr/redirecturi.html`
"""
sp = get_service_provider(pk, request.user, service_type=["oidc"])
form = RedirectUriForm(sp=sp)
if request.method == "POST":
if "add_redirecturi" in request.POST:
form = _add_redirecturi(request, sp)
elif "remove_redirecturi" in request.POST:
_remove_redirecturis(request, sp)
redirect_uris = RedirectUri.objects.filter(sp=sp, end_at=None).order_by('uri')
return render(request, "rr/redirecturi.html", {'object_list': redirect_uris,
'form': form,
'object': sp})
def _add_redirecturi(request, sp):
form = RedirectUriForm(request.POST, sp=sp)
if form.is_valid():
uri = form.cleaned_data['uri']
RedirectUri.objects.create(sp=sp, uri=uri)
sp.save_modified()
logger.info("Redirect URL added for {sp} by {user}".format(sp=sp, user=request.user))
form = RedirectUriForm(sp=sp)
messages.add_message(request, messages.INFO, _('Redirect URL added.'))
return form
def _remove_redirecturis(request, sp):
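    # Checked rows arrive in the POST data as "<pk>": "on" entries; matching
    # RedirectUri objects are marked as ended (end_at) rather than deleted.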
for key, value in request.POST.dict().items():
if value == "on":
            redirect_uri = RedirectUri.objects.get(pk=key)
            if redirect_uri.sp == sp:
                redirect_uri.end_at = timezone.now()
                redirect_uri.save()
sp.save_modified()
logger.info("Redirect URL removed from {sp} by {user}".format(
sp=sp, user=request.user))
messages.add_message(request, messages.INFO, _('Redirect URL removed.'))
|
# encoding: UTF-8
#
# Copyright 2012-2013 Alejandro Autalán
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://pygubu.web.here
from __future__ import unicode_literals
import platform
import logging
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
from pygubu.builder import builderobject
logger = logging.getLogger(__name__)
# translator marker
def _(x):
return x
TK_BITMAPS = (
'error', 'gray75', 'gray50', 'gray25', 'gray12',
'hourglass', 'info', 'questhead', 'question', 'warning',
)
TK_BITMAPS_MAC = (
'document', 'stationery', 'edition', 'application', 'accessory',
    'folder', 'pfolder', 'trash', 'floppy', 'ramdisk', 'cdrom',
'preferences', 'querydoc', 'stop', 'note', 'caution'
)
TK_CURSORS = (
'arrow', 'based_arrow_down', 'based_arrow_up', 'boat',
'bogosity', 'bottom_left_corner', 'bottom_right_corner',
'bottom_side', 'bottom_tee', 'box_spiral', 'center_ptr',
'circle', 'clock', 'coffee_mug', 'cross', 'cross_reverse',
'crosshair', 'diamond_cross', 'dot', 'dotbox', 'double_arrow',
'draft_large', 'draft_small', 'draped_box', 'exchange', 'fleur',
'gobbler', 'gumby', 'hand1', 'hand2', 'heart', 'icon',
'iron_cross', 'left_ptr', 'left_side', 'left_tee', 'leftbutton',
'll_angle', 'lr_angle', 'man', 'middlebutton', 'mouse', 'none',
'pencil', 'pirate', 'plus', 'question_arrow', 'right_ptr',
'right_side', 'right_tee', 'rightbutton', 'rtl_logo',
'sailboat', 'sb_down_arrow', 'sb_h_double_arrow',
'sb_left_arrow', 'sb_right_arrow', 'sb_up_arrow',
'sb_v_double_arrow', 'shuttle', 'sizing', 'spider', 'spraycan',
'star', 'target', 'tcross', 'top_left_arrow', 'top_left_corner',
'top_right_corner', 'top_side', 'top_tee', 'trek', 'ul_angle',
'umbrella', 'ur_angle', 'watch', 'xterm', 'X_cursor')
TK_CURSORS_WINDOWS = (
    'no', 'starting', 'size', 'size_ne_sw',
    'size_ns', 'size_nw_se', 'size_we', 'uparrow', 'wait'
)
TK_CURSORS_MAC = (
'copyarrow', 'aliasarrow', 'contextualmenuarrow', 'text',
'cross-hair', 'closedhand', 'openhand', 'pointinghand',
'resizeleft', 'resizeright', 'resizeleftright', 'resizeup',
'resizedown', 'resizeupdown', 'notallowed', 'poof',
'countinguphand', 'countingdownhand', 'countingupanddownhand', 'spinning'
)
if platform.system() == 'Darwin':
TK_BITMAPS = TK_BITMAPS + TK_BITMAPS_MAC
TK_CURSORS = TK_CURSORS + TK_CURSORS_MAC
elif platform.system() == 'Windows':
TK_CURSORS = TK_CURSORS + TK_CURSORS_WINDOWS
TK_RELIEFS = (tk.FLAT, tk.RAISED, tk.SUNKEN, tk.GROOVE, tk.RIDGE)
TK_WIDGET_OPTIONS = {
'accelerator': {
'editor': 'entry'},
'activerelief': {
'editor': 'choice',
'params': {
'values': ('', tk.FLAT, tk.RAISED, tk.SUNKEN,
tk.GROOVE, tk.RIDGE),
'state': 'readonly'}},
'activestyle': {
'editor': 'choice',
'params': {
'values': ('', 'underline', 'dotbox', 'none'),
'state': 'readonly'}},
'activebackground': {
'editor': 'colorentry'},
'activeborderwidth': {
'editor': 'entry'},
'activeforeground': {
'editor': 'colorentry'},
'after': {
'editor': 'entry'},
# ttk.Label
'anchor': {
'editor': 'choice',
'params': {'values': ('', tk.W, tk.CENTER, tk.E),
'state': 'readonly'},
'tk.Button': {
'params': {
'values': (
'', 'n', 'ne', 'nw', 'e', 'w', 's', 'se', 'sw', 'center'),
'state': 'readonly'}},
},
'aspect': {
'editor': 'entry'},
'autoseparators': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Label
'background': {
'editor': 'colorentry'},
# ttk.Frame, ttk.Label
'borderwidth': {
'editor': 'entry'},
'bigincrement': {
'editor': 'entry'},
'bitmap': {
'editor': 'choice',
'params': {'values': ('',) + TK_BITMAPS, 'state': 'readonly'}},
'blockcursor': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'buttonbackground': {
'editor': 'colorentry'},
'buttoncursor': {
'editor': 'choice',
'params': {'values': ('',) + TK_CURSORS, 'state': 'readonly'}},
'buttondownrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'buttonuprelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'class_': {
'editor': 'entry'},
'closeenough': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
# ttk.Treeview.Column
'column_anchor': {
'editor': 'choice',
'params': {'values': ('', tk.W, tk.CENTER, tk.E), 'state': 'readonly'},
'default': tk.W},
'command': {
'editor': 'entry'},
# ttk.Label
'compound': {
'editor': 'choice',
'params': {
'values': ('', tk.TOP, tk.BOTTOM, tk.LEFT, tk.RIGHT),
'state': 'readonly'}},
# ttk.Button
'confine': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'container': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'cursor': {
'editor': 'choice',
'params': {'values': ('',) + TK_CURSORS, 'state': 'readonly'}},
# ttk.Button
'default': {
'editor': 'choice',
'params': {'values': ('', 'normal', 'active', 'disabled')}},
'digits': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
'direction': {
'editor': 'choice',
'tk.Menubutton': {
'params': {'values': ('', tk.LEFT, tk.RIGHT, 'above'),
'state': 'readonly'}},
'ttk.Menubutton': {
'params': {
'values': ('', 'above', 'below', 'flush',
tk.LEFT, tk.RIGHT),
'state': 'readonly'}},
},
'disabledbackground': {
'editor': 'colorentry'},
'disabledforeground': {
'editor': 'colorentry'},
'elementborderwidth': {
'editor': 'entry'},
'endline': {
'editor': 'entry'},
# ttk.Checkbutton, ttk.Entry
'exportselection': {
'editor': 'choice',
'params': {'values': ('', 'true', 'false'), 'state': 'readonly'}},
# ttk.Label
'font': { 'editor': 'fontentry'},
# ttk.Label
'foreground': {
'editor': 'colorentry'},
# ttk.Spinbox
'format': {
'editor': 'entry'},
# ttk.Scale, ttk.Spinbox
'from_': {
'editor': 'spinbox',
'params': {'from_': -999, 'to': 999},
},
'handlepad': {
'editor': 'entry'},
'handlesize': {
'editor': 'entry'},
# ttk.Treeview.Column
'heading_anchor': {
'editor': 'choice',
'params': {
'values': ('', tk.W, tk.CENTER, tk.E), 'state': 'readonly'},
'default': tk.W},
# ttk.Frame,
'height': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
'validator': 'number_integer',
'tk.Toplevel': {'default': 200},
'tk.Frame': {'default': 200},
'ttk.Frame': {'default': 200},
'tk.LabelFrame': {'default': 200},
'ttk.Labelframe': {'default': 200},
'tk.PanedWindow': {'default': 200},
'ttk.Panedwindow': {'default': 200},
'ttk.Notebook': {'default': 200},
'tk.Text': {'default': 10},
'pygubu.builder.widgets.dialog': {'default': 100}},
'highlightbackground': {
'editor': 'colorentry'},
'highlightcolor': {
'editor': 'colorentry'},
'highlightthickness': {
'editor': 'entry'},
# ttk.Label
'image': {
'editor': 'imageentry'},
'inactiveselectbackground': {
'editor': 'colorentry'},
# ttk.Spinbox
'increment': {
'editor': 'spinbox',
'params': {'from_': -999, 'to': 999}
},
'indicatoron': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'insertbackground': {
'editor': 'colorentry'},
'insertborderwidth': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
'insertofftime': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100},
},
'insertontime': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100},
},
'insertunfocussed': {
'editor': 'choice',
'params': {
'values': ('', 'none', 'hollow', 'solid'),
'state': 'readonly'}},
'insertwidth': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
# ttk.Entry
'invalidcommand': {
'editor': 'entry'},
'jump': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Label
'justify': {
'editor': 'choice',
'params': {'values': ('', 'left', 'center', 'right'),
'state': 'readonly'}},
'label': {
'editor': 'entry'},
# ttk.Labelframe
'labelanchor': {
'editor': 'choice',
'params': {
'values': ('', 'nw', 'n', 'ne', 'en', 'e', 'es',
'se', 's', 'sw', 'ws', 'w'),
'state': 'readonly'}},
# ttk.Progressbar
'length': {
'editor': 'entry'},
'listvariable': {
'editor': 'tkvarentry'},
# ttk.Progressbar
'maximum': {
'editor': 'entry'},
'maxundo': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
'minsize': {
'editor': 'entry'},
# ttk.Treeview.Column
'minwidth': {
'editor': 'spinbox',
'params': {'from_': 5, 'to': 999},
'default': '20'},
# ttk.Progressbar
'mode': {
'editor': 'choice',
'params': {
'values': ('', 'determinate', 'indeterminate'),
'state': 'readonly'}},
'offrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
# ttk.Checkbutton
'offvalue': {
'editor': 'entry',
'help': _('offvalue_help')},
# ttk.Checkbutton
'onvalue': {
'editor': 'entry'},
'opaqueresize': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Panedwindow
'orient': {
'editor': 'choice',
'params': {'values': (tk.VERTICAL, tk.HORIZONTAL),
'state': 'readonly'},
'default': tk.HORIZONTAL
},
'overrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}
},
# ttk.Frame, ttk.Label
'padding': {
'editor': 'entry'},
'padx': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
'pady': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
# ttk.Checkbutton
'postcommand': {
'editor': 'entry'},
'readonlybackground': {
'editor': 'colorentry'},
# ttk.Frame,
'relief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'repeatdelay': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100},
},
'repeatinterval': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100}},
'resolution': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'increment': 0.5},
},
'sliderlength': {
'editor': 'entry'},
'sliderrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'sashcursor': {
'editor': 'choice',
'params': {'values': ('',) + TK_CURSORS, 'state': 'readonly'}},
'sashpad': {
'editor': 'entry'},
'sashrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'sashwidth': {
'editor': 'entry'},
'selectbackground': {
'editor': 'colorentry'},
'selectborderwidth': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
'selectforeground': {
'editor': 'colorentry'},
'scrollregion': {
'editor': 'entry'},
'selectcolor': {
'editor': 'colorentry'},
'selectimage': {
'editor': 'imageentry'},
# ttk.Treeview
'selectmode': {
'editor': 'choice',
'params': {
'values': ('', tk.BROWSE, tk.SINGLE, tk.MULTIPLE, tk.EXTENDED),
'state': 'readonly'},
'ttk.Treeview': {
'params': {
'values': (tk.EXTENDED, tk.BROWSE, tk.NONE),
'state': 'readonly'},
'default': tk.EXTENDED}
},
'setgrid': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Entry
'show': {
'editor': 'choice',
'tk.Entry': {
'params': {'values': ('', '•'), 'state': 'normal'},
},
'ttk.Entry': {
'params': {'values': ('', '•'), 'state': 'normal'},
},
'ttk.Treeview': {
'params': {
'values': ('', 'tree', 'headings'), 'state': 'readonly'}
},
'pygubu.builder.widgets.editabletreeview': {
'params': {
'values': ('', 'tree', 'headings'), 'state': 'readonly'}
},
},
'showhandle': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'showvalue': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'spacing1': {
'editor': 'entry'},
'spacing2': {
'editor': 'entry'},
'spacing3': {
'editor': 'entry'},
'startline': {
'editor': 'entry'},
'state': {
'editor': 'choice',
'params': {'values': ('', tk.NORMAL, tk.DISABLED),
'state': 'readonly'},
'tk.Button': {
'params': {
'values': ('', tk.NORMAL, tk.ACTIVE, tk.DISABLED),
'state': 'readonly'}},
'tk.Entry': {
'params': {
'values': ('', tk.NORMAL, tk.DISABLED, 'readonly'),
'state': 'readonly'}},
'tk.Combobox': {
'params': {
'values': ('', 'readonly'), 'state': 'readonly'}},
'ttk.Entry': {
'params': {
'values': ('', tk.NORMAL, tk.DISABLED, 'readonly'),
'state': 'readonly'}},
'ttk.Combobox': {
'params': {
'values': ('', 'normal', 'readonly', 'disabled'),
'state': 'readonly'}},
'ttk.Button': {
'params': {
'values': ('', 'normal', 'disabled'),
'state': 'readonly'}},
'ttk.Notebook.Tab': {
'params': {
'values': ('', 'normal', 'disabled', 'hidden'),
'state': 'readonly'}}},
# ttk.Notebook.Tab
'sticky': {
'editor': 'stickyentry',
'params': {}},
# ttk.Treeview.Column
'stretch': {
'editor': 'choice',
'ttk.Treeview.Column': {
'params': {'values': ('true', 'false'), 'state': 'readonly'},
'default': 'true'},
'tk.PanedWindow.Pane': {
'params': {
'values': ('', 'always', 'first', 'last', 'middle', 'never'),
'state': 'readonly'}}},
'style': {
'editor': 'choice'},
'tabs': {
'editor': 'entry'}, # FIXME see tk.Text tab property
'tabstyle': {
'editor': 'choice',
'params': {
'values': ('', 'tabular', 'wordprocessor'),
'state': 'readonly'}},
'takefocus': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'tearoff': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'tearoffcommand': {
'editor': 'entry' },
# ttk.Label
'text': {
'editor': 'text'},
# ttk.Label
'textvariable': {
'editor': 'tkvarentry'},
'tickinterval': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'increment': 0.5},
},
# ttk.Scale, ttk.Spinbox
'to': {
'editor': 'spinbox',
'params': {'from_': -999, 'to': 999},
},
'tristateimage': {
'editor': 'imageentry'},
'tristatevalue': {
'editor': 'entry'},
'troughcolor': {
'editor': 'colorentry'},
# ttk.Label
'underline': {
'editor': 'spinbox'},
'undo': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'value': {
'editor': 'entry'},
# ttk.Checkbutton
'values': {
'editor': 'entry'},
'validate': {
'editor': 'choice',
'params': {
'values': ('', 'none', 'focus', 'focusin',
'focusout', 'key', 'all'),
'state': 'readonly'}},
'validatecommand': {
'editor': 'entry'},
# ttk.Checkbutton
'variable': {
'editor': 'tkvarentry'},
# ttk.Panedwindow.Pane
'weight': {
'editor': 'spinbox', 'params': {'from_': 0, 'to': 999}},
# ttk.Frame, ttk.Label
'width': {
'editor': 'dynamic',
'params': {'mode': 'spinbox', 'from_': 0, 'to': 999},
'tk.Button': {
'params': {'mode': 'spinbox', 'from_': -999, 'to': 999}},
'ttk.Button': {
'params': {'mode': 'spinbox', 'from_': -999, 'to': 999}},
'tk.Canvas': {
'params': {'mode': 'entry'}
},
'tk.Toplevel': {
'default': 200},
'tk.Frame': {
'default': 200},
'ttk.Frame': {
'default': 200},
'tk.LabelFrame': {
'default': 200},
'ttk.Labelframe': {
'default': 200},
'tk.PanedWindow': {
'default': 200},
'ttk.Panedwindow': {
'default': 200},
'ttk.Notebook': {
'default': 200},
'tk.Text': {
'default': 50},
'ttk.Treeview.Column': {
'params': {'mode': 'spinbox', 'from_': 5},
'default': 200},
'pygubu.builder.widgets.dialog': {
'default': 200}},
# ttk.Spinbox
'wrap': {
'editor': 'choice',
'params': {
'values': ('', 'false', 'true'),
'state': 'readonly'},
'tk.Text': {
'params': {
'values': ('', tk.CHAR, tk.WORD, tk.NONE),
'state': 'readonly'}}
},
# ttk.Label
'wraplength': {
'editor': 'entry'},
# ttk.Entry
'xscrollcommand': {
'editor': 'entry'},
'xscrollincrement': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}
},
# ttk.Treeview
'yscrollcommand': {
'editor': 'entry'},
'yscrollincrement': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}
},
}
REQUIRED_OPTIONS = {
'class': {
'editor': 'entry',
'params': {'state': 'readonly'}},
'id': {
'editor': 'entry'},
}
CUSTOM_OPTIONS = {
'command_id_arg': {
'editor': 'choice',
'params': {
'values': ('true', 'false'),
'state': 'readonly'},
'default': 'false'},
'geometry': {
'editor': 'entry'},
'invalidcommand_args': {
'editor': 'entry'},
'maxsize': {
'editor': 'whentry'},
'minsize': {
'editor': 'whentry'},
'overrideredirect': {
'editor': 'choice',
'params': {'values': ('', 'True', 'False'), 'state': 'readonly'}},
'resizable': {
'editor': 'choice',
'params': {
'values': ('', 'both', 'horizontally', 'vertically', 'none'),
'state': 'readonly'}},
'scrolltype': {
'editor': 'choice',
'params': {
'values': ('both', 'vertical', 'horizontal'),
'state': 'readonly'},
'default': 'both'},
'text': {
'editor': 'text'},
'title': {
'editor': 'entry'},
'tree_column': {
'editor': 'choice',
'params': {'values': ('true', 'false'), 'state': 'readonly'},
'default': 'false'},
'usemousewheel': {
'editor': 'choice',
'params': {
'values': ('true', 'false'),
'state': 'readonly'},
'default': 'false'},
'validatecommand_args': {
'editor': 'entry'},
'visible': {
'editor': 'choice',
'params': {'values': ('true', 'false'), 'state': 'readonly'},
'default': 'true'},
}
WIDGET_REQUIRED_OPTIONS = ('class', 'id')
WIDGET_STANDARD_OPTIONS = (
'accelerator', 'activerelief', 'activestyle', 'activebackground',
'activeborderwidth', 'activeforeground', 'after',
'anchor', 'background', 'bitmap', 'borderwidth',
'class_', 'compound', 'cursor', 'disabledforeground',
'exportselection',
'font', 'foreground', 'jump', 'highlightbackground',
'highlightcolor', 'highlightthickness', 'image',
'indicatoron', 'insertbackground',
'insertborderwidth', 'insertofftime', 'insertontime', 'insertwidth',
'justify', 'orient', 'padx', 'pady', 'relief',
'repeatdelay', 'repeatinterval', 'selectbackground', 'selectborderwidth',
'selectforeground', 'setgrid', 'state', 'style', 'takefocus', 'text',
'textvariable', 'troughcolor', 'underline', 'width', 'wraplength',
'xscrollcommand', 'yscrollcommand')
WIDGET_SPECIFIC_OPTIONS = (
'activestyle', 'activerelief', 'anchor', 'aspect',
'autoseparators', 'background', 'bigincrement',
'blockcursor', 'borderwidth', 'buttonbackground', 'buttoncursor',
'buttondownrelief', 'buttonuprelief',
'class_', 'column_anchor', 'command', 'compound', 'container',
'closeenough', 'confine', 'default', 'digits', 'direction',
'disabledbackground', 'disabledforeground', 'elementborderwidth',
'endline', 'exportselection', 'font',
'foreground', 'format', 'from_', 'to',
'inactiveselectbackground', 'increment', 'insertunfocussed',
'invalidcommand', 'justify', 'handlepad', 'handlesize',
'heading_anchor', 'height', 'image', 'indicatoron',
'label', 'labelanchor', 'listvariable', 'length',
'maximum', 'maxundo',
'minsize', 'minwidth', 'mode', 'offrelief', 'offvalue',
'onvalue', 'opaqueresize', 'orient', 'overrelief',
'padding', 'padx', 'pady',
'postcommand', 'readonlybackground', 'relief', 'resolution',
'scrollregion', 'sashcursor', 'sashpad', 'sashrelief', 'sashwidth',
'selectcolor', 'selectimage', 'selectmode', 'show',
'showhandle', 'showvalue', 'sliderlength', 'sliderrelief',
'spacing1', 'spacing2', 'spacing3', 'startline',
'state', 'sticky', 'stretch', 'tabs', 'tabstyle',
'text', 'textvariable', 'tickinterval', 'tristateimage',
'tristatevalue', 'underline', 'validate', 'undo', 'validatecommand',
'value', 'values', 'variable', 'weight', 'width', 'wrap',
'wraplength', 'xscrollincrement', 'yscrollincrement',
'tearoff', 'tearoffcommand'
)
WIDGET_CUSTOM_OPTIONS = [
'command_id_arg', 'invalidcommand_args', 'tree_column',
'validatecommand_args', 'visible', 'scrolltype', 'text',
'title', 'geometry', 'overrideredirect', 'resizable',
'minsize', 'maxsize', 'usemousewheel'
]
WIDGET_PROPERTIES = wp = dict(TK_WIDGET_OPTIONS)
wp.update(REQUIRED_OPTIONS)
wp.update(CUSTOM_OPTIONS)
LAYOUT_OPTIONS = {
# grid packing properties
'row': {
'editor': 'numberentry',
# to_ = 50 is a pygubu-designer restriction
'params': {'from_': 0, 'to_': 50}
},
'column': {
'editor': 'numberentry',
# to_ = 50 is a pygubu-designer restriction
'params': {'from_': 0, 'to_': 50}
},
'sticky': {
'editor': 'stickyentry',
'params': {}},
'rowspan': {
'editor': 'numberentry',
'params': {'from_': 1, 'to_': 50}
},
'columnspan': {
'editor': 'numberentry',
'params': {'from_': 1, 'to_': 50}
},
'padx': {'editor': 'dimensionentry'},
'pady': {'editor': 'dimensionentry'},
'ipadx': {'editor': 'dimensionentry'},
'ipady': {'editor': 'dimensionentry'},
'propagate': {
'editor': 'choice',
'params': {'values': ('True', 'False'), 'state': 'readonly'},
'default': 'True'},
#
# grid row and column properties (can be applied to each row or column)
#
'minsize': {
'editor': 'dimensionentry',
'params': {'width': 4, 'empty_data': 0}},
'pad': {
'editor': 'dimensionentry',
'params': {'width': 4, 'empty_data': 0}},
'weight': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'state': 'readonly', 'width': 3}}
}
GRID_PROPERTIES = [
'row', 'column', 'sticky', 'rowspan', 'columnspan', 'padx', 'pady',
'ipadx', 'ipady', 'propagate']
GRID_RC_PROPERTIES = ['minsize', 'pad', 'weight']
TRANSLATABLE_PROPERTIES = [
'label', 'text', 'title',
]
def _register_custom(name, descr):
if name not in CUSTOM_OPTIONS:
CUSTOM_OPTIONS[name] = descr
WIDGET_PROPERTIES.update(CUSTOM_OPTIONS)
WIDGET_CUSTOM_OPTIONS.append(name)
WIDGET_CUSTOM_OPTIONS.sort()
logger.debug('Registered property; {0}'.format(name))
def register_property(name, descr):
_register_custom(name, descr)
builderobject._old_register_property(name, descr)
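# One-time monkey patch (guarded by _register_fixed_): re-register any properties
# already added through pygubu's builderobject, then wrap
# builderobject.register_property so later registrations also land in this
# module's CUSTOM_OPTIONS / WIDGET_PROPERTIES tables.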
if not hasattr(builderobject, '_register_fixed_'):
for name, descr in builderobject.CUSTOM_PROPERTIES.items():
_register_custom(name, descr)
builderobject._register_fixed_ = True
builderobject._old_register_property = builderobject.register_property
builderobject.register_property = register_property
logger.debug('Installed custom register_property function')
|
"""
@Date: 2021/08/12
@description:
"""
import torch
import torch.nn as nn
class LEDLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss = nn.L1Loss()
def forward(self, gt, dt):
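        # Rescale the normalized depths to metric units with a fixed camera height
        # (1.6 m); the predicted ceiling depth is additionally scaled by the
        # ground-truth ceiling/floor ratio, then both predictions are compared to
        # the ground-truth depth with an L1 loss.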
camera_height = 1.6
gt_depth = gt['depth'] * camera_height
dt_ceil_depth = dt['ceil_depth'] * camera_height * gt['ratio']
dt_floor_depth = dt['depth'] * camera_height
ceil_loss = self.loss(gt_depth, dt_ceil_depth)
floor_loss = self.loss(gt_depth, dt_floor_depth)
loss = floor_loss + ceil_loss
return loss
if __name__ == '__main__':
import numpy as np
from dataset.mp3d_dataset import MP3DDataset
mp3d_dataset = MP3DDataset(root_dir='../src/dataset/mp3d', mode='train')
gt = mp3d_dataset.__getitem__(0)
gt['depth'] = torch.from_numpy(gt['depth'][np.newaxis]) # batch size is 1
gt['ratio'] = torch.from_numpy(gt['ratio'][np.newaxis]) # batch size is 1
dummy_dt = {
'depth': gt['depth'].clone(),
'ceil_depth': gt['depth'] / gt['ratio']
}
# dummy_dt['depth'][..., :20] *= 3 # some different
led_loss = LEDLoss()
loss = led_loss(gt, dummy_dt)
print(loss)
|
#!/usr/bin/env python3
from setuptools import find_packages, setup
import os
os.chdir(os.path.dirname(os.path.realpath(__file__)))
try:
with open("readme.md") as f:
md = f.read()
except OSError:
md = ""
setup(
name="k1lib",
packages=["k1lib", "k1lib._hidden",
"k1lib.cli",
"k1lib.callbacks", "k1lib.callbacks.profilers",
"k1lib.callbacks.lossFunctions",
"k1lib._mo"],
version="0.12",
install_requires=["torch", "numpy>=1.14", "matplotlib>=2.0", "dill"],
extras_require={"graphviz": ["graphviz"], "vision": ["torchvision", "pillow"], "skimage": ["scikit-image"]},
description="Some nice ML overhaul",
url="https://github.com/157239n/k1lib",
author="Quang Ho",
author_email="157239q@gmail.com",
long_description=md,
long_description_content_type='text/markdown',
license="MIT",
)
|
def sum2d(arr):
M, N = arr.shape
result = 0.0
for i in range(M):
for j in range(N):
result += arr[i, j]
return result
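# A minimal usage sketch (not part of the original snippet): sum2d only needs an
# object with a 2-D .shape and element indexing, so a NumPy array works here.
# NumPy is assumed to be available.
if __name__ == "__main__":
    import numpy as np

    data = np.arange(6, dtype=np.float64).reshape(2, 3)  # [[0, 1, 2], [3, 4, 5]]
    print(sum2d(data))  # 15.0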
|
import unittest
import moksha.wsgi.middleware
import webtest
from nose.tools import raises
from nose.tools import eq_
class TestMiddleware(unittest.TestCase):
def setUp(self):
def app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return ['Hello, world!\n']
self.app = app
def test_middleware_wrap(self):
app = moksha.wsgi.middleware.make_moksha_middleware(self.app, {})
def test_middleware_simple(self):
app = moksha.wsgi.middleware.make_moksha_middleware(self.app, {})
test_app = webtest.TestApp(app)
response = test_app.get('/')
eq_(response.status, '200 OK')
@raises(KeyError)
def test_no_registry(self):
config = {'moksha.registry': False}
app = moksha.wsgi.middleware.make_moksha_middleware(self.app, config)
response = webtest.TestApp(app).get('/')
def test_external_registry(self):
config = {'moksha.registry': False}
app = moksha.wsgi.middleware.make_moksha_middleware(self.app, config)
from paste.registry import RegistryManager
app = RegistryManager(app)
response = webtest.TestApp(app).get('/')
eq_(response.status, '200 OK')
@raises(NotImplementedError)
def test_connectors(self):
config = {'moksha.connectors': True}
app = moksha.wsgi.middleware.make_moksha_middleware(self.app, config)
response = webtest.TestApp(app).get('/')
@raises(NotImplementedError)
def test_csrf(self):
config = {'moksha.csrf_protection': True}
app = moksha.wsgi.middleware.make_moksha_middleware(self.app, config)
response = webtest.TestApp(app).get('/')
|
import time
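# Note: img1() through img10() are not defined in this snippet; they are assumed
# to be provided elsewhere (e.g. imported from a companion module).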
def Chorus_origin():
for i in range(2):
img1()
time.sleep(0.5)
img2()
time.sleep(0.5)
img3()
time.sleep(1)
img3()
time.sleep(1)
Chorus_origin()
img1()
time.sleep(4)
img2()
time.sleep(4)
img3()
time.sleep(10)
img4()
time.sleep(4)
img5()
time.sleep(4)
img6()
time.sleep(5)
img7()
time.sleep(15)
img8()
time.sleep(4)
img9()
time.sleep(4)
img10()
|
"""ヴィジュネル暗号を作る関数が入っています。"""
def make_vij(key:str,sent:str)->str:
"""
    Takes a key as the first argument and plaintext as the second, and returns the Vigenère ciphertext.
"""
x,y=0,0
ang=""
key=key.lower()
sent=sent.lower()
while y<len(sent):
if ord(sent[y])>=ord('a') and ord(sent[y])<=ord('z'):
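            # Shift the plaintext letter by the key letter (both mapped to 0-25),
            # take the result modulo 26, and emit it as an uppercase letter.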
ang+=chr(ord('A')+(ord(sent[y])+ord(key[x])-ord('a')*2)%26)
x+=1
else:
ang+=sent[y]
y+=1
x%=len(key)
return ang
if __name__=="__main__":
print("ウィジュネル暗号生成ツール\n鍵=",end='')
key=input()
print("文字列=",end='')
sen=input()
print("->"+make_vij(key,sen))
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['TaxPercentagesGraphQLTestCase::test_getting_tax_percentages 1'] = {
'data': {
'taxPercentages': {
'edges': [
{
'node': {
'value': '0.00'
}
},
{
'node': {
'value': '10.00'
}
},
{
'node': {
'value': '14.00'
}
},
{
'node': {
'value': '24.00'
}
}
]
}
}
}
|
# pytype: skip-file
import logging
from logging import Logger
from typing import Callable, Dict, Any, Optional
from slack_bolt.context import BoltContext
from slack_bolt.context.ack import Ack
from slack_bolt.context.respond import Respond
from slack_bolt.context.say import Say
from slack_bolt.request import BoltRequest
from slack_bolt.response import BoltResponse
from slack_sdk import WebClient
class Args:
client: WebClient
logger: Logger
req: BoltRequest
resp: BoltResponse
request: BoltRequest
response: BoltResponse
context: BoltContext
body: Dict[str, Any]
# payload
payload: Dict[str, Any]
options: Optional[Dict[str, Any]] # payload alias
shortcut: Optional[Dict[str, Any]] # payload alias
action: Optional[Dict[str, Any]] # payload alias
view: Optional[Dict[str, Any]] # payload alias
command: Optional[Dict[str, Any]] # payload alias
event: Optional[Dict[str, Any]] # payload alias
message: Optional[Dict[str, Any]] # payload alias
# utilities
ack: Ack
say: Say
respond: Respond
# middleware
next: Callable[[], None]
def __init__(
self,
*,
logger: logging.Logger,
client: WebClient,
req: BoltRequest,
resp: BoltResponse,
context: BoltContext,
body: Dict[str, Any],
payload: Dict[str, Any],
options: Optional[Dict[str, Any]] = None,
shortcut: Optional[Dict[str, Any]] = None,
action: Optional[Dict[str, Any]] = None,
view: Optional[Dict[str, Any]] = None,
command: Optional[Dict[str, Any]] = None,
event: Optional[Dict[str, Any]] = None,
message: Optional[Dict[str, Any]] = None,
ack: Ack,
say: Say,
respond: Respond,
next: Callable[[], None],
**kwargs # noqa
):
self.logger: logging.Logger = logger
self.client: WebClient = client
self.request = self.req = req
self.response = self.resp = resp
self.context: BoltContext = context
self.body: Dict[str, Any] = body
self.payload: Dict[str, Any] = payload
self.options: Optional[Dict[str, Any]] = options
self.shortcut: Optional[Dict[str, Any]] = shortcut
self.action: Optional[Dict[str, Any]] = action
self.view: Optional[Dict[str, Any]] = view
self.command: Optional[Dict[str, Any]] = command
self.event: Optional[Dict[str, Any]] = event
self.message: Optional[Dict[str, Any]] = message
self.ack: Ack = ack
self.say: Say = say
self.respond: Respond = respond
self.next: Callable[[], None] = next
|