# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
class MyPermission(models.Model):
    '''
    Custom permission table.
    '''
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
permission = models.ForeignKey(Permission, null=True, blank=True, db_index=True)
    name = models.CharField(u'Permission name', max_length=50, unique=True, null=False, blank=False, help_text=u'Use an English name; only digits, letters and the special characters (._-) are allowed')
    is_nav = models.BooleanField(u'Is navigation entry', default=True)
    nav_name = models.CharField(u'Navigation name', max_length=50, null=False, blank=False, help_text=u'Can be left blank if this is not a navigation entry')
    url = models.CharField(u'Directory URL', max_length=150, null=True, blank=True, help_text=u'Note: e.g. for /p/123/, store it as /p/modify/')
    is_default = models.BooleanField(u'Is default permission', default=False)
    order_id = models.IntegerField(u'Navigation order', default=1, help_text=u'Smaller values appear first')
def __unicode__(self):
return u'{}({})'.format(self.name, self.nav_name)
def per(self):
permission = self.permission
return '{}.{}'.format(permission.content_type.app_label, permission.codename)
def nav_children(self):
return self.children.filter(is_nav=True)
def save(self, *args, **kwargs):
if self.permission:
p = self.permission
p.codename = self.name
p.name = self.name
p.save()
else:
content_type, _ = ContentType.objects.get_or_create(app_label='perm', model='mypermission')
permission, _ = Permission.objects.get_or_create(
codename=self.name,
name=self.name,
content_type=content_type
)
setattr(self, 'permission', permission)
super(MyPermission, self).save(*args, **kwargs)
def get_perm(self):
return '%s.%s' % (self.permission.content_type.app_label, self.permission.codename)
class Meta:
managed = False
db_table = 'perm_mypermission'
|
import sys
from os.path import join, dirname
sys.path.append(join(dirname(__file__), '../src'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-07 21:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nominations', '0028_auto_20170707_2054'),
]
operations = [
migrations.AddField(
model_name='application',
name='vol_incumbent',
field=models.NullBooleanField(verbose_name='Incumbent?'),
),
]
|
import torch.nn as nn
from ..abs import BridgeBase
class Bridge(BridgeBase):
r"""
Bridge the connection between encoder and decoder.
"""
def __init__(self, args):
super().__init__(args)
self.num_layers = args.num_layers
        self.dec_init = nn.Sequential(nn.Linear(args.hidden_size, args.hidden_size * 2 * args.num_layers),
nn.Tanh())
def forward(self, encoder_state):
        ctx = encoder_state['ctx']
mask = encoder_state['mask']
h = self.dec_init(encoder_state['h'])
if h.ndimension() == 2:
# B x 1 x D
h = h.unsqueeze(1)
        hs = h.chunk(2 * self.num_layers, -1)
        hs0 = hs[:self.num_layers]
        hs1 = hs[self.num_layers:]
state_r2l = {
'encoder': {
'ctx': ctx,
'mask': mask
},
}
for i in range(self.num_layers):
state_r2l['l%d' % i] = {
'prev_state': hs0[i].contiguous(),
}
        state_l2r = {
'encoder': {
'ctx': ctx,
'mask': mask
},
}
for i in range(self.num_layers):
state_l2r['l%d' % i] = {
'prev_state': hs1[i].contiguous(),
}
return {
            'l2r': state_l2r,
            'r2l': state_r2l
}
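# A minimal shape sketch (hedged addition, not part of the original module):
# it mirrors what Bridge.forward does with the encoder state 'h' -- project it
# to hidden_size * 2 * num_layers features, then chunk it into 2 * num_layers
# per-layer initial states (the first half feeds the r2l decoder state, the
# second half the l2r state). The sizes below are arbitrary.
if __name__ == "__main__":
    import torch
    hidden_size, num_layers, batch = 8, 2, 3
    dec_init = nn.Sequential(nn.Linear(hidden_size, hidden_size * 2 * num_layers), nn.Tanh())
    h = dec_init(torch.randn(batch, hidden_size)).unsqueeze(1)  # B x 1 x (2 * L * D)
    chunks = h.chunk(2 * num_layers, -1)
    assert all(c.shape == (batch, 1, hidden_size) for c in chunks)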
|
# Copyright © 2021, United States Government, as represented by the Administrator of the
# National Aeronautics and Space Administration. All rights reserved.
#
# The “ISAAC - Integrated System for Autonomous and Adaptive Caretaking platform” software is
# licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# ----------------------------------------------------------------------------------------------------
# ISAAC Interface
# Backend API
# ----------------------------------------------------------------------------------------------------
from flask import Flask, abort, jsonify
from database import Database
from ros_connection import ROSConnection
from time import time
import json
import sys
def log(message):
print("[{}] {}".format(int(time()), message))
sys.stdout.flush()
log("opening config")
# Load JSON configuration file
with open("/config.json", "r") as f:
configuration = json.load(f)
log("config loaded")
# Flask application (API)
app = Flask(__name__)
log("establishing db conn")
# Database connection
database_connection = Database()
log("db conn established")
log("establishing ros bridge conn")
# ROS bridge connection
ros_connection = ROSConnection(
database_connection=database_connection,
configuration=configuration,
)
log("ros bridge conn established")
def unsluggify_ros_topic(ros_topic):
# warning! we can't use ros topic names in URLs (because
# they use forward slashes) therefore we have to "sluggify" them
#
# this is a simple process:
# 1. remove prefixed /
# 2. replace any other / with two _
#
return "/" + ros_topic.replace("__", "/")
@app.route('/config.json')
def config_request():
# this enables hot reconfigurations to occur
# because it will re-read the config.json file
# on each API call (i.e.: if you change your config
# you just need to refresh the frontend page)
global configuration
with open("/config.json", "r") as f:
configuration = json.load(f)
return json.dumps(configuration), 200, {'Content-Type': 'application/json'}
@app.route('/history/<ros_topic>/start/<start_time>/end/<end_time>')
def history_time_bound(ros_topic, start_time, end_time):
start_time, end_time = int(float(start_time)), int(float(end_time))
ros_topic = unsluggify_ros_topic(str(ros_topic))
if end_time <= start_time:
abort(400)
if not (ros_topic in ros_connection.available_ros_topics):
abort(404)
result = database_connection.load(
ros_topic=ros_topic, start_time=start_time, end_time=end_time,
)
return json.dumps(result)
@app.route('/topics')
def ros_topic_list():
return json.dumps(ros_connection.available_ros_topics)
if __name__ == "__main__":
print("Launching IDI Backend with the following configuration:")
print(configuration)
print("\n")
app.run(debug=True, host="0.0.0.0", port=9091)
|
# Python implementation of the cocktail shaker sort algorithm
def cocktail_shaker_sort(a):
for i in range(len(a)-1, 0, -1):
swapped = False
for j in range(i, 0, -1):
if a[j] < a[j-1]:
a[j], a[j-1] = a[j-1], a[j]
swapped = True
for j in range(i):
if a[j] > a[j+1]:
a[j], a[j+1] = a[j+1], a[j]
swapped = True
        if not swapped:
            return a
    return a
if __name__ == '__main__':
sort_a = cocktail_shaker_sort([12, 23, 4, 5, 3, 2, 12, 81, 56, 95])
print(sort_a)
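    # Quick sanity check (hedged addition): the result should agree with
    # Python's built-in sorted() on an arbitrary small input.
    sample = [5, 1, 4, 2, 8]
    assert cocktail_shaker_sort(list(sample)) == sorted(sample)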
|
from django.shortcuts import render
from django.http import Http404
from .models import Sponser
from .serializers import SponserSerializer
# rest dependencies import
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
class AllSponsers(APIView):
model = Sponser
serializer = SponserSerializer
def get(self, request, format=None, *args, **kwargs):
sponser = self.model.objects.all()
serializer = self.serializer(sponser, many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
serializer = self.serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
sponser_data = serializer.data
        response = {
            'data': {
                'sponser': dict(sponser_data),
                'status': 'success',
                'message': 'Sponser has been added successfully'
            }
        }
return Response(response, status=status.HTTP_201_CREATED)
class SingleSponser(APIView):
model = Sponser
serializer = SponserSerializer
def get_object(self, pk):
try:
return self.model.objects.get(pk=pk)
except self.model.DoesNotExist:
raise Http404
def get(self, request, pk, format=None, *args, **kwargs):
sponser = self.get_object(pk)
serializer = self.serializer(sponser)
return Response(serializer.data)
def put(self, request, pk, format=None, *args, **kwargs):
sponser = self.get_object(pk)
serializer = self.serializer(sponser, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None, *args, **kwargs):
sponser = self.get_object(pk)
sponser.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from separador_silabico import *
'''
Marks the stressed (tonic) syllable of a word, based on the syllable
separation data.
Call:    marcaSilTonica(palavra)
Input:   palavra -> string containing the word
Output:  returns the syllabified word with the stress marked
Example: marcaSilTonica("palavra")
         >> pa-@la-vra
'''
vogaisAcent = u'[áàâéêíóôú]'
vogaisTil = u'[ãõ]'
def hasVogalAcent(sil):
return re.search(vogaisAcent,sil)
def hasTilda(sil):
return re.search(vogaisTil,sil)
def defineSilTonica(word):
word = silabifica(word)
silabas = word.split('-')
ultimaSilaba = len(silabas) - 1
for idx,silaba in enumerate(silabas):
silabaAtual = ultimaSilaba - idx
        #If the word has an acute or circumflex accent, the syllable where it occurs is the stressed one
if hasVogalAcent(silaba):
return silabaAtual
        #If the word has no acute or circumflex accent but has a tilde, the syllable where it occurs is the stressed one
elif hasTilda(silaba):
return silabaAtual
elif (idx == ultimaSilaba):
            #If the word ends in r, x, n, l, z and has no accent -- oxytone
if (re.search('[rxnlz]$',silaba)):
return 0
            #If the word ends in i, u, is, us, im, um and has no accent -- oxytone
elif (re.search('[iu][sm]?$',silaba)):
return 0
            #If it ends in 'uns' and has no accent -- oxytone
elif (re.search('uns$',silaba)):
return 0
else:
if (len(silabas) < 2):
return 0
else:
return 1
def marcaSilTonica(word,posicao = False):
    #Gets the word's stress classification
    #oxytone (last syllable) = 0
    #paroxytone (second-to-last syllable) = 1
    #proparoxytone (third-to-last syllable) = 2
if (posicao):
posicaoAcento = posicao
else:
posicaoAcento = defineSilTonica(word)
    #Syllabifies the word
silabas = silabifica(word).split('-')
    #Determines the stressed syllable according to the output of defineSilTonica(word)
numSilAcentuada = (len(silabas) - 1) - posicaoAcento
    #Adds '@' as the stress marker
silabas[numSilAcentuada] = '@' + silabas[numSilAcentuada]
return '-'.join(silabas)
def definePosDaTonica(word):
silabas = word.split('-')
for idx, silaba in enumerate(silabas):
if '@' in silaba:
return idx
#Returns the distance between the given position and the stressed syllable
def posRelacaoTonica(trans,pos):
trans = trans
#numSyll = len(trans.split('-')) - 1
accentedSyll = definePosDaTonica(trans)
    #creates a counter that builds a scale centered on the stressed syllable:
    #e.g.: o - @xi - to - na
    #     -1    0     1    2
syllableCount = -accentedSyll
i = 0
for ch in trans:
        #If there is a hyphen, there is another syllable -> update the counter
if (ch == '-'):
syllableCount += 1
if (i == pos):
return syllableCount
i += 1
|
from console import Console
from action import Action
from userLoginAction import UserLoginAction
from userRegisterAction import UserRegisterAction
class MainAction(Action):
def FollowInstructions(self, info = None):
Console.Clear()
print("""********************************\n
Acciones disponibles:
- [R] Registro
- [L] Login
- [T] Terminar
********************************
""")
return input("Seleccione la acción a ejecutar: ").upper()
def IsValidAction(self, option):
return option in ["R", "1", "L", "2", "T", "3"]
def GetNextAction(self):
nextAction = None
if self.SelectedOption == "R" or self.SelectedOption == "1":
nextAction = UserRegisterAction(self)
elif self.SelectedOption == "L" or self.SelectedOption == "2":
nextAction = UserLoginAction(self)
elif self.SelectedOption == "T" or self.SelectedOption == "3":
exit()
nextAction.Init()
return self
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This software and supporting documentation are distributed by
# Institut Federatif de Recherche 49
# CEA/NeuroSpin, Batiment 145,
# 91191 Gif-sur-Yvette cedex
# France
#
# This software is governed by the CeCILL license version 2 under
# French law and abiding by the rules of distribution of free software.
# You can use, modify and/or redistribute the software under the
# terms of the CeCILL license version 2 as circulated by CEA, CNRS
# and INRIA at the following URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
"""
This program launches postprocessing_results for each training subfolder
"""
import argparse
import glob
import os
import sys
import inspect
import six
from contrastive import evaluation
def parse_args(argv):
"""Parses command-line arguments
Args:
argv: a list containing command line arguments
Returns:
args
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
prog='loop_validate_and_clusterize.py',
description='Analyses all output subfolders')
parser.add_argument(
"-s", "--src_dir", type=str, required=True,
help='Source directory where deep learning results lie.')
parser.add_argument(
"-c", "--csv_file", type=str, required=True,
help='csv file on which is done the evaluation.')
args = parser.parse_args(argv)
return args
def loop_over_directory(src_dir, csv_file):
"""Loops over deep learning directories
"""
# Gets and creates all filenames
dirnames = glob.glob(f"{src_dir}/*")
for deep_dir in dirnames:
deep_dir = os.path.abspath(deep_dir)
analysis_path = f"{deep_dir}"
checkpoint_file = glob.glob(
f"{deep_dir}/logs/default/version_0/checkpoints/*.ckpt")
checkpoint_file = os.path.abspath(checkpoint_file[0])
checkpoint_path = f"'\"{checkpoint_file}\"'"
config_path = f"{deep_dir}/.hydra"
prog_path = os.path.dirname(inspect.getabsfile(evaluation))
cmd = f"python3 {prog_path}/validate_and_clusterize_output.py " \
f"+analysis_path={analysis_path} " \
f"checkpoint_path={checkpoint_path} " \
f"train_val_csv_file={csv_file} "\
f"--config-path={config_path}"
print(cmd)
os.system(cmd)
def main(argv):
"""Reads argument line and launches validate_and_clusterize on each
Args:
argv: a list containing command line arguments
"""
# This code permits to catch SystemExit with exit code 0
# such as the one raised when "--help" is given as argument
try:
# Parsing arguments
args = parse_args(argv)
loop_over_directory(args.src_dir, args.csv_file)
except SystemExit as exc:
if exc.code != 0:
six.reraise(*sys.exc_info())
if __name__ == '__main__':
# This permits to call main also from another python program
# without having to make system calls
main(argv=sys.argv[1:])
# example of use
# python3 loop_validate_and_clusterize_output.py -s ../../../Output/t-0.1
|
import FWCore.ParameterSet.Config as cms
class RandomRunSource (cms.Source):
"""The class is a Source whose run is chosen randomly. This initializes identically to a cms.Source
and after being initialized the run number distribution is set by calling 'setRunDistribution'.
"""
def setRunDistribution(self,runsAndProbs):
"""Pass a list of tuple pairs, with the first item of the pair a run number
and the second number of the pair a weight. The class will normalize the
weights so you do not have to. The pairs will be used to randomly choose what Run
should be assigned to the job.
"""
self.__dict__['runsAndProbs']=runsAndProbs
def insertInto(self, parameterSet, myname):
from random import SystemRandom
totalProb = 0.
for r,p in self.__dict__['runsAndProbs']:
totalProb+=p
#this is the same random generator used to set the seeds for the RandomNumberGeneratorService
random = SystemRandom()
runProb = random.uniform(0,totalProb)
        print(runProb)
sumProb = 0
runNumber = 0
for r,p in self.__dict__['runsAndProbs']:
sumProb+=p
if sumProb >= runProb:
runNumber = r
break
if self.type_() == "PoolSource":
self.setRunNumber = cms.untracked.uint32(runNumber)
else:
#sources that inherit from ConfigurableInputSource use 'firstRun'
self.firstRun = cms.untracked.uint32(runNumber)
super(RandomRunSource,self).insertInto(parameterSet,myname)
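# A minimal usage sketch (hedged addition): it assumes a CMSSW environment
# where FWCore is importable and a PoolSource-compatible input file exists;
# the file name below is hypothetical. Per the class docstring, the object is
# constructed exactly like a cms.Source and the run distribution is set
# afterwards with setRunDistribution.
if __name__ == "__main__":
    source = RandomRunSource(
        "PoolSource",
        fileNames=cms.untracked.vstring("file:input.root"),
    )
    source.setRunDistribution([(1, 0.5), (2, 0.3), (3, 0.2)])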
|
#!/usr/bin/env python3
import fileinput
def parse_rest(rest):
if "no other bags" in rest:
return []
bagses = rest.split(", ")
ret = []
for one_bag in bagses:
tokens = one_bag.split(" ")
ret.append((" ".join(tokens[1:-1]), int(tokens[0])))
return ret
def parse():
graph = dict()
for line in fileinput.input():
idx = line.find("bags")
start = line[:idx-1]
graph[start] = parse_rest(line[idx + len("bags contain "):])
return graph
def dfs(graph, start, search):
if start == search:
return True
for next_bag, _ in graph[start]:
if dfs(graph, next_bag, search):
return True
return False
def part1():
graph = parse()
count = 0
for outer_bag in graph.keys():
if outer_bag != "shiny gold" and dfs(graph, outer_bag, "shiny gold"):
count += 1
print(count)
def dfs_count(graph, start):
ct = 1
for bag, n in graph[start]:
ct += n * dfs_count(graph, bag)
return ct
def part2():
graph = parse()
print(dfs_count(graph, "shiny gold") - 1)
part1()
part2()
|
from django.conf.urls import patterns, url
from .views import CurrentLocation, PastLocations
urlpatterns = patterns('',
url(r'^current_location/$', CurrentLocation.as_view(), name="current_location"),
url(r'^past_locations/$', PastLocations.as_view(), name="past_locations"),
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 396.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/4/22 10:42
------------
"""
from functools import reduce
from typing import List
class Solution:
def maxRotateFunction(self, nums: List[int]) -> int:
current = sum(map(lambda x: x[0] * x[1], zip(nums, range(len(nums)))))
ans = current
for i in range(1, len(nums)):
n = nums[len(nums) - i]
current -= n * (len(nums) - 1)
current += sum(nums[:len(nums) - i])
current += sum(nums[len(nums) - i + 1:])
ans = max(ans, current)
return ans
if __name__ == '__main__':
s = Solution()
print(s.maxRotateFunction([4, 3, 2, 6]))
print(s.maxRotateFunction([100]))
|
from django.template import Library
register = Library()
@register.filter
def noop(variable, param=None):
return variable
|
from setuptools import setup
setup(
name='cached-collections',
version='0.1.0',
    description=('In-memory cache, synchronized between processes, for '
                 'storing frequently used data'),
author='Vladimir Magamedov',
author_email='vladimir@magamedov.com',
url='https://github.com/vmagamedov/cached-collections',
py_modules=['cached_collections'],
license='BSD',
install_requires=['redis'],
)
|
def look_up_word_value(words):
"""
---------------------------------------------------------------------
DESCRIPTION
Translates the word (string) array into a floating-point value array.
---------------------------------------------------------------------
PARAMETERS
words (string array): The array of words to convert into a floating-
point value array.
---------------------------------------------------------------------
"""
the_dictionary = {}
word_num = 0
the_list_of_words = open("C:/YourShortListOfWords.txt", "r")
the_text_within = the_list_of_words.read()
for line in the_text_within.split('\n'):
# print(line+":"+str(word_num))
the_dictionary[line] = word_num
word_num = word_num + 1
looked_up_array = []
for word in words:
looked_up_array.append(int(the_dictionary[word]))
# print(looked_up_array)
real_looked_up_array = []
for word_val in looked_up_array:
real_looked_up_array.append(word_val / 10000)
return real_looked_up_array
def look_up_word_for_value(word_values):
"""
---------------------------------------------------------------------
DESCRIPTION
Translates the floating-point value array into a word (string) array.
---------------------------------------------------------------------
PARAMETERS
    word_values (floating-point value array): The array of floating-point
values to convert into a word (string) array.
---------------------------------------------------------------------
"""
word_list_here = []
the_list_of_words_here = open("C:/YourShortListOfWords.txt", "r")
the_word_list_within = the_list_of_words_here.read()
for line in the_word_list_within.split('\n'):
word_list_here.append(line)
output_word_list_here = []
for word_value in word_values:
output_word_list_here.append(word_list_here[int(word_value * 10000)])
return output_word_list_here
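# Self-contained illustration (hedged addition; it does not read the
# dictionary file used above): the encoding scheme maps each word to its line
# index divided by 10000, and decoding multiplies by 10000 and indexes back
# into the word list.
def _word_value_round_trip_demo():
    demo_words = ["hello", "world", "code"]  # hypothetical tiny dictionary
    values = [demo_words.index(word) / 10000 for word in demo_words]
    decoded = [demo_words[int(round(value * 10000))] for value in values]  # round() guards against float error
    assert decoded == demo_words
    return values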
def is_valid_word_array(words_to_check):
"""
---------------------------------------------------------------------
DESCRIPTION
Checks if the words in the word (string) array are part of the
dictionary.
---------------------------------------------------------------------
PARAMETERS
words_to_check (string array): The array of words to check for in the
dictionary.
---------------------------------------------------------------------
"""
valid = True
try:
look_up_word_value(words_to_check)
except:
valid = False
return valid
def add_word_to_dictionary(word_to_add):
"""
---------------------------------------------------------------------
DESCRIPTION
Adds a word to the dictionary file, if it does not already exist.
---------------------------------------------------------------------
PARAMETERS
word_to_add (string): The word to add to the dictionary.
---------------------------------------------------------------------
"""
list_of_exist_words = open("C:/YourShortListOfWords.txt", "r")
existing_words = list_of_exist_words.read()
not_taken = True
    for ExistLine in existing_words.split('\n'):
        if ExistLine.lower() == word_to_add.lower():
not_taken = False
if not_taken:
ready_to_add = open("C:/YourShortListOfWords.txt", "a")
ready_to_add.write("\n" + word_to_add.lower())
def pad_word_array(word_array_to_pad, input_size):
"""
---------------------------------------------------------------------
DESCRIPTION
Pads the word array with ^ to reshape it to the network's input size,
or trims it if necessary. Otherwise, leaves it unchanged.
---------------------------------------------------------------------
PARAMETERS
word_array_to_pad (string array): The word array to pad.
input_size (integer): The input size the neural network expects.
---------------------------------------------------------------------
"""
if len(word_array_to_pad) > input_size:
return word_array_to_pad[0:input_size]
elif len(word_array_to_pad) == input_size:
return word_array_to_pad
elif len(word_array_to_pad) < input_size:
        padded_word_array = list(word_array_to_pad)
for PadChar in range(input_size - len(word_array_to_pad)):
padded_word_array.append("^")
return padded_word_array
def easy_convert_sentence_to_values(sentence_array, input_size):
"""
---------------------------------------------------------------------
DESCRIPTION
Converts the array of sentences to an array of word value arrays. If
necessary, they might be padded.
---------------------------------------------------------------------
PARAMETERS
sentence_array (string array): The sentence array to convert.
input_size (integer): The input size the neural network expects.
---------------------------------------------------------------------
"""
arr_of_token_wrd_arrs = []
# Tokenizes each sentence in arr_of_token_wrd_arrs
import nltk
for SentenceToTokenize in sentence_array:
arr_of_token_wrd_arrs.append(pad_word_array(nltk.word_tokenize(SentenceToTokenize), input_size))
# Checks the validity of arr_of_token_wrd_arrs, extending the dictionary if necessary
for WordArray in arr_of_token_wrd_arrs:
for Word in WordArray:
if is_valid_word_array([Word]):
print(Word + " is a valid word.")
else:
add_word_to_dictionary(Word)
# Converts arr_of_token_wrd_arrs to an array of word value arrays
arr_of_wrd_val_arrs = []
for WordArrayToConvert in arr_of_token_wrd_arrs:
arr_of_wrd_val_arrs.append(look_up_word_value(WordArrayToConvert))
return arr_of_wrd_val_arrs
'''
#Keras Example Below
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
import numpy as np
#The idea here is to output only one of the inputs (remove redundancy).
#For some reason, the outputs I got had similar values (so the outputs started with the same letter) when the dictionary file
#contained a long list of alphabetically-arranged words.
#I would appreciate it if anyone can help fix this bug.
#Here is the input data
X = np.array(easy_convert_sentence_to_values(["code code","program program","pet pet"],9))
#Here is the output data
y = np.array(easy_convert_sentence_to_values(["code","program","pet"],1))
model = Sequential()
model.add(Dense(8, input_dim=9))
model.add(Activation('tanh'))
model.add(Dense(6))
model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
sgd = SGD(lr=0.1)
model.compile(loss='binary_crossentropy', optimizer=sgd)
model.fit(X, y, batch_size=1, nb_epoch=100)
print(model.predict_proba(X))
for whatever in model.predict_proba(X).tolist():
for theThing in whatever:
        print(look_up_word_for_value([round(theThing,1000)]))
'''
|
#!/usr/bin/env python
# This script can be run on the core server to upgrade the riak cluster to support workflows
import riak
client = riak.RiakClient(protocol='pbc', nodes=[{'host': 'localhost'}])
client.resolver = riak.resolver.last_written_resolver
with open('/opt/al/pkg/assemblyline/al/install/etc/riak/schema/workflow.xml') as wf_handle:
workflow_schema = wf_handle.read()
client.create_search_schema(schema='workflow', content=workflow_schema)
client.create_search_index('workflow', 'workflow', 3)
bucket = client.bucket('workflow', bucket_type="data")
props = {
'dvv_enabled': False,
'last_write_wins': True,
'allow_mult': False,
'n_val': 3,
'search_index': 'workflow'
}
client.set_bucket_props(bucket=bucket, props=props)
|
import RPi.GPIO as IO
import time
import serial
from QrTest import CodeReader
#--------------UART init-------------------------------------
ser = serial.Serial(
port='/dev/ttyAMA0',
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
#--------------GPIO init-------------------------------------
IO.setwarnings(False)
IO.setmode(IO.BCM)
IO.setup(2,IO.IN) #GPIO 2 -> Left IR out
IO.setup(3,IO.IN) #GPIO 3 -> Right IR out
#--------------Main------------------------------------------
tourne = False
reader = True
while 1:
if(IO.input(2)==True and IO.input(3)==True): #move forward
tourne = False
reader = True
print('sending Forward')
data = 'F0000000'
ser.write(str(data).encode())
time.sleep(0.1)
elif(IO.input(2)==False and IO.input(3)==True): #turn right
tourne = True
reader = True
print('sending Right')
data = 'R0000000'
data2 = data
ser.write(str(data).encode())
time.sleep(0.1)
elif(IO.input(2)==True and IO.input(3)==False): #turn left
tourne = True
reader = True
print('sending Left')
data = 'L0000000'
data2 = data
ser.write(str(data).encode())
time.sleep(0.1)
else: #stay still
if (tourne == False and reader == True):
reader = False
print('sending Stop')
data = 'S0000000'
res = CodeReader()
ser.write(str(data).encode())
time.sleep(0.1)
ser.write(str(res).encode())
time.sleep(0.1)
if (tourne == False and reader == False):
print('sending Stop but no QR')
data = 'S0000000'
ser.write(str(data).encode())
time.sleep(0.1)
else :
print('Jump')
ser.write(str(data2).encode())
time.sleep(0.1)
|
# ----------------------------------------------------------------------
# DNSZone.type field
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
self.db.add_column(
"dns_dnszone",
"type",
models.CharField(
"Type",
max_length=1,
null=False,
blank=False,
default="F",
choices=[("F", "Forward"), ("4", "Reverse IPv4"), ("6", "Reverse IPv6")],
),
)
self.db.execute("UPDATE dns_dnszone SET type = '4' WHERE name ILIKE '%%.in-addr.arpa'")
        self.db.execute(
            "UPDATE dns_dnszone SET type = '6' WHERE name ILIKE '%%.ip6.int' OR name ILIKE '%%.ip6.arpa'"
        )
|
from flask import Flask, jsonify, render_template, request
from main import predict
import pickle
HOST = '127.0.0.1'
PORT = 5000
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/predict-digit', methods=['POST'])
def digit():
# Take the image and preprocess it.
# predict and get the label
# send the label as json to the template.
if request.method == 'POST':
img = request.get_json()
# Load trained network file.
with open('./trained-network', 'rb') as f:
weights = pickle.load(f)
digit, prob = predict(img, weights)
prob = "{0:.2f}".format(prob.item())
data = {'digit': digit.item(), 'prob': prob}
return jsonify(data)
if __name__ == '__main__':
app.run(host=HOST, port=PORT, debug=True)
|
# master morfless libraries, imported from the libraries package
from libraries import constants
from libraries import globals
from libraries import schematics
from libraries import header
from libraries import footer
from libraries import main
from libraries import before_after
from libraries import html_elements
from libraries import meta_defaults
from libraries import meta_elements
from libraries import read_schematic
from libraries import second_processes
from libraries import string_processes
from libraries import lists
from libraries import bucket_file_preparation
from libraries import classes
|
from hiword.extractor import KeywordsExtractor
from hiword.extractor import extract_keywords
__version__ = '0.3.1'
__all__ = [
'KeywordsExtractor',
'extract_keywords',
]
|
# Returns the intensity limits, i.e. a (min, max) tuple for the image's dtype.
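# A hedged sketch of what such a helper could look like (not from the
# original file): integer dtypes take their limits from numpy, booleans map
# to (False, True), and floating point images are assumed to follow the
# common normalized [-1, 1] convention.
import numpy as np

def dtype_limits_sketch(image):
    kind = np.dtype(image.dtype).kind
    if kind in "iu":                       # signed / unsigned integers
        info = np.iinfo(image.dtype)
        return info.min, info.max
    if kind == "b":                        # booleans
        return False, True
    return -1.0, 1.0                       # floats: assumed normalized range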
|
"""
test score_mgr
"""
import datetime
from django.test import TransactionTestCase
from django.contrib.auth.models import User
from apps.managers.score_mgr import score_mgr
from apps.managers.team_mgr.models import Group, Team
from apps.managers.score_mgr.models import ScoreboardEntry, PointsTransaction
from apps.utils import test_utils
class ScoreboardEntryUnitTests(TransactionTestCase):
"""scoreboard test"""
def setUp(self):
"""Generate test. Set the competition settings to the current date for testing."""
self.user = User(username="test_user", password="changeme")
self.user.save()
self.current_round = "Round 1"
test_utils.set_competition_round()
self.user.get_profile().add_points(10, datetime.datetime.today(), "test")
def testUserOverallRoundRankWithPoints(self):
"""Tests that the overall rank calculation for a user in a round is
correct based on points."""
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.get_profile(),
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today()
entry.save()
self.assertEqual(score_mgr.player_rank(self.user.get_profile(),
self.current_round),
1,
"Check user is ranked #1 for the current round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.get_profile()
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points + 1
entry2.last_awarded_submission = entry.last_awarded_submission
entry2.save()
self.assertEqual(score_mgr.player_rank(self.user.get_profile(),
self.current_round),
2,
"Check user is now second.")
def testUserOverallRoundRankWithSubmissionDate(self):
"""Tests that the overall rank calculation for a user in a round is
correct based on submission date."""
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.get_profile(),
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today() - datetime.timedelta(days=3)
entry.save()
self.assertEqual(score_mgr.player_rank(self.user.get_profile(),
self.current_round),
1,
"Check user is ranked #1 for the current round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.get_profile()
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points
entry2.last_awarded_submission = datetime.datetime.today()
entry2.save()
self.assertEqual(score_mgr.player_rank(self.user.get_profile(),
self.current_round),
2,
"Check user is now second.")
def testUserTeamRoundRankWithPoints(self):
"""Tests that the team rank calculation for a round is correct based
on points."""
# Setup dorm
group = Group(name="Test group")
group.save()
team = Team(name="A", group=group)
team.save()
profile = self.user.get_profile()
profile.team = team
profile.save()
# Set up entry
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.get_profile(),
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today()
entry.save()
self.assertEqual(score_mgr.player_rank_in_team(self.user.get_profile(),
self.current_round),
1,
"Check user is ranked #1 for the current round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.get_profile()
profile2.team = team
profile2.save()
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points + 1
entry2.last_awarded_submission = entry.last_awarded_submission
entry2.save()
self.assertEqual(score_mgr.player_rank_in_team(self.user.get_profile(),
self.current_round),
2,
"Check user is now second.")
def testUserTeamRoundRankWithSubmissionDate(self):
"""Tests that the team rank calculation for a round is correct based
on points."""
# Set up dorm
group = Group(name="Test group")
group.save()
team = Team(name="A", group=group)
team.save()
# Create the entry for the test user
profile = self.user.get_profile()
profile.team = team
profile.save()
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.get_profile(),
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today() - \
datetime.timedelta(days=3)
entry.save()
# Create another test user
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.get_profile()
profile2.team = team
profile2.save()
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points
entry2.last_awarded_submission = datetime.datetime.today()
entry2.save()
self.assertEqual(score_mgr.player_rank_in_team(self.user.get_profile(),
self.current_round),
2,
"Check user is now second.")
def testRoundRankWithoutEntry(self):
"""Tests that the overall rank calculation is correct even if a user
has not done anything yet."""
group = Group(name="Test group")
group.save()
team = Team(name="A", group=group)
team.save()
# Rank will be the number of users who have points plus one.
overall_rank = 1
team_rank = 1
self.assertEqual(score_mgr.player_rank(self.user.get_profile(),
self.current_round),
overall_rank,
"Check user is last overall for the current round.")
self.assertEqual(score_mgr.player_rank_in_team(self.user.get_profile(),
self.current_round),
team_rank,
"Check user is last in their team for the current "
"round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.get_profile()
profile2.add_points(10, datetime.datetime.today(), "test")
self.assertEqual(score_mgr.player_rank(self.user.get_profile(),
self.current_round),
overall_rank + 1,
"Check that the user's overall rank has moved down.")
self.assertEqual(score_mgr.player_rank_in_team(self.user.get_profile(),
self.current_round),
team_rank + 1,
"Check that the user's team rank has moved down.")
class PointsLogTest(TransactionTestCase):
"""test points log"""
def setUp(self):
self.user = User.objects.create_user("test", "test@test.com")
test_utils.set_competition_round()
def testAddPoints(self):
"""
Test that adding points creates a new entry in the points log.
"""
log_count = PointsTransaction.objects.count()
profile = self.user.get_profile()
profile.add_points(10, datetime.datetime.today(), "Hello world", None)
profile.save()
self.assertEqual(PointsTransaction.objects.count(), log_count + 1,
"A new log should have been created.")
log = profile.user.pointstransaction_set.all()[0]
self.assertEqual(log.points, 10, "Points should have been awarded.")
self.assertEqual(log.message, "Hello world",
"Message should have been added.")
|
# Multiples of 3 and 5
# Problem 1
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these
# multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
import math
import itertools
def is_divisible_by_one(x, factors):
for factor in factors:
if x % factor == 0:
return True
return False
def is_divisible_by_all(x, factors):
for factor in factors:
if x % factor != 0:
return False
return True
def lcm(factors, return_generator=False):
max_factor = max(factors)
generator = (f
for f in itertools.count(max_factor, max_factor)
if is_divisible_by_all(f, factors))
if return_generator:
return generator
else:
return next(generator)
FACTORS = [3, 5]
N = 1000
lcm = lcm(FACTORS)
mod_lcm = [i for i in range(0, lcm) if is_divisible_by_one(i, FACTORS)]
print("modulo lcm numbers\t", mod_lcm)
N_lcm = math.ceil(N / lcm)
print("number of modulos\t", N_lcm)
num_below_1000 = [i
for m in range(0, N_lcm)
for i in map(lambda j: j + (m * lcm), mod_lcm)
if i < N]
print("numbers below N\t\t", num_below_1000)
result = sum(num_below_1000)
print("final result\t\t", result)
|
import os
from fontTools.misc.py23 import basestring
from fontParts.base.errors import FontPartsError
from fontParts.base.base import dynamicProperty, InterpolationMixin
from fontParts.base.layer import _BaseGlyphVendor
from fontParts.base import normalizers
from fontParts.base.compatibility import FontCompatibilityReporter
from fontParts.base.deprecated import DeprecatedFont, RemovedFont
class BaseFont(
_BaseGlyphVendor,
InterpolationMixin,
DeprecatedFont,
RemovedFont
):
"""
A font object. This object is almost always
created with one of the font functions in
:ref:`fontparts-world`.
"""
def __init__(self, pathOrObject=None, showInterface=True):
"""
When constructing a font, the object can be created
in a new file, from an existing file or from a native
object. This is defined with the **pathOrObjectArgument**.
If **pathOrObject** is a string, the string must represent
an existing file. If **pathOrObject** is an instance of the
environment's unwrapped native font object, wrap it with
FontParts. If **pathOrObject** is None, create a new,
empty font. If **showInterface** is ``False``, the font
should be created without graphical interface. The default
for **showInterface** is ``True``.
"""
super(BaseFont, self).__init__(pathOrObject=pathOrObject,
showInterface=showInterface)
def _reprContents(self):
contents = [
"'%s %s'" % (self.info.familyName, self.info.styleName),
]
if self.path is not None:
contents.append("path=%r" % self.path)
return contents
# ----
# Copy
# ----
copyAttributes = (
"info",
"groups",
"kerning",
"features",
"lib",
"layerOrder",
"defaultLayerName",
"glyphOrder"
)
def copy(self):
"""
Copy the font into a new font. ::
>>> copiedFont = font.copy()
This will copy:
* info
* groups
* kerning
* features
* lib
* layers
* layerOrder
* defaultLayerName
* glyphOrder
* guidelines
"""
return super(BaseFont, self).copy()
def copyData(self, source):
"""
Copy data from **source** into this font.
Refer to :meth:`BaseFont.copy` for a list
of values that will be copied.
"""
for layerName in source.layerOrder:
if layerName in self.layerOrder:
layer = self.getLayer(layerName)
else:
layer = self.newLayer(layerName)
layer.copyData(source.getLayer(layerName))
for guideline in self.guidelines:
self.appendGuideline(guideline)
super(BaseFont, self).copyData(source)
# ---------------
# File Operations
# ---------------
# Initialize
def _init(self, pathOrObject=None, showInterface=True, **kwargs):
"""
Initialize this object. This should wrap a native font
object based on the values for **pathOrObject**:
+--------------------+---------------------------------------------------+
| None | Create a new font. |
+--------------------+---------------------------------------------------+
| string | Open the font file located at the given location. |
+--------------------+---------------------------------------------------+
| native font object | Wrap the given object. |
+--------------------+---------------------------------------------------+
If **showInterface** is ``False``, the font should be
created without graphical interface.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# path
path = dynamicProperty(
"base_path",
"""
The path to the file this object represents. ::
>>> print font.path
"/path/to/my/font.ufo"
"""
)
def _get_base_path(self):
path = self._get_path()
if path is not None:
path = normalizers.normalizeFilePath(path)
return path
def _get_path(self, **kwargs):
"""
This is the environment implementation of
:attr:`BaseFont.path`.
This must return a :ref:`type-string` defining the
location of the file or ``None`` indicating that the
font does not have a file representation. If the
returned value is not ``None`` it will be normalized
with :func:`normalizers.normalizeFilePath`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# save
def save(self, path=None, showProgress=False, formatVersion=None):
"""
Save the font to **path**.
>>> font.save()
>>> font.save("/path/to/my/font-2.ufo")
If **path** is None, use the font's original location.
The file type must be inferred from the file extension
of the given path. If no file extension is given, the
environment may fall back to the format of its choice.
**showProgress** indicates if a progress indicator should
be displayed during the operation. Environments may or may
not implement this behavior. **formatVersion** indicates
the format version that should be used for writing the given
file type. For example, if 2 is given for formatVersion
        and the file type being written is UFO, the file is to
be written in UFO 2 format. This value is not limited
to UFO format versions. If no format version is given,
the original format version of the file should be preserved.
If there is no original format version it is implied that
the format version is the latest version for the file
type as supported by the environment.
.. note::
Environments may define their own rules governing when
a file should be saved into its original location and
when it should not. For example, a font opened from a
compiled OpenType font may not be written back into
the original OpenType font.
"""
if path is None and self.path is None:
raise IOError(("The font cannot be saved because no file "
"location has been given."))
if path is not None:
path = normalizers.normalizeFilePath(path)
showProgress = bool(showProgress)
if formatVersion is not None:
formatVersion = normalizers.normalizeFileFormatVersion(
formatVersion)
self._save(path=path, showProgress=showProgress,
formatVersion=formatVersion)
def _save(self, path=None, showProgress=False,
formatVersion=None, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.save`. **path** will be a
:ref:`type-string` or ``None``. If **path**
is not ``None``, the value will have been
normalized with :func:`normalizers.normalizeFilePath`.
**showProgress** will be a ``bool`` indicating if
the environment should display a progress bar
during the operation. Environments are not *required*
        to display a progress bar even if **showProgress**
is ``True``. **formatVersion** will be :ref:`type-int-float`
or ``None`` indicating the file format version
to write the data into. It will have been normalized
with :func:`normalizers.normalizeFileFormatVersion`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# close
def close(self, save=False):
"""
Close the font.
>>> font.close()
**save** is a boolean indicating if the font
should be saved prior to closing. If **save**
is ``True``, the :meth:`BaseFont.save` method
will be called. The default is ``False``.
"""
if save:
self.save()
self._close()
def _close(self, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.close`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# generate
@staticmethod
def generateFormatToExtension(format, fallbackFormat):
"""
+--------------+--------------------------------------------------------------------+
| mactype1 | Mac Type 1 font (generates suitcase and LWFN file) |
+--------------+--------------------------------------------------------------------+
| macttf | Mac TrueType font (generates suitcase) |
+--------------+--------------------------------------------------------------------+
| macttdfont | Mac TrueType font (generates suitcase with resources in data fork) |
+--------------+--------------------------------------------------------------------+
| otfcff | PS OpenType (CFF-based) font (OTF) |
+--------------+--------------------------------------------------------------------+
| otfttf | PC TrueType/TT OpenType font (TTF) |
+--------------+--------------------------------------------------------------------+
| pctype1 | PC Type 1 font (binary/PFB) |
+--------------+--------------------------------------------------------------------+
| pcmm | PC MultipleMaster font (PFB) |
+--------------+--------------------------------------------------------------------+
| pctype1ascii | PC Type 1 font (ASCII/PFA) |
+--------------+--------------------------------------------------------------------+
| pcmmascii | PC MultipleMaster font (ASCII/PFA) |
+--------------+--------------------------------------------------------------------+
| ufo1 | UFO format version 1 |
+--------------+--------------------------------------------------------------------+
| ufo2 | UFO format version 2 |
+--------------+--------------------------------------------------------------------+
| ufo3 | UFO format version 3 |
+--------------+--------------------------------------------------------------------+
| unixascii | UNIX ASCII font (ASCII/PFA) |
+--------------+--------------------------------------------------------------------+
"""
formatToExtension = dict(
# mactype1=None,
macttf=".ttf",
macttdfont=".dfont",
otfcff=".otf",
otfttf=".ttf",
# pctype1=None,
# pcmm=None,
# pctype1ascii=None,
# pcmmascii=None,
ufo1=".ufo",
ufo2=".ufo",
ufo3=".ufo",
unixascii=".pfa",
)
return formatToExtension.get(format, fallbackFormat)
def generate(self, format, path=None, **environmentOptions):
"""
Generate the font to another format.
>>> font.generate("otfcff")
>>> font.generate("otfcff", "/path/to/my/font.otf")
**format** defines the file format to output.
Standard format identifiers can be found in :attr:`BaseFont.generateFormatToExtension`:
Environments are not required to support all of these
and environments may define their own format types.
**path** defines the location where the new file should
be created. If a file already exists at that location,
it will be overwritten by the new file. If **path** defines
a directory, the file will be output as the current
file name, with the appropriate suffix for the format,
into the given directory. If no **path** is given, the
file will be output into the same directory as the source
font with the file named with the current file name,
with the appropriate suffix for the format.
Environments may allow unique keyword arguments in this
method. For example, if a tool allows decomposing components
during a generate routine it may allow this:
>>> font.generate("otfcff", "/p/f.otf", decompose=True)
"""
import warnings
if format is None:
raise ValueError("The format must be defined when generating.")
elif not isinstance(format, basestring):
raise TypeError("The format must be defined as a string.")
env = {}
for key, value in environmentOptions.items():
valid = self._isValidGenerateEnvironmentOption(key)
if not valid:
warnings.warn("The %s argument is not supported "
"in this environment." % key, UserWarning)
env[key] = value
environmentOptions = env
ext = self.generateFormatToExtension(format, "." + format)
if path is None and self.path is None:
raise IOError(("The file cannot be generated because an "
"output path was not defined."))
elif path is None:
path = os.path.splitext(self.path)[0]
path += ext
elif os.path.isdir(path):
if self.path is None:
raise IOError(("The file cannot be generated because "
"the file does not have a path."))
fileName = os.path.basename(self.path)
fileName += ext
path = os.path.join(path, fileName)
path = normalizers.normalizeFilePath(path)
return self._generate(
format=format,
path=path,
environmentOptions=environmentOptions
)
@staticmethod
def _isValidGenerateEnvironmentOption(name):
"""
Any unknown keyword arguments given to :meth:`BaseFont.generate`
will be passed to this method. **name** will be the name
used for the argument. Environments may evaluate if **name**
        is a supported option. If it is, they must return `True`;
        if it is not, they must return `False`.
Subclasses may override this method.
"""
return False
def _generate(self, format, path, environmentOptions, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.generate`. **format** will be a
:ref:`type-string` defining the output format.
Refer to the :meth:`BaseFont.generate` documentation
for the standard format identifiers. If the value
given for **format** is not supported by the environment,
the environment must raise :exc:`FontPartsError`.
**path** will be a :ref:`type-string` defining the
location where the file should be created. It
will have been normalized with :func:`normalizers.normalizeFilePath`.
**environmentOptions** will be a dictionary of names
validated with :meth:`BaseFont._isValidGenerateEnvironmentOption`
        and the given values. These values will not have been passed
through any normalization functions.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# -----------
# Sub-Objects
# -----------
# info
info = dynamicProperty(
"base_info",
"""
The font's :class:`BaseInfo` object.
>>> font.info.familyName
"My Family"
"""
)
def _get_base_info(self):
info = self._get_info()
info.font = self
return info
def _get_info(self):
"""
This is the environment implementation of
:attr:`BaseFont.info`. This must return an
instance of a :class:`BaseInfo` subclass.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# groups
groups = dynamicProperty(
"base_groups",
"""
The font's :class:`BaseGroups` object.
>>> font.groups["myGroup"]
["A", "B", "C"]
"""
)
def _get_base_groups(self):
groups = self._get_groups()
groups.font = self
return groups
def _get_groups(self):
"""
This is the environment implementation of
:attr:`BaseFont.groups`. This must return
an instance of a :class:`BaseGroups` subclass.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# kerning
kerning = dynamicProperty(
"base_kerning",
"""
The font's :class:`BaseKerning` object.
>>> font.kerning["A", "B"]
-100
"""
)
def _get_base_kerning(self):
kerning = self._get_kerning()
kerning.font = self
return kerning
def _get_kerning(self):
"""
This is the environment implementation of
:attr:`BaseFont.kerning`. This must return
an instance of a :class:`BaseKerning` subclass.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def getFlatKerning(self):
"""
Get the font's kerning as a flat dictionary.
"""
return self._getFlatKerning()
def _getFlatKerning(self):
"""
This is the environment implementation of
:meth:`BaseFont.getFlatKerning`.
Subclasses may override this method.
"""
kernOrder = {
(True, True): 0, # group group
(True, False): 1, # group glyph
(False, True): 2, # glyph group
(False, False): 3, # glyph glyph
}
def kerningSortKeyFunc(pair):
g1, g2 = pair
g1grp = g1.startswith("public.kern1.")
g2grp = g2.startswith("public.kern2.")
return (kernOrder[g1grp, g2grp], pair)
flatKerning = dict()
kerning = self.kerning
groups = self.groups
for pair in sorted(self.kerning.keys(), key=kerningSortKeyFunc):
kern = kerning[pair]
(left, right) = pair
if left.startswith("public.kern1."):
left = groups.get(left, [])
else:
left = [left]
if right.startswith("public.kern2."):
right = groups.get(right, [])
else:
right = [right]
for r in right:
for l in left:
flatKerning[(l, r)] = kern
return flatKerning
# features
features = dynamicProperty(
"base_features",
"""
The font's :class:`BaseFeatures` object.
>>> font.features.text
"include(features/substitutions.fea);"
"""
)
def _get_base_features(self):
features = self._get_features()
features.font = self
return features
def _get_features(self):
"""
This is the environment implementation of
:attr:`BaseFont.features`. This must return
an instance of a :class:`BaseFeatures` subclass.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# lib
lib = dynamicProperty(
"base_lib",
"""
The font's :class:`BaseLib` object.
>>> font.lib["org.robofab.hello"]
"world"
"""
)
def _get_base_lib(self):
lib = self._get_lib()
lib.font = self
return lib
def _get_lib(self):
"""
This is the environment implementation of
:attr:`BaseFont.lib`. This must return an
instance of a :class:`BaseLib` subclass.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# -----------------
# Layer Interaction
# -----------------
layers = dynamicProperty(
"base_layers",
"""
The font's :class:`BaseLayer` objects.
>>> for layer in font.layers:
... layer.name
"My Layer 1"
"My Layer 2"
"""
)
def _get_base_layers(self):
layers = self._get_layers()
for layer in layers:
self._setFontInLayer(layer)
return tuple(layers)
def _get_layers(self, **kwargs):
"""
This is the environment implementation of
:attr:`BaseFont.layers`. This must return an
:ref:`type-immutable-list` containing
instances of :class:`BaseLayer` subclasses.
The items in the list should be in the order
defined by :attr:`BaseFont.layerOrder`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# order
layerOrder = dynamicProperty(
"base_layerOrder",
"""
A list of layer names indicating order of the layers in the font.
>>> font.layerOrder = ["My Layer 2", "My Layer 1"]
>>> font.layerOrder
["My Layer 2", "My Layer 1"]
"""
)
def _get_base_layerOrder(self):
value = self._get_layerOrder()
value = normalizers.normalizeLayerOrder(value, self)
return list(value)
def _set_base_layerOrder(self, value):
value = normalizers.normalizeLayerOrder(value, self)
self._set_layerOrder(value)
def _get_layerOrder(self, **kwargs):
"""
This is the environment implementation of
:attr:`BaseFont.layerOrder`. This must return an
:ref:`type-immutable-list` defining the order of
the layers in the font. The contents of the list
must be layer names as :ref:`type-string`. The
list will be normalized with :func:`normalizers.normalizeLayerOrder`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_layerOrder(self, value, **kwargs):
"""
This is the environment implementation of
:attr:`BaseFont.layerOrder`. **value** will
be a **list** of :ref:`type-string` representing
layer names. The list will have been normalized
with :func:`normalizers.normalizeLayerOrder`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# default layer
def _setFontInLayer(self, layer):
if layer.font is None:
layer.font = self
defaultLayerName = dynamicProperty(
"base_defaultLayerName",
"""
The name of the font's default layer.
>>> font.defaultLayerName = "My Layer 2"
>>> font.defaultLayerName
"My Layer 2"
"""
)
def _get_base_defaultLayerName(self):
value = self._get_defaultLayerName()
value = normalizers.normalizeDefaultLayerName(value, self)
return value
def _set_base_defaultLayerName(self, value):
value = normalizers.normalizeDefaultLayerName(value, self)
self._set_defaultLayerName(value)
def _get_defaultLayerName(self):
"""
This is the environment implementation of
:attr:`BaseFont.defaultLayerName`. Return the
name of the default layer as a :ref:`type-string`.
The name will be normalized with
:func:`normalizers.normalizeDefaultLayerName`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_defaultLayerName(self, value, **kwargs):
"""
This is the environment implementation of
:attr:`BaseFont.defaultLayerName`. **value**
will be a :ref:`type-string`. It will have
been normalized with :func:`normalizers.normalizeDefaultLayerName`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
defaultLayer = dynamicProperty(
"base_defaultLayer",
"""
The font's default layer.
>>> layer = font.defaultLayer
>>> font.defaultLayer = otherLayer
"""
)
def _get_defaultLayer(self):
layer = self._get_base_defaultLayer()
layer = normalizers.normalizeLayer(layer)
return layer
def _set_defaultLayer(self, layer):
layer = normalizers.normalizeLayer(layer)
self._set_base_defaultLayer(layer)
def _get_base_defaultLayer(self):
"""
This is the environment implementation of
:attr:`BaseFont.defaultLayer`. Return the
default layer as a :class:`BaseLayer` object.
The layer will be normalized with
:func:`normalizers.normalizeLayer`.
Subclasses must override this method.
"""
name = self.defaultLayerName
layer = self.getLayer(name)
return layer
def _set_base_defaultLayer(self, value):
"""
This is the environment implementation of
:attr:`BaseFont.defaultLayer`. **value**
will be a :class:`BaseLayer`. It will have
been normalized with :func:`normalizers.normalizeLayer`.
Subclasses must override this method.
"""
self.defaultLayerName = value.name
# get
def getLayer(self, name):
"""
Get the :class:`BaseLayer` with **name**.
>>> layer = font.getLayer("My Layer 2")
"""
name = normalizers.normalizeLayerName(name)
if name not in self.layerOrder:
raise ValueError("No layer with the name '%s' exists." % name)
layer = self._getLayer(name)
self._setFontInLayer(layer)
return layer
def _getLayer(self, name, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.getLayer`. **name** will
be a :ref:`type-string`. It will have been
normalized with :func:`normalizers.normalizeLayerName`
and it will have been verified as an existing layer.
This must return an instance of :class:`BaseLayer`.
Subclasses may override this method.
"""
for layer in self.layers:
if layer.name == name:
return layer
# new
def newLayer(self, name, color=None):
"""
Make a new layer with **name** and **color**.
**name** must be a :ref:`type-string` and
**color** must be a :ref:`type-color` or ``None``.
>>> layer = font.newLayer("My Layer 3")
        This will return the newly created
:class:`BaseLayer`.
"""
name = normalizers.normalizeLayerName(name)
if name in self.layerOrder:
layer = self.getLayer(name)
if color is not None:
layer.color = color
return layer
if color is not None:
color = normalizers.normalizeColor(color)
layer = self._newLayer(name=name, color=color)
self._setFontInLayer(layer)
return layer
def _newLayer(self, name, color, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.newLayer`. **name** will be
a :ref:`type-string` representing a valid
layer name. The value will have been normalized
with :func:`normalizers.normalizeLayerName` and
**name** will not be the same as the name of
an existing layer. **color** will be a
:ref:`type-color` or ``None``. If the value
is not ``None`` the value will have been
normalized with :func:`normalizers.normalizeColor`.
This must return an instance of a :class:`BaseLayer`
subclass that represents the new layer.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# remove
def removeLayer(self, name):
"""
Remove the layer with **name** from the font.
>>> font.removeLayer("My Layer 3")
"""
name = normalizers.normalizeLayerName(name)
if name not in self.layerOrder:
raise ValueError("No layer with the name '%s' exists." % name)
self._removeLayer(name)
def _removeLayer(self, name, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.removeLayer`. **name** will
be a :ref:`type-string` defining the name
of an existing layer. The value will have
been normalized with :func:`normalizers.normalizeLayerName`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# insert
def insertLayer(self, layer, name=None):
"""
Insert **layer** into the font. ::
>>> layer = font.insertLayer(otherLayer, name="layer 2")
This will not insert the layer directly.
Rather, a new layer will be created and the data from
        **layer** will be copied to the new layer. **name**
indicates the name that should be assigned to the layer
after insertion. If **name** is not given, the layer's
original name must be used. If the layer does not have
a name, an error must be raised. The data that will be
inserted from **layer** is the same data as documented
in :meth:`BaseLayer.copy`.
"""
if name is None:
name = layer.name
name = normalizers.normalizeLayerName(name)
        if name in self.layerOrder:
self.removeLayer(name)
return self._insertLayer(layer, name=name)
def _insertLayer(self, layer, name, **kwargs):
"""
This is the environment implementation of :meth:`BaseFont.insertLayer`.
This must return an instance of a :class:`BaseLayer` subclass.
**layer** will be a layer object with the attributes necessary
        for copying as defined in :meth:`BaseLayer.copy`. An environment
must not insert **layer** directly. Instead the data from **layer**
should be copied to a new layer. **name** will be a :ref:`type-string`
representing a glyph layer. It will have been normalized with
:func:`normalizers.normalizeLayerName`. **name** will have been
tested to make sure that no layer with the same name exists in the font.
Subclasses may override this method.
"""
if name != layer.name and layer.name in self.layerOrder:
layer = layer.copy()
layer.name = name
dest = self.newLayer(name)
dest.copyData(layer)
return dest
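    # Usage sketch for insertLayer (the objects below are assumptions, not
    # from a real session): the passed-in layer is never adopted directly;
    # its data is copied into a newly created layer in this font.
    #
    #   >>> source = otherFont.getLayer("background")
    #   >>> inserted = font.insertLayer(source, name="background backup")
    #   >>> inserted is source
    #   False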
# duplicate
def duplicateLayer(self, layerName, newLayerName):
"""
Duplicate the layer with **layerName**, assign
**newLayerName** to the new layer and insert the
new layer into the font. ::
>>> layer = font.duplicateLayer("layer 1", "layer 2")
"""
layerOrder = self.layerOrder
layerName = normalizers.normalizeLayerName(layerName)
if layerName not in layerOrder:
raise ValueError("No layer with the name '%s' exists." % layerName)
newLayerName = normalizers.normalizeLayerName(newLayerName)
if newLayerName in layerOrder:
raise ValueError("A layer with the name '%s' already exists." % newLayerName)
newLayer = self._duplicateLayer(layerName, newLayerName)
newLayer = normalizers.normalizeLayer(newLayer)
return newLayer
def _duplicateLayer(self, layerName, newLayerName):
"""
This is the environment implementation of :meth:`BaseFont.duplicateLayer`.
**layerName** will be a :ref:`type-string` representing a valid layer name.
The value will have been normalized with :func:`normalizers.normalizeLayerName`
and **layerName** will be a layer that exists in the font. **newLayerName**
will be a :ref:`type-string` representing a valid layer name. The value will
have been normalized with :func:`normalizers.normalizeLayerName` and
**newLayerName** will have been tested to make sure that no layer with
the same name exists in the font. This must return an instance of a
:class:`BaseLayer` subclass.
Subclasses may override this method.
"""
newLayer = self.getLayer(layerName).copy()
return self.insertLayer(newLayer, newLayerName)
def swapLayerNames(self, layerName, otherLayerName):
"""
Assign **layerName** to the layer currently named
**otherLayerName** and assign the name **otherLayerName**
to the layer currently named **layerName**.
>>> font.swapLayerNames("before drawing revisions", "after drawing revisions")
"""
layerOrder = self.layerOrder
layerName = normalizers.normalizeLayerName(layerName)
if layerName not in layerOrder:
raise ValueError("No layer with the name '%s' exists." % layerName)
otherLayerName = normalizers.normalizeLayerName(otherLayerName)
if otherLayerName not in layerOrder:
raise ValueError("No layer with the name '%s' exists." % otherLayerName)
self._swapLayers(layerName, otherLayerName)
def _swapLayers(self, layerName, otherLayerName):
"""
This is the environment implementation of :meth:`BaseFont.swapLayerNames`.
**layerName** will be a :ref:`type-string` representing a valid layer name.
The value will have been normalized with :func:`normalizers.normalizeLayerName`
and **layerName** will be a layer that exists in the font. **otherLayerName**
will be a :ref:`type-string` representing a valid layer name. The value will
have been normalized with :func:`normalizers.normalizeLayerName` and
**otherLayerName** will be a layer that exists in the font.
Subclasses may override this method.
"""
import random
layer1 = self.getLayer(layerName)
layer2 = self.getLayer(otherLayerName)
# make a temporary name and assign it to
# the first layer to prevent two layers
# from having the same name at once.
layerOrder = self.layerOrder
for _ in range(50):
# shout out to PostScript unique IDs
tempLayerName = str(random.randint(4000000, 4999999))
if tempLayerName not in layerOrder:
break
if tempLayerName in layerOrder:
raise FontPartsError(("Couldn't find a temporary layer name "
"after 50 tries. Sorry. Please try again."))
layer1.name = tempLayerName
# now swap
layer2.name = layerName
layer1.name = otherLayerName
# -----------------
# Glyph Interaction
# -----------------
# base implementation overrides
def _getItem(self, name, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.__getitem__`. **name** will
be a :ref:`type-string` defining an existing
glyph in the default layer. The value will
have been normalized with :func:`normalizers.normalizeGlyphName`.
Subclasses may override this method.
"""
layer = self.defaultLayer
return layer[name]
def _keys(self, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.keys`. This must return an
:ref:`type-immutable-list` of all glyph names
in the default layer.
Subclasses may override this method.
"""
layer = self.defaultLayer
return layer.keys()
def _newGlyph(self, name, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.newGlyph`. **name** will be
a :ref:`type-string` representing a valid
glyph name. The value will have been tested
to make sure that an existing glyph in the
default layer does not have an identical name.
The value will have been normalized with
:func:`normalizers.normalizeGlyphName`. This
must return an instance of :class:`BaseGlyph`
representing the new glyph.
Subclasses may override this method.
"""
layer = self.defaultLayer
        # clear is False here because the base newGlyph
# that has called this method will have already
# handled the clearing as specified by the caller.
return layer.newGlyph(name, clear=False)
def _removeGlyph(self, name, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.removeGlyph`. **name** will
be a :ref:`type-string` representing an
existing glyph in the default layer. The
value will have been normalized with
:func:`normalizers.normalizeGlyphName`.
Subclasses may override this method.
"""
layer = self.defaultLayer
layer.removeGlyph(name)
def __setitem__(self, name, glyph):
"""
Insert **glyph** into the font. ::
>>> glyph = font["A"] = otherGlyph
This will not insert the glyph directly. Rather, a
new glyph will be created and the data from **glyph**
will be copied to the new glyph. **name** indicates
the name that should be assigned to the glyph after
insertion. The data that will be inserted
from **glyph** is the same data as documented in
:meth:`BaseGlyph.copy`.
        At the font level, **font.glyphOrder** will be preserved
        if **name** is already present.
"""
name = normalizers.normalizeGlyphName(name)
if name in self:
# clear the glyph here if the glyph exists
dest = self._getItem(name)
dest.clear()
return self._insertGlyph(glyph, name=name, clear=False)
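    # Illustrative only (the glyph and font names are assumptions): assignment
    # copies the glyph data rather than storing the passed-in object, and an
    # existing glyph with that name is cleared first so font.glyphOrder is
    # preserved.
    #
    #   >>> font["A.alt"] = sourceFont["A"]
    #   >>> font["A.alt"] is sourceFont["A"]
    #   False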
# order
glyphOrder = dynamicProperty(
"base_glyphOrder",
"""
The preferred order of the glyphs in the font.
>>> font.glyphOrder
["C", "B", "A"]
>>> font.glyphOrder = ["A", "B", "C"]
"""
)
def _get_base_glyphOrder(self):
value = self._get_glyphOrder()
value = normalizers.normalizeGlyphOrder(value)
return value
def _set_base_glyphOrder(self, value):
value = normalizers.normalizeGlyphOrder(value)
self._set_glyphOrder(value)
def _get_glyphOrder(self):
"""
This is the environment implementation of
:attr:`BaseFont.glyphOrder`. This must return
an :ref:`type-immutable-list` containing glyph
names representing the glyph order in the font.
The value will be normalized with
:func:`normalizers.normalizeGlyphOrder`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_glyphOrder(self, value):
"""
This is the environment implementation of
:attr:`BaseFont.glyphOrder`. **value** will
be a list of :ref:`type-string`. It will
have been normalized with
:func:`normalizers.normalizeGlyphOrder`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# -----------------
# Global Operations
# -----------------
def round(self):
"""
        Round all appropriate data to integers.
>>> font.round()
This is the equivalent of calling the round method on:
* info
* kerning
* the default layer
* font-level guidelines
        Glyph rounding applies only to the default layer.
"""
self._round()
def _round(self):
"""
This is the environment implementation of
:meth:`BaseFont.round`.
Subclasses may override this method.
"""
layer = self.defaultLayer
layer.round()
self.info.round()
self.kerning.round()
for guideline in self.guidelines:
guideline.round()
def autoUnicodes(self):
"""
Use heuristics to set Unicode values in all glyphs.
>>> font.autoUnicodes()
Environments will define their own heuristics for
automatically determining values.
This applies only to the default layer.
"""
self._autoUnicodes()
def _autoUnicodes(self):
"""
This is the environment implementation of
:meth:`BaseFont.autoUnicodes`.
Subclasses may override this method.
"""
layer = self.defaultLayer
layer.autoUnicodes()
# ----------
# Guidelines
# ----------
def _setFontInGuideline(self, guideline):
if guideline.font is None:
guideline.font = self
guidelines = dynamicProperty(
"guidelines",
"""
An :ref:`type-immutable-list` of font-level :class:`BaseGuideline` objects.
>>> for guideline in font.guidelines:
... guideline.angle
0
45
90
"""
)
def _get_guidelines(self):
"""
This is the environment implementation of
:attr:`BaseFont.guidelines`. This must
return an :ref:`type-immutable-list` of
:class:`BaseGuideline` objects.
Subclasses may override this method.
"""
return tuple([self._getitem__guidelines(i)
for i in range(self._len__guidelines())])
def _len__guidelines(self):
return self._lenGuidelines()
def _lenGuidelines(self, **kwargs):
"""
This must return an integer indicating
the number of font-level guidelines
in the font.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _getitem__guidelines(self, index):
index = normalizers.normalizeIndex(index)
if index >= self._len__guidelines():
raise ValueError("No guideline located at index %d." % index)
guideline = self._getGuideline(index)
self._setFontInGuideline(guideline)
return guideline
def _getGuideline(self, index, **kwargs):
"""
This must return a :class:`BaseGuideline` object.
**index** will be a valid **index**.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _getGuidelineIndex(self, guideline):
for i, other in enumerate(self.guidelines):
if guideline == other:
return i
raise FontPartsError("The guideline could not be found.")
def appendGuideline(self, position=None, angle=None, name=None, color=None, guideline=None):
"""
Append a new guideline to the font.
>>> guideline = font.appendGuideline((50, 0), 90)
>>> guideline = font.appendGuideline((0, 540), 0, name="overshoot",
        ... color=(0, 0, 0, 0.2))
**position** must be a :ref:`type-coordinate`
indicating the position of the guideline.
**angle** indicates the :ref:`type-angle` of
the guideline. **name** indicates the name
for the guideline. This must be a :ref:`type-string`
or ``None``. **color** indicates the color for
the guideline. This must be a :ref:`type-color`
or ``None``. This will return the newly created
        :class:`BaseGuideline` object.
``guideline`` may be a :class:`BaseGuideline` object from which
attribute values will be copied. If ``position``, ``angle``, ``name``
or ``color`` are specified as arguments, those values will be used
instead of the values in the given guideline object.
"""
identifier = None
if guideline is not None:
guideline = normalizers.normalizeGuideline(guideline)
if position is None:
position = guideline.position
if angle is None:
angle = guideline.angle
if name is None:
name = guideline.name
if color is None:
color = guideline.color
if guideline.identifier is not None:
existing = set([g.identifier for g in self.guidelines if g.identifier is not None])
if guideline.identifier not in existing:
identifier = guideline.identifier
position = normalizers.normalizeCoordinateTuple(position)
angle = normalizers.normalizeRotationAngle(angle)
if name is not None:
name = normalizers.normalizeGuidelineName(name)
if color is not None:
color = normalizers.normalizeColor(color)
identifier = normalizers.normalizeIdentifier(identifier)
guideline = self._appendGuideline(position, angle, name=name, color=color, identifier=identifier)
guideline.font = self
return guideline
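    # A hedged usage sketch (the values are made up): attributes not passed
    # explicitly are taken from the given guideline object.
    #
    #   >>> template = otherFont.guidelines[0]
    #   >>> copied = font.appendGuideline(name="x-height copy",
    #   ...                               guideline=template)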
def _appendGuideline(self, position, angle, name=None, color=None, identifier=None, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.appendGuideline`. **position**
will be a valid :ref:`type-coordinate`. **angle**
will be a valid angle. **name** will be a valid
:ref:`type-string` or ``None``. **color** will
be a valid :ref:`type-color` or ``None``.
This must return the newly created
:class:`BaseGuideline` object.
Subclasses may override this method.
"""
self.raiseNotImplementedError()
def removeGuideline(self, guideline):
"""
Remove **guideline** from the font.
>>> font.removeGuideline(guideline)
>>> font.removeGuideline(2)
**guideline** can be a guideline object or
an integer representing the guideline index.
"""
if isinstance(guideline, int):
index = guideline
else:
index = self._getGuidelineIndex(guideline)
index = normalizers.normalizeIndex(index)
if index >= self._len__guidelines():
raise ValueError("No guideline located at index %d." % index)
self._removeGuideline(index)
def _removeGuideline(self, index, **kwargs):
"""
This is the environment implementation of
:meth:`BaseFont.removeGuideline`. **index**
will be a valid index.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def clearGuidelines(self):
"""
Clear all guidelines.
>>> font.clearGuidelines()
"""
self._clearGuidelines()
def _clearGuidelines(self):
"""
This is the environment implementation of
:meth:`BaseFont.clearGuidelines`.
Subclasses may override this method.
"""
for _ in range(self._len__guidelines()):
self.removeGuideline(-1)
# -------------
# Interpolation
# -------------
def interpolate(self, factor, minFont, maxFont,
round=True, suppressError=True):
"""
Interpolate all possible data in the font.
>>> font.interpolate(0.5, otherFont1, otherFont2)
>>> font.interpolate((0.5, 2.0), otherFont1, otherFont2, round=False)
The interpolation occurs on a 0 to 1.0 range where **minFont**
is located at 0 and **maxFont** is located at 1.0. **factor**
is the interpolation value. It may be less than 0 and greater
than 1.0. It may be a :ref:`type-int-float` or a tuple of
two :ref:`type-int-float`. If it is a tuple, the first
number indicates the x factor and the second number indicates
the y factor. **round** indicates if the result should be
rounded to integers. **suppressError** indicates if incompatible
data should be ignored or if an error should be raised when
such incompatibilities are found.
"""
factor = normalizers.normalizeInterpolationFactor(factor)
if not isinstance(minFont, BaseFont):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__, minFont.__class__.__name__))
if not isinstance(maxFont, BaseFont):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__, maxFont.__class__.__name__))
round = normalizers.normalizeBoolean(round)
suppressError = normalizers.normalizeBoolean(suppressError)
self._interpolate(factor, minFont, maxFont,
round=round, suppressError=suppressError)
def _interpolate(self, factor, minFont, maxFont,
round=True, suppressError=True):
"""
This is the environment implementation of
:meth:`BaseFont.interpolate`.
Subclasses may override this method.
"""
# layers
for layerName in self.layerOrder:
self.removeLayer(layerName)
for layerName in minFont.layerOrder:
if layerName not in maxFont.layerOrder:
continue
minLayer = minFont.getLayer(layerName)
maxLayer = maxFont.getLayer(layerName)
dstLayer = self.newLayer(layerName)
dstLayer.interpolate(factor, minLayer, maxLayer,
round=round, suppressError=suppressError)
if self.layerOrder:
self.defaultLayer = self.getLayer(self.layerOrder[0])
# kerning and groups
self.kerning.interpolate(factor, minFont.kerning, maxFont.kerning,
round=round, suppressError=suppressError)
# info
self.info.interpolate(factor, minFont.info, maxFont.info,
round=round, suppressError=suppressError)
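    # Factor semantics, restated from the docstring with illustrative numbers:
    # factor=0 reproduces minFont, factor=1 reproduces maxFont, factor=0.25
    # sits a quarter of the way towards maxFont, and a tuple such as
    # (0.5, 2.0) applies separate x and y factors; values outside 0..1
    # extrapolate.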
compatibilityReporterClass = FontCompatibilityReporter
def isCompatible(self, other):
"""
Evaluate interpolation compatibility with **other**.
>>> compatible, report = self.isCompatible(otherFont)
>>> compatible
False
>>> report
[Fatal] Glyph: "test1" + "test2"
[Fatal] Glyph: "test1" contains 1 contours | "test2" contains 2 contours
This will return a ``bool`` indicating if the font is
compatible for interpolation with **other** and a
:ref:`type-string` of compatibility notes.
"""
return super(BaseFont, self).isCompatible(other, BaseFont)
def _isCompatible(self, other, reporter):
"""
This is the environment implementation of
:meth:`BaseFont.isCompatible`.
Subclasses may override this method.
"""
font1 = self
font2 = other
# incompatible guidelines
guidelines1 = set(font1.guidelines)
guidelines2 = set(font2.guidelines)
if len(guidelines1) != len(guidelines2):
reporter.warning = True
reporter.guidelineCountDifference = True
if len(guidelines1.difference(guidelines2)) != 0:
reporter.warning = True
reporter.guidelinesMissingFromFont2 = list(
guidelines1.difference(guidelines2))
if len(guidelines2.difference(guidelines1)) != 0:
reporter.warning = True
reporter.guidelinesMissingInFont1 = list(
guidelines2.difference(guidelines1))
# incompatible layers
layers1 = set(font1.layerOrder)
layers2 = set(font2.layerOrder)
if len(layers1) != len(layers2):
reporter.warning = True
reporter.layerCountDifference = True
if len(layers1.difference(layers2)) != 0:
reporter.warning = True
reporter.layersMissingFromFont2 = list(layers1.difference(layers2))
if len(layers2.difference(layers1)) != 0:
reporter.warning = True
reporter.layersMissingInFont1 = list(layers2.difference(layers1))
# test layers
for layerName in sorted(layers1.intersection(layers2)):
layer1 = font1.getLayer(layerName)
layer2 = font2.getLayer(layerName)
layerCompatibility = layer1.isCompatible(layer2)[1]
if layerCompatibility.fatal or layerCompatibility.warning:
if layerCompatibility.fatal:
reporter.fatal = True
if layerCompatibility.warning:
reporter.warning = True
reporter.layers.append(layerCompatibility)
# -------
# mapping
# -------
def getReverseComponentMapping(self):
"""
        Get a reversed map of component references in the font.
        {
        'A' : ['Aacute', 'Aring']
        'acute' : ['Aacute']
        'ring' : ['Aring']
        etc.
        }
"""
return self._getReverseComponentMapping()
def _getReverseComponentMapping(self):
"""
This is the environment implementation of
:meth:`BaseFont.getReverseComponentMapping`.
Subclasses may override this method.
"""
layer = self.defaultLayer
return layer.getReverseComponentMapping()
def getCharacterMapping(self):
"""
        Create a dictionary of unicode -> [glyphname, ...] mappings.
        All glyphs are loaded. Note that one glyph can have multiple unicode values,
        and a unicode value can have multiple glyphs pointing to it.
"""
return self._getCharacterMapping()
def _getCharacterMapping(self):
"""
This is the environment implementation of
:meth:`BaseFont.getCharacterMapping`.
Subclasses may override this method.
"""
layer = self.defaultLayer
return layer.getCharacterMapping()
# ---------
# Selection
# ---------
# layers
selectedLayers = dynamicProperty(
"base_selectedLayers",
"""
        A list of layers selected in the font.
        Getting selected layer objects:
        >>> for layer in font.selectedLayers:
        ... layer.color = (1, 0, 0, 0.5)
        Setting selected layer objects:
        >>> font.selectedLayers = someLayers
"""
)
def _get_base_selectedLayers(self):
selected = tuple([normalizers.normalizeLayer(layer) for
layer in self._get_selectedLayers()])
return selected
def _get_selectedLayers(self):
"""
Subclasses may override this method.
"""
return self._getSelectedSubObjects(self.layers)
def _set_base_selectedLayers(self, value):
normalized = [normalizers.normalizeLayer(layer) for layer in value]
self._set_selectedLayers(normalized)
def _set_selectedLayers(self, value):
"""
Subclasses may override this method.
"""
return self._setSelectedSubObjects(self.layers, value)
selectedLayerNames = dynamicProperty(
"base_selectedLayerNames",
"""
        A list of names of layers selected in the font.
        Getting selected layer names:
        >>> for name in font.selectedLayerNames:
        ... print(name)
        Setting selected layer names:
        >>> font.selectedLayerNames = ["A", "B", "C"]
"""
)
def _get_base_selectedLayerNames(self):
selected = tuple([normalizers.normalizeLayerName(name) for
name in self._get_selectedLayerNames()])
return selected
def _get_selectedLayerNames(self):
"""
Subclasses may override this method.
"""
selected = [layer.name for layer in self.selectedLayers]
return selected
def _set_base_selectedLayerNames(self, value):
normalized = [normalizers.normalizeLayerName(name) for name in value]
self._set_selectedLayerNames(normalized)
def _set_selectedLayerNames(self, value):
"""
Subclasses may override this method.
"""
        select = [self.getLayer(name) for name in value]
self.selectedLayers = select
# guidelines
selectedGuidelines = dynamicProperty(
"base_selectedGuidelines",
"""
A list of guidelines selected in the font.
Getting selected guideline objects:
>>> for guideline in font.selectedGuidelines:
... guideline.color = (1, 0, 0, 0.5)
Setting selected guideline objects:
>>> font.selectedGuidelines = someGuidelines
Setting also supports guideline indexes:
>>> font.selectedGuidelines = [0, 2]
"""
)
def _get_base_selectedGuidelines(self):
selected = tuple([normalizers.normalizeGuideline(guideline) for
guideline in self._get_selectedGuidelines()])
return selected
def _get_selectedGuidelines(self):
"""
Subclasses may override this method.
"""
return self._getSelectedSubObjects(self.guidelines)
def _set_base_selectedGuidelines(self, value):
normalized = []
for i in value:
if isinstance(i, int):
i = normalizers.normalizeIndex(i)
else:
i = normalizers.normalizeGuideline(i)
normalized.append(i)
self._set_selectedGuidelines(normalized)
def _set_selectedGuidelines(self, value):
"""
Subclasses may override this method.
"""
return self._setSelectedSubObjects(self.guidelines, value)
|
import re
from mimetypes import guess_type
from django.urls import reverse
from markupsafe import Markup
from maps.model.type import Type
def link(entity):
""" Returning an HTML link for an entity."""
url = reverse(entity._meta.db_table.replace('_', ':') + '-detail', kwargs={'pk': entity.id})
return Markup('<a href = "' + url + '">' + truncate_string(entity.name) + '</a>')
def get_selected_nodes(name, request):
nodes = []
for node in Type.objects.get(name=name, parent=None).get_children():
field_name = 'map-type-' + sanitize(node.name) + '-id'
if field_name in request.POST and request.POST.get(field_name).split(',') != ['']:
nodes += request.POST.get(field_name).split(',')
return nodes
def sanitize(string):
return re.sub('[^A-Za-z0-9]+', '', string)
def truncate_string(string, length=80, title=True):
"""
    Return the string truncated to ``length`` characters with '..' appended if it is longer than ``length`` plus two.
    If the title param is true, the truncated text is wrapped in a span whose title attribute holds the original string (for mouse-over).
"""
if string is None:
return '' # pragma: no cover
if len(string) > length + 2:
if title:
title = string.replace('"', '')
string = '<span title="' + title + '">' + string[:length] + '..' + '</span>'
else:
string = string[:length] + '..'
return string
def get_mime_type(file_name):
return guess_type(file_name)[0]
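if __name__ == '__main__':
    # Minimal self-checks for the pure helpers above. This block is an
    # illustrative addition, not part of the original module, and it assumes
    # the module's Django/maps imports succeed (i.e. it runs inside the
    # project environment).
    assert sanitize('Burial site (A/B)') == 'BurialsiteAB'
    assert truncate_string('a' * 100, length=10, title=False) == 'a' * 10 + '..'
    assert get_mime_type('photo.jpg') == 'image/jpeg'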
|
#!/usr/bin/env python3
import itertools
def increment(_):
return 1
def strange_jump(offset):
return -1 if offset > 2 else 1
def cpu_states(initial_state, increase_func, at=0):
pos = at
# Give this man a medal -- make a COPY of input data.
# <https://www.reddit.com/r/adventofcode/comments/7hr5ya/psa2017_day_5_part_2_offset_of_three_or_more_is/dqtg5pz/>
state = initial_state[:]
num_instructions = len(state)
while True:
yield state, pos
if 0 <= pos < num_instructions:
old_pos = pos
offset = state[pos]
pos += offset
state[old_pos] += increase_func(offset)
def puzzle(initial_state, increase_func=increment):
num_instructions = len(initial_state)
state = cpu_states(initial_state, increase_func=increase_func)
for step in itertools.count():
cur_state, pos = next(state)
if not (0 <= pos < num_instructions):
return step, cur_state
def assert_next(iter, expected):
actual = next(iter)
assert actual == expected, "expected %s, got %s" % (expected, actual)
assert increment(0) == 1
assert all(strange_jump(n) == +1 for n in range(-10, 3))
assert all(strange_jump(n) == -1 for n in range(3, 10))
state = cpu_states([0, 3, 0, 1, -3], increase_func=increment, at=0)
assert_next(state, ([0, 3, 0, 1, -3], 0))
assert_next(state, ([1, 3, 0, 1, -3], 0))
assert_next(state, ([2, 3, 0, 1, -3], 1))
assert_next(state, ([2, 4, 0, 1, -3], 4))
assert_next(state, ([2, 4, 0, 1, -2], 1))
assert_next(state, ([2, 5, 0, 1, -2], 5))
assert_next(state, ([2, 5, 0, 1, -2], 5))
assert_next(state, ([2, 5, 0, 1, -2], 5))
state = cpu_states([2, 0], increase_func=increment, at=0)
assert_next(state, ([2, 0], 0))
assert_next(state, ([3, 0], 2))
state = cpu_states([0, -2], increase_func=increment, at=1)
assert_next(state, ([0, -2], 1))
assert_next(state, ([0, -1], -1))
steps, end_state = puzzle([0, 3, 0, 1, -3], increase_func=increment)
assert steps == 5
assert end_state == [2, 5, 0, 1, -2]
steps, end_state = puzzle([2, 0], increase_func=increment)
assert steps == 1
assert end_state == [3, 0]
steps, end_state = puzzle([0, 3, 0, 1, -3], increase_func=strange_jump)
assert steps == 10
assert end_state == [2, 3, 2, 3, -1]
if __name__ == '__main__':
with open('input') as f:
lines = f.readlines()
initial_state = [int(line.strip()) for line in lines]
step, *_ = puzzle(initial_state, increase_func=increment)
print(step)
step, *_ = puzzle(initial_state, increase_func=strange_jump)
print(step)
|
import datetime as dt
from functools import reduce
from itertools import groupby
from django import forms
from django.db.models import Q
from django.shortcuts import render
from django.utils.html import format_html, format_html_join
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
from workbench.accounts.models import Team, User
from workbench.accounts.reporting import (
average_employment_duration,
birthdays,
work_anniversaries,
)
from workbench.invoices.models import Invoice
from workbench.invoices.utils import next_valid_day
from workbench.logbook.models import LoggedCost
from workbench.logbook.reporting import logbook_stats
from workbench.projects.models import Project
from workbench.projects.reporting import overdrawn_projects
from workbench.reporting import (
green_hours,
key_data,
labor_costs,
project_budget_statistics,
)
from workbench.reporting.utils import date_ranges
from workbench.tools.formats import Z0, Z2, local_date_format
from workbench.tools.forms import DateInput, Form
from workbench.tools.validation import filter_form, in_days, monday
from workbench.tools.xlsx import WorkbenchXLSXDocument
def overdrawn_projects_view(request):
return render(
request,
"reporting/overdrawn_projects.html",
{"overdrawn_projects": overdrawn_projects()},
)
class OpenItemsForm(Form):
cutoff_date = forms.DateField(label=capfirst(_("cutoff date")), widget=DateInput())
def __init__(self, data, *args, **kwargs):
data = data.copy()
data.setdefault("cutoff_date", dt.date.today().isoformat())
super().__init__(data, *args, **kwargs)
def open_items_list(self):
open_items = (
Invoice.objects.invoiced()
.filter(
Q(invoiced_on__lte=self.cleaned_data["cutoff_date"]),
Q(closed_on__gt=self.cleaned_data["cutoff_date"])
| Q(closed_on__isnull=True),
)
.order_by("invoiced_on", "pk")
.select_related("owned_by", "customer", "project")
)
return {
"list": open_items,
"total_excl_tax": sum((i.total_excl_tax for i in open_items), Z2),
"total": sum((i.total for i in open_items), Z2),
}
@filter_form(OpenItemsForm)
def open_items_list(request, form):
if request.GET.get("export") == "xlsx":
xlsx = WorkbenchXLSXDocument()
xlsx.table_from_queryset(
form.open_items_list()["list"].select_related(
"customer", "contact__organization", "owned_by", "project__owned_by"
)
)
return xlsx.to_response(
"open-items-list-{}.xlsx".format(
form.cleaned_data["cutoff_date"].isoformat()
)
)
return render(
request,
"reporting/open_items_list.html",
{"form": form, "open_items_list": form.open_items_list()},
)
def key_data_details(fn):
def view(request, year, month):
year = int(year)
month = int(month)
date_range = [dt.date(year, month, 1)]
date_range.append(next_valid_day(year, month + 1, 1) - dt.timedelta(days=1))
return fn(request, date_range)
return view
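# Example of the range computed above (assuming next_valid_day rolls an
# out-of-range month over into the next year): year=2023, month=12 gives
# date_range == [dt.date(2023, 12, 1), dt.date(2023, 12, 31)].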
@key_data_details
def key_data_gross_profit(request, date_range):
return render(
request,
"reporting/key_data_gross_profit.html",
{
"date_range": date_range,
"invoices": Invoice.objects.invoiced()
.filter(invoiced_on__range=date_range)
.order_by("invoiced_on", "id")
.select_related("project", "owned_by"),
},
)
@key_data_details
def key_data_third_party_costs(request, date_range):
return render(
request,
"reporting/key_data_third_party_costs.html",
{
"date_range": date_range,
"third_party_costs": LoggedCost.objects.filter(
rendered_on__range=date_range,
third_party_costs__isnull=False,
invoice_service__isnull=True,
)
.order_by("rendered_on", "id")
.select_related("service"),
"invoices": Invoice.objects.invoiced()
.filter(~Q(type=Invoice.DOWN_PAYMENT))
.filter(Q(invoiced_on__range=date_range), ~Q(third_party_costs=Z2))
.order_by("invoiced_on", "id")
.select_related("project", "owned_by"),
},
)
def key_data_projected_invoices(request):
pi = key_data.projected_invoices()
all_months = sorted(
reduce(lambda a, b: a | b["monthly"].keys(), pi["projects"], set())
)
return render(
request,
"reporting/key_data_projected_invoices.html",
{
"projects": sorted(
(
project
| {"monthly": [project["monthly"].get(m, Z2) for m in all_months]}
for project in pi["projects"]
),
key=lambda project: project["monthly"],
reverse=True,
),
"months": [dt.date(m[0], m[1], 1) for m in all_months],
"monthly_overall": [pi["monthly_overall"].get(m) for m in all_months],
},
)
def key_data_view(request):
today = dt.date.today()
date_range = [dt.date(today.year - 3, 1, 1), dt.date(today.year, 12, 31)]
gross_margin_by_month = key_data.gross_margin_by_month(date_range)
gross_margin_months = {
row["month"]: row["gross_margin"] for row in gross_margin_by_month
}
projected_invoices = key_data.projected_invoices()
gross_margin_by_years = {}
for month in gross_margin_by_month:
try:
year = gross_margin_by_years[month["date"].year]
except KeyError:
gross_margin_by_years[month["date"].year] = year = {
"year": month["date"].year,
"gross_profit": Z2,
"third_party_costs": Z2,
"accruals": Z2,
"gross_margin": Z2,
"fte": [],
"margin_per_fte": [],
"months": [],
}
year["months"].append(month)
year["gross_profit"] += month["gross_profit"]
year["third_party_costs"] += month["third_party_costs"]
year["accruals"] += month["accruals"]["delta"]
year["gross_margin"] += month["gross_margin"]
year["fte"].append(month["fte"])
for year in gross_margin_by_years.values():
year["fte"] = sum(year["fte"]) / len(year["fte"])
year["margin_per_fte"] = (
year["gross_margin"] / year["fte"] if year["fte"] else None
)
gh = [
row
for row in green_hours.green_hours_by_month()
if date_range[0] <= row["month"] <= date_range[1]
]
def yearly_headline(gh):
zero = {"green": Z2, "red": Z2, "maintenance": Z2, "internal": Z2, "total": Z2}
for key, months in groupby(gh, key=lambda row: row["month"].year):
this = zero.copy()
months = list(months)
for month in months:
this["green"] += month["green"]
this["red"] += month["red"]
this["maintenance"] += month["maintenance"]
this["internal"] += month["internal"]
this["total"] += month["total"]
this["percentage"] = (
100 * (this["green"] + this["maintenance"]) / this["total"]
).quantize(Z0)
yield key, this, months
return render(
request,
"reporting/key_data.html",
{
"date_range": date_range,
"gross_margin_by_years": [
row[1] for row in sorted(gross_margin_by_years.items())
],
"gross_margin_by_month": gross_margin_by_month,
"invoiced_corrected": [
(year, [gross_margin_months.get((year, i), Z2) for i in range(1, 13)])
for year in range(date_range[0].year, date_range[1].year + 1)
],
"projected_invoices": [
projected_invoices["monthly_overall"].get((today.year, i), Z2)
for i in range(1, 13)
],
"green_hours": yearly_headline(gh),
"hours_distribution": {
"labels": [local_date_format(row["month"], fmt="F Y") for row in gh],
"datasets": [
{
"label": label,
"data": [100 * row[attribute] / row["total"] for row in gh],
}
for label, attribute in [
(_("profitable"), "green"),
(_("overdrawn"), "red"),
(_("maintenance"), "maintenance"),
(_("internal"), "internal"),
]
],
},
"service_hours_in_open_orders": key_data.service_hours_in_open_orders(),
"logged_hours_in_open_orders": key_data.logged_hours_in_open_orders(),
"sent_invoices_total": key_data.sent_invoices_total(),
"open_offers_total": key_data.open_offers_total(),
"average_employment_duration": average_employment_duration(),
},
)
class ProjectBudgetStatisticsForm(Form):
owned_by = forms.TypedChoiceField(label="", coerce=int, required=False)
cutoff_date = forms.DateField(widget=DateInput, label="")
closed_during_the_last_year = forms.BooleanField(
label=_("Closed during the last year"), required=False
)
internal = forms.BooleanField(label=_("Internal"), required=False)
def __init__(self, data, *args, **kwargs):
data = data.copy()
today = dt.date.today()
data.setdefault("cutoff_date", today.isoformat())
kwargs.setdefault("initial", {}).setdefault("cutoff_date", today)
super().__init__(data, *args, **kwargs)
self.fields["owned_by"].choices = User.objects.choices(
collapse_inactive=True, myself=True
)
def queryset(self):
data = self.cleaned_data
if data.get("closed_during_the_last_year"):
queryset = Project.objects.closed().filter(closed_on__gte=in_days(-366))
else:
queryset = Project.objects.open(on=self.cleaned_data.get("cutoff_date"))
if data.get("internal"):
queryset = queryset.filter(type=Project.INTERNAL)
else:
queryset = queryset.exclude(type=Project.INTERNAL)
queryset = self.apply_owned_by(queryset)
return queryset.select_related("owned_by")
@filter_form(ProjectBudgetStatisticsForm)
def project_budget_statistics_view(request, form):
statistics = project_budget_statistics.project_budget_statistics(
form.queryset(), cutoff_date=form.cleaned_data.get("cutoff_date")
)
if form.cleaned_data.get("closed_during_the_last_year"):
statistics["statistics"] = sorted(
statistics["statistics"], key=lambda s: s["project"].closed_on, reverse=True
)
if request.GET.get("export") == "xlsx" and statistics["statistics"]:
xlsx = WorkbenchXLSXDocument()
xlsx.project_budget_statistics(statistics)
return xlsx.to_response("project-budget-statistics.xlsx")
return render(
request,
"reporting/project_budget_statistics.html",
{"form": form, "statistics": statistics},
)
class DateRangeFilterForm(Form):
date_from = forms.DateField(
label=_("Date from"), required=False, widget=DateInput()
)
date_until = forms.DateField(
label=_("Date until"), required=False, widget=DateInput()
)
def __init__(self, data, *args, **kwargs):
data = data.copy()
data.setdefault("date_from", monday().isoformat())
data.setdefault("date_until", (monday() + dt.timedelta(days=6)).isoformat())
super().__init__(data, *args, **kwargs)
self.fields["date_from"].help_text = format_html(
"{}: {}",
_("Set predefined period"),
format_html_join(
", ", '<a href="#" data-set-period="{}:{}">{}</a>', date_ranges()
),
)
self.fields["date_until"].help_text = format_html(
"{}: {}",
_("Set date"),
format_html_join(
", ",
'<a href="#" data-field-value="{}">{}</a>',
[(dt.date.today().isoformat(), _("today"))],
),
)
class DateRangeAndTeamFilterForm(DateRangeFilterForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["team"] = forms.TypedChoiceField(
choices=[("", _("Everyone"))]
+ [
[
capfirst(Team._meta.verbose_name_plural),
[(team.id, str(team)) for team in Team.objects.all()],
]
]
+ [
[
capfirst(User._meta.verbose_name_plural),
[
(-user.id, str(user))
for user in User.objects.filter(is_active=True)
],
]
],
label=capfirst(Team._meta.verbose_name),
required=False,
coerce=int,
)
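    # Encoding note, derived from the choices above: the empty value means
    # "everyone", positive ids select a Team, and individual users are encoded
    # as negative ids; users() below decodes this convention.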
def users(self):
data = self.cleaned_data
queryset = User.objects.all()
if data.get("team") and data.get("team") > 0:
queryset = queryset.filter(teams=data.get("team"))
elif data.get("team") and data.get("team") < 0:
queryset = queryset.filter(id=-data.get("team"))
return queryset
@filter_form(DateRangeAndTeamFilterForm)
def hours_filter_view(request, form, *, template_name, stats_fn):
return render(
request,
template_name,
{
"form": form,
"stats": stats_fn(
[form.cleaned_data["date_from"], form.cleaned_data["date_until"]],
users=form.users(),
),
},
)
@filter_form(DateRangeFilterForm)
def labor_costs_view(request, form):
date_range = [form.cleaned_data["date_from"], form.cleaned_data["date_until"]]
if project := request.GET.get("project"):
return render(
request,
"reporting/labor_costs_by_user.html",
{
"stats": labor_costs.labor_costs_by_user(date_range, project=project),
},
)
elif cost_center := request.GET.get("cost_center"):
return render(
request,
"reporting/labor_costs_by_user.html",
{
"stats": labor_costs.labor_costs_by_user(
date_range, cost_center=cost_center
),
},
)
elif request.GET.get("users"):
return render(
request,
"reporting/labor_costs_by_user.html",
{"stats": labor_costs.labor_costs_by_user(date_range)},
)
return render(
request,
"reporting/labor_costs.html",
{
"stats": labor_costs.labor_costs_by_cost_center(date_range),
"date_range": date_range,
"form": form,
},
)
@filter_form(DateRangeFilterForm)
def logging(request, form):
date_range = [form.cleaned_data["date_from"], form.cleaned_data["date_until"]]
return render(
request,
"reporting/logging.html",
{"form": form, "logbook_stats": logbook_stats(date_range)},
)
def work_anniversaries_view(request):
return render(
request,
"reporting/work_anniversaries.html",
{"work_anniversaries": work_anniversaries()},
)
def birthdays_view(request):
return render(
request,
"reporting/birthdays.html",
{"birthdays": birthdays()},
)
|
import ca_certs_locater
import mock
import unittest
class TestLocator(unittest.TestCase):
    @mock.patch('os.path.exists')
def test_linux_exists(self, exists):
# If the file exists, return it
exists.return_value = True
fn = ca_certs_locater.get()
self.assertEqual(fn, ca_certs_locater.LINUX_PATH)
    @mock.patch('os.path.exists')
def test_linux_does_not_exist(self, exists):
# If the file does not exist, fall back
exists.return_value = False
self.assertRaises(ImportError, ca_certs_locater.get)
|
# This is the file that implements a flask server to do inferences. It's the file that you will modify
# to implement the prediction for your own algorithm.
from __future__ import print_function
import os
import sys
import stat
import json
import shutil
import flask
from flask import Flask, jsonify, request, make_response, Response
import glob
import pandas as pd
import numpy as np
import csv
from io import StringIO
from joblib import dump, load
from sagemaker_containers.beta.framework import (
content_types, encoders, env, modules, transformer, worker)
from utils import write_failure_file, print_json_object, load_json_object, save_model_artifacts, print_files_in_path
model_artifacts_path = "/opt/ml/model/"
feature_column = "words"
label_column = "label"
preprocessor = None
le = None
# The flask app for serving predictions
app = flask.Flask(__name__)
def load_model():
global preprocessor
global le
if not preprocessor:
preprocessor = load(os.path.join(model_artifacts_path, "model.joblib"))
if not le:
le = load(os.path.join(model_artifacts_path, "label.joblib"))
@app.route('/ping', methods=['GET'])
def ping():
"""Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully."""
load_model()
health = preprocessor is not None and le is not None
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def transformation():
print("data: ", request.data[:100])
print("cookies: ", request.cookies)
print("headers: ", dict(request.headers))
print("args: ", request.args)
load_model()
content_type = request.headers['Content-Type']
print("Content type", content_type)
accept = request.headers['Accept']
print("Accept", accept)
input_data = request.data.decode()
first_entry = input_data.split('\n', 1)[0].split(',', 1)[0]
print("First entry is: ", first_entry)
df = None
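    # Expected request body shapes, inferred from the branching below rather
    # than from any spec shipped with this sample:
    #   * labelled CSV: either a header row starting with "label", or data
    #     rows whose first column starts with "category_"; column 0 is the
    #     label and the remaining columns form the bag of words.
    #   * unlabelled CSV: every column of a row belongs to the bag of words.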
if first_entry == "label" or first_entry.startswith("category_"):
recs = [(row[0], set(row[1:]))
for row in csv.reader(StringIO(input_data))]
if first_entry == "label":
df = pd.DataFrame.from_records(
recs[1:], columns=[label_column, feature_column])
else:
df = pd.DataFrame.from_records(
recs, columns=[label_column, feature_column])
# This is a labelled example, includes the ring label
print("Length indicates that label is included")
else:
print("Length indicates that label is not included.")
# This is an unlabelled example.
recs = [(set(row),) for row in csv.reader(StringIO(input_data))]
df = pd.DataFrame.from_records(recs, columns=[feature_column])
print("merged df", df.head())
features = preprocessor.transform(df["words"])
prediction = None
if label_column in df:
print("label_column in input_data")
labels = le.transform(df[label_column])
# Return the label (as the first column) and the set of features.
prediction = np.insert(features.todense(), 0, labels, axis=1)
else:
print("label_column not in input_data")
# Return only the set of features
prediction = features.todense()
if accept == "application/json":
instances = []
for row in prediction.tolist():
instances.append({"features": row})
json_output = {"instances": instances}
return Response(json.dumps(json_output), mimetype=accept)
# TODO: use custom flag to indicate that this is in a pipeline rather than relying on the '*/*'
elif accept == 'text/csv' or accept == '*/*':
return Response(encoders.encode(prediction, "text/csv"), mimetype="text/csv")
else:
raise RuntimeError(
"{} accept type is not supported by this script.".format(accept))
|
from django.apps import AppConfig
class FCMDevicesConfig(AppConfig):
name = "fcm_devices"
verbose_name = "FCM Devices"
|
import os
from textwrap import dedent
import os.path
import shutil
try:
import unittest2 as unittest
except ImportError:
import unittest
from rope.base.exceptions import RopeError, ResourceNotFoundError
from rope.base.fscommands import FileSystemCommands
from rope.base.libutils import path_to_resource
from rope.base.project import Project, NoProject, _realpath
from ropetest import testutils
from rope.base.resourceobserver import FilteredResourceObserver
class ProjectTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.project = testutils.sample_project(foldername='sampleproject',
ropefolder=None)
self.project_root = self.project.address
self._make_sample_project()
self.no_project = NoProject()
def _make_sample_project(self):
self.sample_file = 'sample_file.txt'
self.sample_path = os.path.join(self.project_root, 'sample_file.txt')
if not os.path.exists(self.project_root):
os.mkdir(self.project_root)
self.sample_folder = 'sample_folder'
os.mkdir(os.path.join(self.project_root, self.sample_folder))
sample = open(self.sample_path, 'w')
sample.write('sample text\n')
sample.close()
def tearDown(self):
testutils.remove_project(self.project)
unittest.TestCase.tearDown(self)
def test_project_creation(self):
self.assertEqual(_realpath(self.project_root),
self.project.address)
def test_getting_project_file(self):
project_file = self.project.get_resource(self.sample_file)
self.assertTrue(project_file is not None)
def test_project_file_reading(self):
projectFile = self.project.get_resource(self.sample_file)
self.assertEqual('sample text\n', projectFile.read())
def test_getting_not_existing_project_file(self):
with self.assertRaises(ResourceNotFoundError):
self.project.get_resource('DoesNotExistFile.txt')
def test_writing_in_project_files(self):
project_file = self.project.get_resource(self.sample_file)
project_file.write('another text\n')
self.assertEqual('another text\n', project_file.read())
def test_creating_files(self):
project_file = 'newfile.txt'
self.project.root.create_file(project_file)
newFile = self.project.get_resource(project_file)
self.assertTrue(newFile is not None)
def test_creating_files_that_already_exist(self):
with self.assertRaises(RopeError):
self.project.root.create_file(self.sample_file)
def test_making_root_folder_if_it_does_not_exist(self):
project = Project('sampleproject2')
try:
self.assertTrue(os.path.exists('sampleproject2') and
os.path.isdir('sampleproject2'))
finally:
testutils.remove_project(project)
def test_failure_when_project_root_exists_and_is_a_file(self):
project_root = 'sampleproject2'
try:
open(project_root, 'w').close()
with self.assertRaises(RopeError):
Project(project_root)
finally:
testutils.remove_recursively(project_root)
def test_creating_folders(self):
folderName = 'SampleFolder'
self.project.root.create_folder(folderName)
folderPath = os.path.join(self.project.address, folderName)
self.assertTrue(os.path.exists(folderPath) and
os.path.isdir(folderPath))
    def test_making_folder_that_already_exists(self):
        folderName = 'SampleFolder'
        self.project.root.create_folder(folderName)
        with self.assertRaises(RopeError):
            self.project.root.create_folder(folderName)
    def test_failing_if_creating_folder_while_file_already_exists(self):
        folderName = 'SampleFolder'
        self.project.root.create_file(folderName)
        with self.assertRaises(RopeError):
            self.project.root.create_folder(folderName)
def test_creating_file_inside_folder(self):
folder_name = 'sampleFolder'
file_name = 'sample2.txt'
file_path = folder_name + '/' + file_name
parent_folder = self.project.root.create_folder(folder_name)
parent_folder.create_file(file_name)
file = self.project.get_resource(file_path)
file.write('sample notes')
self.assertEqual(file_path, file.path)
self.assertEqual('sample notes',
open(os.path.join(self.project.address,
file_path)).read())
def test_failing_when_creating_file_inside_non_existent_folder(self):
with self.assertRaises(ResourceNotFoundError):
self.project.root.create_file('NonexistentFolder/SomeFile.txt')
def test_nested_directories(self):
folder_name = 'SampleFolder'
parent = self.project.root.create_folder(folder_name)
parent.create_folder(folder_name)
folder_path = os.path.join(self.project.address,
folder_name, folder_name)
self.assertTrue(os.path.exists(folder_path) and
os.path.isdir(folder_path))
def test_removing_files(self):
self.assertTrue(os.path.exists(self.sample_path))
self.project.get_resource(self.sample_file).remove()
self.assertFalse(os.path.exists(self.sample_path))
def test_removing_files_invalidating_in_project_resource_pool(self):
root_folder = self.project.root
my_file = root_folder.create_file('my_file.txt')
my_file.remove()
self.assertFalse(root_folder.has_child('my_file.txt'))
def test_removing_directories(self):
self.assertTrue(os.path.exists(os.path.join(self.project.address,
self.sample_folder)))
self.project.get_resource(self.sample_folder).remove()
self.assertFalse(os.path.exists(os.path.join(self.project.address,
self.sample_folder)))
def test_removing_non_existent_files(self):
with self.assertRaises(ResourceNotFoundError):
self.project.get_resource('NonExistentFile.txt').remove()
def test_removing_nested_files(self):
file_name = self.sample_folder + '/sample_file.txt'
self.project.root.create_file(file_name)
self.project.get_resource(file_name).remove()
self.assertTrue(os.path.exists(os.path.join(self.project.address,
self.sample_folder)))
self.assertTrue(not os.path.exists(os.path.join(self.project.address,
file_name)))
def test_file_get_name(self):
file = self.project.get_resource(self.sample_file)
self.assertEqual(self.sample_file, file.name)
file_name = 'nestedFile.txt'
parent = self.project.get_resource(self.sample_folder)
filePath = self.sample_folder + '/' + file_name
parent.create_file(file_name)
nestedFile = self.project.get_resource(filePath)
self.assertEqual(file_name, nestedFile.name)
def test_folder_get_name(self):
folder = self.project.get_resource(self.sample_folder)
self.assertEqual(self.sample_folder, folder.name)
def test_file_get_path(self):
file = self.project.get_resource(self.sample_file)
self.assertEqual(self.sample_file, file.path)
fileName = 'nestedFile.txt'
parent = self.project.get_resource(self.sample_folder)
filePath = self.sample_folder + '/' + fileName
parent.create_file(fileName)
nestedFile = self.project.get_resource(filePath)
self.assertEqual(filePath, nestedFile.path)
def test_folder_get_path(self):
folder = self.project.get_resource(self.sample_folder)
self.assertEqual(self.sample_folder, folder.path)
def test_is_folder(self):
self.assertTrue(self.project.get_resource(
self.sample_folder).is_folder())
self.assertTrue(not self.project.get_resource(
self.sample_file).is_folder())
    def test_get_children(self):
children = self.project.get_resource(self.sample_folder).get_children()
self.assertEqual([], children)
def test_nonempty_get_children(self):
file_name = 'nestedfile.txt'
filePath = self.sample_folder + '/' + file_name
parent = self.project.get_resource(self.sample_folder)
parent.create_file(file_name)
children = parent.get_children()
self.assertEqual(1, len(children))
self.assertEqual(filePath, children[0].path)
def test_nonempty_get_children2(self):
file_name = 'nestedfile.txt'
folder_name = 'nestedfolder.txt'
filePath = self.sample_folder + '/' + file_name
folderPath = self.sample_folder + '/' + folder_name
parent = self.project.get_resource(self.sample_folder)
parent.create_file(file_name)
parent.create_folder(folder_name)
children = parent.get_children()
self.assertEqual(2, len(children))
self.assertTrue(filePath == children[0].path or
filePath == children[1].path)
self.assertTrue(folderPath == children[0].path or
folderPath == children[1].path)
def test_does_not_fail_for_permission_denied(self):
bad_dir = os.path.join(self.sample_folder, "bad_dir")
os.makedirs(bad_dir)
self.addCleanup(shutil.rmtree, bad_dir)
os.chmod(bad_dir, 0o000)
try:
parent = self.project.get_resource(self.sample_folder)
parent.get_children()
finally:
os.chmod(bad_dir, 0o755)
def test_getting_files(self):
files = self.project.root.get_files()
self.assertEqual(1, len(files))
self.assertTrue(self.project.get_resource(self.sample_file) in files)
def test_getting_folders(self):
folders = self.project.root.get_folders()
self.assertEqual(1, len(folders))
self.assertTrue(self.project.get_resource(
self.sample_folder) in folders)
def test_nested_folder_get_files(self):
parent = self.project.root.create_folder('top')
parent.create_file('file1.txt')
parent.create_file('file2.txt')
files = parent.get_files()
self.assertEqual(2, len(files))
self.assertTrue(self.project.get_resource('top/file2.txt') in files)
self.assertEqual(0, len(parent.get_folders()))
def test_nested_folder_get_folders(self):
parent = self.project.root.create_folder('top')
parent.create_folder('dir1')
parent.create_folder('dir2')
folders = parent.get_folders()
self.assertEqual(2, len(folders))
self.assertTrue(self.project.get_resource('top/dir1') in folders)
self.assertEqual(0, len(parent.get_files()))
def test_root_folder(self):
root_folder = self.project.root
self.assertEqual(2, len(root_folder.get_children()))
self.assertEqual('', root_folder.path)
self.assertEqual('', root_folder.name)
def test_get_all_files(self):
files = tuple(self.project.get_files())
self.assertEqual(1, len(files))
self.assertEqual(self.sample_file, files[0].name)
def test_get_all_files_after_changing(self):
self.assertEqual(1, len(self.project.get_files()))
myfile = self.project.root.create_file('myfile.txt')
self.assertEqual(2, len(self.project.get_files()))
myfile.move('newfile.txt')
self.assertEqual(2, len(self.project.get_files()))
self.project.get_file('newfile.txt').remove()
self.assertEqual(1, len(self.project.get_files()))
def test_multifile_get_all_files(self):
fileName = 'nestedFile.txt'
parent = self.project.get_resource(self.sample_folder)
parent.create_file(fileName)
files = list(self.project.get_files())
self.assertEqual(2, len(files))
self.assertTrue(fileName == files[0].name or fileName == files[1].name)
def test_ignoring_dot_pyc_files_in_get_files(self):
root = self.project.address
src_folder = os.path.join(root, 'src')
os.mkdir(src_folder)
test_pyc = os.path.join(src_folder, 'test.pyc')
open(test_pyc, 'w').close()
for x in self.project.get_files():
self.assertNotEqual('src/test.pyc', x.path)
def test_folder_creating_files(self):
projectFile = 'NewFile.txt'
self.project.root.create_file(projectFile)
new_file = self.project.get_resource(projectFile)
self.assertTrue(new_file is not None and not new_file.is_folder())
def test_folder_creating_nested_files(self):
project_file = 'NewFile.txt'
parent_folder = self.project.get_resource(self.sample_folder)
parent_folder.create_file(project_file)
new_file = self.project.get_resource(self.sample_folder
+ '/' + project_file)
self.assertTrue(new_file is not None and not new_file.is_folder())
def test_folder_creating_files2(self):
projectFile = 'newfolder'
self.project.root.create_folder(projectFile)
new_folder = self.project.get_resource(projectFile)
self.assertTrue(new_folder is not None and new_folder.is_folder())
def test_folder_creating_nested_files2(self):
project_file = 'newfolder'
parent_folder = self.project.get_resource(self.sample_folder)
parent_folder.create_folder(project_file)
new_folder = self.project.get_resource(self.sample_folder
+ '/' + project_file)
self.assertTrue(new_folder is not None and new_folder.is_folder())
def test_folder_get_child(self):
folder = self.project.root
folder.create_file('myfile.txt')
folder.create_folder('myfolder')
self.assertEqual(self.project.get_resource('myfile.txt'),
folder.get_child('myfile.txt'))
self.assertEqual(self.project.get_resource('myfolder'),
folder.get_child('myfolder'))
def test_folder_get_child_nested(self):
root = self.project.root
folder = root.create_folder('myfolder')
folder.create_file('myfile.txt')
folder.create_folder('myfolder')
self.assertEqual(self.project.get_resource('myfolder/myfile.txt'),
folder.get_child('myfile.txt'))
self.assertEqual(self.project.get_resource('myfolder/myfolder'),
folder.get_child('myfolder'))
def test_project_root_is_root_folder(self):
self.assertEqual('', self.project.root.path)
def test_moving_files(self):
root_folder = self.project.root
my_file = root_folder.create_file('my_file.txt')
my_file.move('my_other_file.txt')
self.assertFalse(my_file.exists())
root_folder.get_child('my_other_file.txt')
def test_moving_folders(self):
root_folder = self.project.root
my_folder = root_folder.create_folder('my_folder')
my_file = my_folder.create_file('my_file.txt')
my_folder.move('new_folder')
self.assertFalse(root_folder.has_child('my_folder'))
self.assertFalse(my_file.exists())
self.assertTrue(root_folder.get_child('new_folder') is not None)
def test_moving_destination_folders(self):
root_folder = self.project.root
my_folder = root_folder.create_folder('my_folder')
my_file = root_folder.create_file('my_file.txt')
my_file.move('my_folder')
self.assertFalse(root_folder.has_child('my_file.txt'))
self.assertFalse(my_file.exists())
my_folder.get_child('my_file.txt')
def test_moving_files_and_resource_objects(self):
root_folder = self.project.root
my_file = root_folder.create_file('my_file.txt')
old_hash = hash(my_file)
my_file.move('my_other_file.txt')
self.assertEqual(old_hash, hash(my_file))
def test_file_encoding_reading(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = (b'# -*- coding: utf-8 -*-\n' +
br'#\N{LATIN SMALL LETTER I WITH DIAERESIS}\n').decode('utf8')
file = open(sample_file.real_path, 'wb')
file.write(contents.encode('utf-8'))
file.close()
self.assertEqual(contents, sample_file.read())
def test_file_encoding_writing(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = (b'# -*- coding: utf-8 -*-\n' +
br'\N{LATIN SMALL LETTER I WITH DIAERESIS}\n').decode('utf8')
sample_file.write(contents)
self.assertEqual(contents, sample_file.read())
def test_using_utf8_when_writing_in_case_of_errors(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = br'\n\N{LATIN SMALL LETTER I WITH DIAERESIS}\n'.decode('utf8')
sample_file.write(contents)
self.assertEqual(contents, sample_file.read())
def test_encoding_declaration_in_the_second_line(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = b'\n# -*- coding: latin-1 -*-\n\xa9\n'
file = open(sample_file.real_path, 'wb')
file.write(contents)
file.close()
self.assertEqual(contents, sample_file.read().encode('latin-1'))
def test_not_an_encoding_declaration(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = b"def my_method(self, encoding='latin-1'):\n var = {}\n\xc2\xa9\n"
file = open(sample_file.real_path, 'wb')
file.write(contents)
file.close()
self.assertEqual(contents, sample_file.read().encode('utf-8'))
self.assertNotEqual(contents, sample_file.read().encode('latin-1'))
def test_read_bytes(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = b'\n# -*- coding: latin-1 -*-\n\xa9\n'
file = open(sample_file.real_path, 'wb')
file.write(contents)
file.close()
self.assertEqual(contents, sample_file.read_bytes())
# TODO: Detecting utf-16 encoding
def xxx_test_using_utf16(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = b'# -*- coding: utf-16 -*-\n# This is a sample file ...\n'
file = open(sample_file.real_path, 'w')
file.write(contents.encode('utf-16'))
file.close()
sample_file.write(contents)
self.assertEqual(contents, sample_file.read())
# XXX: supporting utf_8_sig
def xxx_test_file_encoding_reading_for_notepad_styles(self):
sample_file = self.project.root.create_file('my_file.txt')
contents = u'#\N{LATIN SMALL LETTER I WITH DIAERESIS}\n'
file = open(sample_file.real_path, 'w')
# file.write('\xef\xbb\xbf')
file.write(contents.encode('utf-8-sig'))
file.close()
self.assertEqual(contents, sample_file.read())
def test_using_project_get_file(self):
myfile = self.project.get_file(self.sample_file)
self.assertTrue(myfile.exists())
def test_using_file_create(self):
myfile = self.project.get_file('myfile.txt')
self.assertFalse(myfile.exists())
myfile.create()
self.assertTrue(myfile.exists())
self.assertFalse(myfile.is_folder())
def test_using_folder_create(self):
myfolder = self.project.get_folder('myfolder')
self.assertFalse(myfolder.exists())
myfolder.create()
self.assertTrue(myfolder.exists())
self.assertTrue(myfolder.is_folder())
def test_exception_when_creating_twice(self):
with self.assertRaises(RopeError):
myfile = self.project.get_file('myfile.txt')
myfile.create()
myfile.create()
def test_exception_when_parent_does_not_exist(self):
with self.assertRaises(ResourceNotFoundError):
myfile = self.project.get_file('myfolder/myfile.txt')
myfile.create()
def test_simple_path_to_resource(self):
myfile = self.project.root.create_file('myfile.txt')
self.assertEqual(myfile, path_to_resource(self.project,
myfile.real_path))
self.assertEqual(myfile, path_to_resource(
self.project, myfile.real_path, type='file'))
myfolder = self.project.root.create_folder('myfolder')
self.assertEqual(myfolder, path_to_resource(self.project,
myfolder.real_path))
self.assertEqual(myfolder, path_to_resource(
self.project, myfolder.real_path, type='folder'))
@testutils.skipNotPOSIX()
def test_ignoring_symlinks_inside_project(self):
project2 = testutils.sample_project(folder_name='sampleproject2')
mod = project2.root.create_file('mod.py')
try:
path = os.path.join(self.project.address, 'linkedfile.txt')
os.symlink(mod.real_path, path)
files = self.project.root.get_files()
self.assertEqual(1, len(files))
finally:
testutils.remove_project(project2)
def test_getting_empty_source_folders(self):
self.assertEqual([], self.project.get_source_folders())
def test_root_source_folder(self):
self.project.root.create_file('sample.py')
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(self.project.root in source_folders)
def test_root_source_folder2(self):
self.project.root.create_file('mod1.py')
self.project.root.create_file('mod2.py')
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(self.project.root in source_folders)
def test_src_source_folder(self):
src = self.project.root.create_folder('src')
src.create_file('sample.py')
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(self.project.get_resource('src') in source_folders)
def test_packages(self):
src = self.project.root.create_folder('src')
pkg = src.create_folder('package')
pkg.create_file('__init__.py')
source_folders = self.project.get_source_folders()
self.assertEqual(1, len(source_folders))
self.assertTrue(src in source_folders)
def test_multi_source_folders(self):
src = self.project.root.create_folder('src')
package = src.create_folder('package')
package.create_file('__init__.py')
test = self.project.root.create_folder('test')
test.create_file('alltests.py')
source_folders = self.project.get_source_folders()
self.assertEqual(2, len(source_folders))
self.assertTrue(src in source_folders)
self.assertTrue(test in source_folders)
def test_multi_source_folders2(self):
testutils.create_module(self.project, 'mod1')
src = self.project.root.create_folder('src')
package = testutils.create_package(self.project, 'package', src)
testutils.create_module(self.project, 'mod2', package)
source_folders = self.project.get_source_folders()
self.assertEqual(2, len(source_folders))
self.assertTrue(self.project.root in source_folders and
src in source_folders)
class ResourceObserverTest(unittest.TestCase):
def setUp(self):
super(ResourceObserverTest, self).setUp()
self.project = testutils.sample_project()
def tearDown(self):
testutils.remove_project(self.project)
super(ResourceObserverTest, self).tearDown()
def test_resource_change_observer(self):
sample_file = self.project.root.create_file('my_file.txt')
sample_file.write('a sample file version 1')
sample_observer = _SampleObserver()
self.project.add_observer(sample_observer)
sample_file.write('a sample file version 2')
self.assertEqual(1, sample_observer.change_count)
self.assertEqual(sample_file, sample_observer.last_changed)
def test_resource_change_observer_after_removal(self):
sample_file = self.project.root.create_file('my_file.txt')
sample_file.write('text')
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer,
[sample_file]))
sample_file.remove()
self.assertEqual(1, sample_observer.change_count)
self.assertEqual(sample_file, sample_observer.last_removed)
def test_resource_change_observer2(self):
sample_file = self.project.root.create_file('my_file.txt')
sample_observer = _SampleObserver()
self.project.add_observer(sample_observer)
self.project.remove_observer(sample_observer)
sample_file.write('a sample file version 2')
self.assertEqual(0, sample_observer.change_count)
def test_resource_change_observer_for_folders(self):
root_folder = self.project.root
my_folder = root_folder.create_folder('my_folder')
my_folder_observer = _SampleObserver()
root_folder_observer = _SampleObserver()
self.project.add_observer(
FilteredResourceObserver(my_folder_observer, [my_folder]))
self.project.add_observer(
FilteredResourceObserver(root_folder_observer, [root_folder]))
my_file = my_folder.create_file('my_file.txt')
self.assertEqual(1, my_folder_observer.change_count)
my_file.move('another_file.txt')
self.assertEqual(2, my_folder_observer.change_count)
self.assertEqual(1, root_folder_observer.change_count)
self.project.get_resource('another_file.txt').remove()
self.assertEqual(2, my_folder_observer.change_count)
self.assertEqual(2, root_folder_observer.change_count)
def test_resource_change_observer_after_moving(self):
sample_file = self.project.root.create_file('my_file.txt')
sample_observer = _SampleObserver()
self.project.add_observer(sample_observer)
sample_file.move('new_file.txt')
self.assertEqual(1, sample_observer.change_count)
self.assertEqual((sample_file,
self.project.get_resource('new_file.txt')),
sample_observer.last_moved)
def test_revalidating_files(self):
root = self.project.root
my_file = root.create_file('my_file.txt')
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer,
[my_file]))
os.remove(my_file.real_path)
self.project.validate(root)
self.assertEqual(my_file, sample_observer.last_removed)
self.assertEqual(1, sample_observer.change_count)
def test_revalidating_files_and_no_changes2(self):
root = self.project.root
my_file = root.create_file('my_file.txt')
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer,
[my_file]))
self.project.validate(root)
self.assertEqual(None, sample_observer.last_moved)
self.assertEqual(0, sample_observer.change_count)
def test_revalidating_folders(self):
root = self.project.root
my_folder = root.create_folder('myfolder')
my_file = my_folder.create_file('myfile.txt') # noqa
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer,
[my_folder]))
testutils.remove_recursively(my_folder.real_path)
self.project.validate(root)
self.assertEqual(my_folder, sample_observer.last_removed)
self.assertEqual(1, sample_observer.change_count)
def test_removing_and_adding_resources_to_filtered_observer(self):
my_file = self.project.root.create_file('my_file.txt')
sample_observer = _SampleObserver()
filtered_observer = FilteredResourceObserver(sample_observer)
self.project.add_observer(filtered_observer)
my_file.write('1')
self.assertEqual(0, sample_observer.change_count)
filtered_observer.add_resource(my_file)
my_file.write('2')
self.assertEqual(1, sample_observer.change_count)
filtered_observer.remove_resource(my_file)
my_file.write('3')
self.assertEqual(1, sample_observer.change_count)
def test_validation_and_changing_files(self):
my_file = self.project.root.create_file('my_file.txt')
sample_observer = _SampleObserver()
timekeeper = _MockChangeIndicator()
filtered_observer = FilteredResourceObserver(
sample_observer, [my_file], timekeeper=timekeeper)
self.project.add_observer(filtered_observer)
self._write_file(my_file.real_path)
timekeeper.set_indicator(my_file, 1)
self.project.validate(self.project.root)
self.assertEqual(1, sample_observer.change_count)
def test_validation_and_changing_files2(self):
my_file = self.project.root.create_file('my_file.txt')
sample_observer = _SampleObserver()
timekeeper = _MockChangeIndicator()
self.project.add_observer(FilteredResourceObserver(
sample_observer, [my_file],
timekeeper=timekeeper))
timekeeper.set_indicator(my_file, 1)
my_file.write('hey')
self.assertEqual(1, sample_observer.change_count)
self.project.validate(self.project.root)
self.assertEqual(1, sample_observer.change_count)
def test_not_reporting_multiple_changes_to_folders(self):
root = self.project.root
file1 = root.create_file('file1.txt')
file2 = root.create_file('file2.txt')
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(
sample_observer, [root, file1, file2]))
os.remove(file1.real_path)
os.remove(file2.real_path)
self.assertEqual(0, sample_observer.change_count)
self.project.validate(self.project.root)
self.assertEqual(3, sample_observer.change_count)
def _write_file(self, path):
my_file = open(path, 'w')
my_file.write('\n')
my_file.close()
def test_moving_and_being_interested_about_a_folder_and_a_child(self):
my_folder = self.project.root.create_folder('my_folder')
my_file = my_folder.create_file('my_file.txt')
sample_observer = _SampleObserver()
filtered_observer = FilteredResourceObserver(
sample_observer, [my_folder, my_file])
self.project.add_observer(filtered_observer)
my_folder.move('new_folder')
self.assertEqual(2, sample_observer.change_count)
def test_contains_for_folders(self):
folder1 = self.project.root.create_folder('folder')
folder2 = self.project.root.create_folder('folder2')
self.assertFalse(folder1.contains(folder2))
def test_validating_when_created(self):
root = self.project.root
my_file = self.project.get_file('my_file.txt')
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer,
[my_file]))
open(my_file.real_path, 'w').close()
self.project.validate(root)
self.assertEqual(my_file, sample_observer.last_created)
self.assertEqual(1, sample_observer.change_count)
def test_validating_twice_when_created(self):
root = self.project.root
my_file = self.project.get_file('my_file.txt')
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer,
[my_file]))
open(my_file.real_path, 'w').close()
self.project.validate(root)
self.project.validate(root)
self.assertEqual(my_file, sample_observer.last_created)
self.assertEqual(1, sample_observer.change_count)
def test_changes_and_adding_resources(self):
root = self.project.root # noqa
file1 = self.project.get_file('file1.txt')
file2 = self.project.get_file('file2.txt')
file1.create()
sample_observer = _SampleObserver()
self.project.add_observer(FilteredResourceObserver(sample_observer,
[file1, file2]))
file1.move(file2.path)
self.assertEqual(2, sample_observer.change_count)
self.assertEqual(file2, sample_observer.last_created)
self.assertEqual((file1, file2), sample_observer.last_moved)
def test_validating_get_files_list(self):
root = self.project.root # noqa
self.assertEqual(0, len(self.project.get_files()))
file = open(os.path.join(self.project.address, 'myfile.txt'), 'w')
file.close()
self.project.validate()
self.assertEqual(1, len(self.project.get_files()))
def test_clear_observered_resources_for_filtered_observers(self):
sample_file = self.project.root.create_file('myfile.txt')
sample_observer = _SampleObserver()
filtered = FilteredResourceObserver(sample_observer)
self.project.add_observer(filtered)
filtered.add_resource(sample_file)
filtered.clear_resources()
sample_file.write('1')
self.assertEqual(0, sample_observer.change_count)
class _MockChangeIndicator(object):
def __init__(self):
self.times = {}
def set_indicator(self, resource, time):
self.times[resource] = time
def get_indicator(self, resource):
return self.times.get(resource, 0)
class _SampleObserver(object):
def __init__(self):
self.change_count = 0
self.last_changed = None
self.last_moved = None
self.last_created = None
self.last_removed = None
def resource_changed(self, resource):
self.last_changed = resource
self.change_count += 1
def resource_moved(self, resource, new_resource):
self.last_moved = (resource, new_resource)
self.change_count += 1
def resource_created(self, resource):
self.last_created = resource
self.change_count += 1
def resource_removed(self, resource):
self.last_removed = resource
self.change_count += 1
class OutOfProjectTest(unittest.TestCase):
def setUp(self):
super(OutOfProjectTest, self).setUp()
self.test_directory = 'temp_test_directory'
testutils.remove_recursively(self.test_directory)
os.mkdir(self.test_directory)
self.project = testutils.sample_project()
self.no_project = NoProject()
def tearDown(self):
testutils.remove_project(self.project)
testutils.remove_recursively(self.test_directory)
super(OutOfProjectTest, self).tearDown()
def test_simple_out_of_project_file(self):
sample_file_path = os.path.join(self.test_directory, 'sample.txt')
sample_file = open(sample_file_path, 'w')
sample_file.write('sample content\n')
sample_file.close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertEqual('sample content\n', sample_resource.read())
def test_simple_out_of_project_folder(self):
sample_folder_path = os.path.join(self.test_directory, 'sample_folder')
os.mkdir(sample_folder_path)
sample_folder = self.no_project.get_resource(sample_folder_path)
self.assertEqual([], sample_folder.get_children())
sample_file_path = os.path.join(sample_folder_path, 'sample.txt')
open(sample_file_path, 'w').close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertEqual(sample_resource, sample_folder.get_children()[0])
def test_using_absolute_path(self):
sample_file_path = os.path.join(self.test_directory, 'sample.txt')
open(sample_file_path, 'w').close()
normal_sample_resource = self.no_project.get_resource(sample_file_path)
absolute_sample_resource = \
self.no_project.get_resource(os.path.abspath(sample_file_path))
self.assertEqual(normal_sample_resource, absolute_sample_resource)
def test_folder_get_child(self):
sample_folder_path = os.path.join(self.test_directory, 'sample_folder')
os.mkdir(sample_folder_path)
sample_folder = self.no_project.get_resource(sample_folder_path)
self.assertEqual([], sample_folder.get_children())
sample_file_path = os.path.join(sample_folder_path, 'sample.txt')
open(sample_file_path, 'w').close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertTrue(sample_folder.has_child('sample.txt'))
self.assertFalse(sample_folder.has_child('doesnothave.txt'))
self.assertEqual(sample_resource,
sample_folder.get_child('sample.txt'))
def test_out_of_project_files_and_path_to_resource(self):
sample_file_path = os.path.join(self.test_directory, 'sample.txt')
sample_file = open(sample_file_path, 'w')
sample_file.write('sample content\n')
sample_file.close()
sample_resource = self.no_project.get_resource(sample_file_path)
self.assertEqual(sample_resource,
path_to_resource(self.project, sample_file_path))
class _MockFSCommands(object):
def __init__(self):
self.log = ''
self.fscommands = FileSystemCommands()
def create_file(self, path):
self.log += 'create_file '
self.fscommands.create_file(path)
def create_folder(self, path):
self.log += 'create_folder '
self.fscommands.create_folder(path)
def move(self, path, new_location):
self.log += 'move '
self.fscommands.move(path, new_location)
def remove(self, path):
self.log += 'remove '
self.fscommands.remove(path)
class RopeFolderTest(unittest.TestCase):
def setUp(self):
super(RopeFolderTest, self).setUp()
self.project = None
def tearDown(self):
if self.project:
testutils.remove_project(self.project)
super(RopeFolderTest, self).tearDown()
def test_none_project_rope_folder(self):
self.project = testutils.sample_project(ropefolder=None)
self.assertTrue(self.project.ropefolder is None)
def test_getting_project_rope_folder(self):
self.project = testutils.sample_project(ropefolder='.ropeproject')
self.assertTrue(self.project.ropefolder.exists())
        self.assertEqual('.ropeproject', self.project.ropefolder.path)
def test_setting_ignored_resources(self):
self.project = testutils.sample_project(
ignored_resources=['myfile.txt'])
myfile = self.project.get_file('myfile.txt')
file2 = self.project.get_file('file2.txt')
self.assertTrue(self.project.is_ignored(myfile))
self.assertFalse(self.project.is_ignored(file2))
def test_ignored_folders(self):
self.project = testutils.sample_project(ignored_resources=['myfolder'])
myfolder = self.project.root.create_folder('myfolder')
self.assertTrue(self.project.is_ignored(myfolder))
myfile = myfolder.create_file('myfile.txt')
self.assertTrue(self.project.is_ignored(myfile))
def test_ignored_resources_and_get_files(self):
self.project = testutils.sample_project(
ignored_resources=['myfile.txt'], ropefolder=None)
myfile = self.project.get_file('myfile.txt')
self.assertEqual(0, len(self.project.get_files()))
myfile.create()
self.assertEqual(0, len(self.project.get_files()))
def test_ignored_resources_and_get_files2(self):
self.project = testutils.sample_project(
ignored_resources=['myfile.txt'], ropefolder=None)
myfile = self.project.root.create_file('myfile.txt') # noqa
self.assertEqual(0, len(self.project.get_files()))
def test_setting_ignored_resources_patterns(self):
self.project = testutils.sample_project(ignored_resources=['m?file.*'])
myfile = self.project.get_file('myfile.txt')
file2 = self.project.get_file('file2.txt')
self.assertTrue(self.project.is_ignored(myfile))
self.assertFalse(self.project.is_ignored(file2))
def test_star_should_not_include_slashes(self):
self.project = testutils.sample_project(ignored_resources=['f*.txt'])
folder = self.project.root.create_folder('folder')
file1 = folder.create_file('myfile.txt')
file2 = folder.create_file('file2.txt')
self.assertFalse(self.project.is_ignored(file1))
self.assertTrue(self.project.is_ignored(file2))
def test_normal_fscommands(self):
fscommands = _MockFSCommands()
self.project = testutils.sample_project(fscommands=fscommands)
myfile = self.project.get_file('myfile.txt')
myfile.create()
        self.assertEqual('create_file ', fscommands.log)
def test_fscommands_and_ignored_resources(self):
fscommands = _MockFSCommands()
self.project = testutils.sample_project(
fscommands=fscommands,
ignored_resources=['myfile.txt'], ropefolder=None)
myfile = self.project.get_file('myfile.txt')
myfile.create()
self.assertEqual('', fscommands.log)
def test_ignored_resources_and_prefixes(self):
self.project = testutils.sample_project(
ignored_resources=['.hg'])
myfile = self.project.root.create_file('.hgignore')
self.assertFalse(self.project.is_ignored(myfile))
def test_loading_config_dot_py(self):
self.project = testutils.sample_project(ropefolder='.ropeproject')
config = self.project.get_file('.ropeproject/config.py')
if not config.exists():
config.create()
config.write('def set_prefs(prefs):\n'
' prefs["ignored_resources"] = ["myfile.txt"]\n'
'def project_opened(project):\n'
' project.root.create_file("loaded")\n')
self.project.close()
self.project = Project(self.project.address, ropefolder='.ropeproject')
self.assertTrue(self.project.get_file('loaded').exists())
myfile = self.project.get_file('myfile.txt')
self.assertTrue(self.project.is_ignored(myfile))
def test_ignoring_syntax_errors(self):
self.project = testutils.sample_project(ropefolder=None,
ignore_syntax_errors=True)
mod = testutils.create_module(self.project, 'mod')
mod.write('xyz print')
pymod = self.project.get_pymodule(mod) # noqa
def test_compressed_history(self):
self.project = testutils.sample_project(compress_history=True)
mod = testutils.create_module(self.project, 'mod')
mod.write('')
def test_compressed_objectdb(self):
self.project = testutils.sample_project(compress_objectdb=True)
mod = testutils.create_module(self.project, 'mod')
self.project.pycore.analyze_module(mod)
def test_nested_dot_ropeproject_folder(self):
self.project = testutils.sample_project(ropefolder='.f1/f2')
ropefolder = self.project.ropefolder
self.assertEqual('.f1/f2', ropefolder.path)
self.assertTrue(ropefolder.exists())
def suite():
result = unittest.TestSuite()
result.addTests(unittest.makeSuite(ProjectTest))
result.addTests(unittest.makeSuite(ResourceObserverTest))
result.addTests(unittest.makeSuite(OutOfProjectTest))
result.addTests(unittest.makeSuite(RopeFolderTest))
return result
if __name__ == '__main__':
unittest.main()
|
import re
YT_PATTERN = r'((youtu\.be/|(www\.)?youtube\.com/watch\?v=)(\w{11}))'
def parse_links(text: str) -> list:
matches = re.findall(YT_PATTERN, text)
return list(map(lambda match: match[0], matches))
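# Illustrative usage (not part of the original module). Only the first capture
# group of each match is kept, so the returned items are the matched link
# substrings without the scheme.
if __name__ == "__main__":
    sample = "see https://youtu.be/dQw4w9WgXcQ and https://www.youtube.com/watch?v=dQw4w9WgXcQ"
    print(parse_links(sample))
    # ['youtu.be/dQw4w9WgXcQ', 'www.youtube.com/watch?v=dQw4w9WgXcQ']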
|
"""
Aggregation model
:author: Angelo Cutaia
:copyright: Copyright 2021, LINKS Foundation
:version: 1.0.0
..
Copyright 2021 LINKS Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard Library
from typing import Optional
# Third Party
from pydantic import validator
# Internal
from ..model import OrjsonModel
from ..track import AggregationType
# --------------------------------------------------------------------------------------------
class AggregationExtract(OrjsonModel):
space_aggregation: Optional[int] = None
time_aggregation: Optional[int] = None
type_aggregation: Optional[AggregationType] = None
@validator("type_aggregation", always=True)
def type_aggregation_must_be_coherent(cls, v, values):
        if v == AggregationType.time:
            if values["time_aggregation"] and values["space_aggregation"] is None:
                return v
            else:
                raise ValueError(
                    "type_aggregation is time so time_aggregation must be set and space_aggregation unset"
                )
        elif v == AggregationType.space:
            if values["space_aggregation"] and values["time_aggregation"] is None:
                return v
            else:
                raise ValueError(
                    "type_aggregation is space so space_aggregation must be set and time_aggregation unset"
                )
        else:
            if values["space_aggregation"] == values["time_aggregation"] and v is None:
                return v
            else:
                raise ValueError("type_aggregation is not set")
def __init__(self, **data):
super().__init__(**data)
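# Illustrative usage sketch (not part of the original module; it assumes
# AggregationType exposes `time` and `space` members, as the validator above
# already references them).
if __name__ == "__main__":
    # A coherent time-based aggregation: time_aggregation set, space unset.
    extract = AggregationExtract(
        time_aggregation=60,
        type_aggregation=AggregationType.time,
    )
    print(extract.dict())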
|
from kombu import Queue
QUEUES = { # RabbitMQ Queue definitions, they'll be declared at gunicorn start time
"bot_events": Queue("bot_events", durable=True)
}
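# Illustrative sketch (not part of the original module, and the broker URL is a
# placeholder): declaring every queue in QUEUES on an existing kombu connection,
# e.g. from a gunicorn server hook.
#
# from kombu import Connection
# with Connection("amqp://guest:guest@localhost//") as conn:
#     for queue in QUEUES.values():
#         queue(conn.default_channel).declare()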
|
"""
Load configuration data from environment variables.
"""
import os
import functools
from urllib.parse import urljoin
@functools.lru_cache(maxsize=1)
def get_config():
"""Load environment configuration data."""
spec_path = os.environ.get('SPEC_PATH', '/spec') # /spec
spec_url = 'https://api.github.com/repos/kbase/relation_engine_spec'
spec_release_url = os.environ.get('SPEC_RELEASE_URL')
spec_release_path = os.environ.get('SPEC_RELEASE_PATH')
kbase_endpoint = os.environ.get('KBASE_ENDPOINT', 'https://ci.kbase.us/services')
auth_url = os.environ.get('KBASE_AUTH_URL', urljoin(kbase_endpoint + '/', 'auth'))
workspace_url = os.environ.get('KBASE_WORKSPACE_URL', urljoin(kbase_endpoint + '/', 'ws'))
db_url = os.environ.get('DB_URL', 'http://arangodb:8529')
db_name = os.environ.get('DB_NAME', '_system')
db_user = os.environ.get('DB_USER', 'root')
db_pass = os.environ.get('DB_PASS', '')
db_readonly_user = os.environ.get('DB_READONLY_USER', db_user)
db_readonly_pass = os.environ.get('DB_READONLY_PASS', db_pass)
api_url = db_url + '/_db/' + db_name + '/_api'
return {
'auth_url': auth_url,
'workspace_url': workspace_url,
'kbase_endpoint': kbase_endpoint,
'db_url': db_url,
'api_url': api_url,
'db_name': db_name,
'db_user': db_user,
'db_pass': db_pass,
'db_readonly_user': db_readonly_user,
'db_readonly_pass': db_readonly_pass,
'spec_url': spec_url,
'spec_release_url': spec_release_url,
'spec_release_path': spec_release_path,
'spec_paths': {
'root': spec_path, # /spec
'release_id': os.path.join(spec_path, '.release_id'),
'collections': os.path.join(spec_path, 'collections'), # /spec/collections
'datasets': os.path.join(spec_path, 'datasets'),
'data_sources': os.path.join(spec_path, 'data_sources'),
'stored_queries': os.path.join(spec_path, 'stored_queries'),
'views': os.path.join(spec_path, 'views'),
}
}
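# Illustrative usage (not part of the original module): the result is cached by
# lru_cache, so repeated calls return the same dict without re-reading the
# environment.
if __name__ == '__main__':
    config = get_config()
    print(config['db_url'], config['spec_paths']['collections'])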
|
from TTS.text2speech import tts_class
from multiprocessing import Process
import faiss
import time
import sqlite3
import csv
import random
import copy
import tensorflow_hub as hub
import tensorflow_text
import math
import numpy as np
import pickle
from Retriever.Retrieve import retrieve
import Utils.functions as utils
from ReRanker.rerank import rank_and_choose
from Generator.generator import generate as DialoGPT_Generate
from Classifier.model.dialog_acts import Encoder as Classifier
from Sentence_Encoder.meta_response_encoder_fast import encode as response_encode
from Sentence_Encoder.meta_query_encoder_fast import encode as query_encode
import Sentence_Encoder.encoder_client as encoder_client
import tensorflow as tf
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import torch.nn.functional as F
import torch.nn as nn
import torch as T
import os
import sys
import argparse
import logging
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.basicConfig(level=logging.CRITICAL)
parser = argparse.ArgumentParser(description="Chatbot")
parser.add_argument('--voice', dest='voice', action='store_true')
parser.add_argument('--no-voice', dest='voice', action='store_false')
parser.set_defaults(voice=True)
flags = parser.parse_args()
device = "cuda"
# LOAD DATABASE
with open("Retriever/Faiss_index/thread_idx.pkl", 'rb') as fp:
idx = pickle.load(fp)
index = faiss.read_index('Retriever/Faiss_index/large.index')
# LOAD DATABASE
conn = sqlite3.connect('Retriever/Database/reddit.db')
c = conn.cursor()
# LOAD SCRIPTS
with open('Scripted/Processed_Scripts/Bot_Profile.pkl', 'rb') as fp:
bot_profile = pickle.load(fp)
bot_queries = [k for k, v in bot_profile.items()]
with open('Scripted/Processed_Scripts/Chatterbot.pkl', 'rb') as fp:
chatterbot = pickle.load(fp)
chatterbot_queries = [k for k, v in chatterbot.items()]
# LOAD SCRIPT EMBEDDINGS
with open('Scripted/Processed_Scripts/embedded_bot_queries.pkl', 'rb') as fp:
bot_queries_embd = pickle.load(fp)
with open('Scripted/Processed_Scripts/embedded_chatterbot_queries.pkl', 'rb') as fp:
chatterbot_queries_embd = pickle.load(fp)
# Load Dialog Acts Classifer
with open("Classifier/data/processed_data.pkl", "rb") as fp:
data = pickle.load(fp)
labels2idx = data["labels2idx"]
idx2labels = {v: k for k, v in labels2idx.items()}
# Load TTS model
with T.no_grad():
text2speech = tts_class()
with T.no_grad():
dialog_act_classifier = Classifier(
D=bot_queries_embd.shape[-1], classes_num=len(labels2idx)).cuda()
checkpoint = T.load("Classifier/Model_Backup/model.pt")
dialog_act_classifier.load_state_dict(checkpoint['model_state_dict'])
dialog_act_classifier = dialog_act_classifier.eval()
# LOAD DialoGPT Generator
with T.no_grad():
tokenizer = GPT2Tokenizer.from_pretrained('Generator/DialoGPT/Configs/')
weights = T.load('Generator/DialoGPT/Parameters/medium_ft.pkl')
weights_reverse = T.load('Generator/DialoGPT/Parameters/small_reverse.pkl')
cfg = GPT2Config.from_json_file('Generator/DialoGPT/Configs/config.json')
model = GPT2LMHeadModel(cfg)
model_reverse = GPT2LMHeadModel(cfg)
# fix misused key value
weights["lm_head.weight"] = weights["lm_head.decoder.weight"]
weights.pop("lm_head.decoder.weight", None)
weights_reverse["lm_head.weight"] = weights_reverse["lm_head.decoder.weight"]
weights_reverse.pop("lm_head.decoder.weight", None)
model.load_state_dict(weights)
model.to('cuda')
model.eval()
model_reverse.load_state_dict(weights_reverse)
model_reverse.to('cuda')
model_reverse.eval()
with tf.device("/cpu:0"):
# sess = tf.InteractiveSession(graph=tf.Graph())
# LOAD STUFF
# LOAD SENTENCE ENCODERS
# Hub Models
ConvRT_model = encoder_client.EncoderClient(
"Sentence_Encoder/Embeddings/ConvRT", use_extra_context=True)
USE_QA_model = hub.load("Sentence_Encoder/Embeddings/USE_QA/")
# %%
command_codes = ["<PASS>", "<JOKE>", "<GENERATE>",
"<INITIATE>", "<TIL>", "<STORY>", "<SHOWER>", "<STOP>"]
code_map = {"<INITIATE>": ["Scripted/Random_Reddit_Data/nostupidq.csv",
"Scripted/Random_Reddit_Data/jokesq.csv",
"Scripted/Random_Reddit_Data/showerthoughtsq.csv",
"Scripted/Random_Reddit_Data/tilq.csv"],
"<TIL>": ["Scripted/Random_Reddit_Data/tilq.csv"],
"<SHOWER>": ["Scripted/Random_Reddit_Data/showerthoughtsq.csv"],
"<STORY>": ["Scripted/Random_Reddit_Data/writingpromptsa.csv"],
"<JOKE>": ["Scripted/Random_Reddit_Data/jokesq.csv"]}
def random_response(candidates, conversation_history, p=None):
loop = 5
if p is None:
response = random.choice(candidates)
else:
response = np.random.choice(candidates, p=p)
i = 0
while response in conversation_history:
if p is None:
response = random.choice(candidates)
else:
response = np.random.choice(candidates, p=p)
i += 1
if i > loop:
break
return response
# %%
def load_random_reddit(directory, conversation_history):
candidates = []
with open(directory, newline='') as csvfile:
csv_reader = csv.DictReader(csvfile)
for i, row in enumerate(csv_reader):
if 'writing' in directory:
parent_id = str(row['parent_id'])[3:]
thread_id = str(row['link_id'])[3:]
if parent_id == thread_id:
candidate = str(row["body"])
else:
candidate = str(row["title"])
if 'joke' in directory:
candidate += ".... "+str(row['selftext'])
candidates.append(candidate)
return random_response(candidates, conversation_history)
# extract top candidates (queries or responses)
def top_candidates(candidates, scores, top=1):
sorted_score_idx = np.flip(np.argsort(scores), axis=-1)
candidates = [candidates[i] for i in sorted_score_idx.tolist()]
scores = [scores[i] for i in sorted_score_idx.tolist()]
return candidates[0:top], scores[0:top], sorted_score_idx.tolist()
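# Worked example (illustrative): top_candidates(["a", "b", "c"], [0.1, 0.9, 0.5], top=2)
# returns (["b", "c"], [0.9, 0.5], [1, 2, 0]) -- the candidates and scores sorted by
# descending score, plus the full ordering of the original indices.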
# %%
def generate(texts, past):
candidates, _ = DialoGPT_Generate(texts, model, tokenizer)
return candidates, past
# START DOING STUFF
conversation_history = []
past = None
stop_flag = 0
print("\n")
while True:
    utterance = input("Say Something: ")  # e.g. "hello how are ya today"
response_code = ""
retrieved_candidates = []
utils.delay_print("\nThinking......")
candidates = []
temp_candidates = []
temp_scores = []
if not conversation_history:
query_context = []
response_context = [""]
else:
if len(conversation_history) > 5:
truncated_history = copy.deepcopy(conversation_history[-5:])
else:
truncated_history = copy.deepcopy(conversation_history)
response_context = [conversation_history[-1]]
        # ConveRT needs a reversed context; not sure about USE QA, but assuming it is not reversed
query_context = [stuff for stuff in truncated_history]
query_encoding = query_encode([utterance], USE_QA_model, ConvRT_model, [query_context])
if conversation_history:
if len(conversation_history) > 5:
truncated_history = conversation_history[-5:]
else:
truncated_history = conversation_history
generated_responses, past = generate(truncated_history+[utterance], past)
else:
generated_responses, past = generate([utterance], past)
bot_cosine_scores = utils.cosine_similarity_nd(query_encoding, bot_queries_embd)
bot_queries_, bot_cosine_scores_, _ = top_candidates(bot_queries, bot_cosine_scores, top=1)
active_codes = []
bot_candidates = bot_profile[bot_queries_[0]]
filtered_bot_candidates = []
for candidate in bot_candidates:
flag = 0
for code in command_codes:
if code in candidate:
active_codes.append(code)
candidate = candidate.replace(code, "")
filtered_bot_candidates.append(candidate)
flag = 1
break
if flag == 0:
candidates.append(candidate)
filtered_bot_candidates.append(candidate)
active_codes.append("")
with T.no_grad():
logits = dialog_act_classifier(T.tensor(query_encoding).to(device))
_, sorted_idx = T.sort(logits, dim=-1, descending=True)
sorted_idx = sorted_idx.squeeze(0)
sorted_idx = sorted_idx[0:2].cpu().tolist()
labels = [idx2labels[i] for i in sorted_idx]
print("\nClassified Dialog Acts: {}\n".format(", ".join(labels)))
# print(labels)
"""
Possible Dialog Acts:
['nonsense', 'dev_command', 'open_question_factual', 'appreciation', 'other_answers', 'statement', \
'respond_to_apology', 'pos_answer', 'closing', 'comment', 'neg_answer', 'yes_no_question', 'command', \
'hold', 'NULL', 'back-channeling', 'abandon', 'opening', 'other', 'complaint', 'opinion', 'apology', \
'thanking', 'open_question_opinion']
"""
if bot_cosine_scores_[0] >= 0.75:
response, id = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
filtered_bot_candidates,
response_context,
conversation_history)
code = active_codes[id]
if code in code_map:
response_code = "(Reddit JOKE/WRITING/TIL ETC.)"
directories = code_map[code]
directory = random.choice(directories)
response += " "+load_random_reddit(directory, conversation_history)
elif code == "<GENERATE>":
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses,
response_context,
conversation_history)
elif code == "<STOP>":
stop_flag = 1
elif stop_flag != 1:
mode = "DEFAULT"
bias = None
if 'open_question_factual' in labels \
or ('yes_no_question' in labels and 'NULL' not in labels) \
or 'open_question_opinion' in labels or 'command' in labels:
bias = 0.07 # biases towards retrieval
elif "apology" in labels:
mode = "BREAK"
candidates = ["Apology accepted.", "No need to apologize.",
"No worries.", "You are forgiven"]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
candidates,
response_context,
conversation_history)
elif "abandon" in labels or "nonsense" in labels:
mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
if mode == "BREAK":
candidates = ["what?", "Can you rephrase what you mean?",
"What do you mean exactly?"]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
else:
directories = code_map['<INITIATE>']
directory = random.choice(directories)
response = load_random_reddit(directory, conversation_history)
elif 'hold' in labels:
mode = "BREAK"
candidates = ["Do you want to add something more?",
"I think you want to say something more."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
elif 'closing' in labels:
mode = "BREAK"
candidates = ["Nice talking to you.", "Goodbye.", "See you later."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
candidates,
response_context,
conversation_history)
stop_flag = 1
elif 'opening' in labels:
mode = "BREAK"
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses,
response_context,
conversation_history)
stop_flag = 1
elif 'thanking' in labels:
mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
if mode == "BREAK":
candidates = ["No need to mention", "You are welcome."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
else:
directories = code_map['<INITIATE>']
directory = random.choice(directories)
response = load_random_reddit(directory, conversation_history)
elif 'apology' in labels:
mode = "BREAK"
candidates = ["Apology accepted.", "Apology granted",
"No Worries!", "No need to apologize."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
        elif 'respond_to_apology' in labels\
                or 'pos_answer' in labels or 'neg_answer' in labels\
                or 'appreciation' in labels or 'back-channeling' in labels:
mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
if mode == "BREAK":
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses,
response_context,
conversation_history)
else:
directories = code_map['<INITIATE>']
directory = random.choice(directories)
response = load_random_reddit(directory, conversation_history)
if mode != "BREAK":
chatterbot_cosine_scores = utils.cosine_similarity_nd(
query_encoding, chatterbot_queries_embd)
chatterbot_queries_, chatterbot_cosine_scores_, _ = top_candidates(
chatterbot_queries, chatterbot_cosine_scores, top=1)
chatterbot_candidates = chatterbot[chatterbot_queries_[0]]
candidates += chatterbot_candidates
retrieved_candidates = retrieve(
conn, c, idx, index, query_encoding, query_context)
if bias is not None:
biases = [0.0 for _ in candidates]
for _ in generated_responses:
biases.append(0.0)
for _ in retrieved_candidates:
biases.append(bias)
biases = np.asarray(biases, np.float32)
else:
biases = None
candidates += generated_responses + retrieved_candidates
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
candidates,
response_context,
conversation_history,
bias=biases)
if response_code == "":
if response in generated_responses:
response_code = "(GENERATED)"
elif response in retrieved_candidates:
response_code = "(RETRIEVED)"
elif response in filtered_bot_candidates:
response_code = "(FROM SCRIPT)"
elif response in chatterbot_candidates:
response_code = "(FROM CHATTERBOT SCRIPT)"
else:
response_code = "(I DON'T KNOW WHERE IT IS FROM)"
print("\n")
if len(str(response).split(" ")) <= 100:
if flags.voice:
entry = utils.simple_preprocess(str(response).lower(),
for_speech=True,
return_tokenized=True)
entry = " ".join(entry)
wavefiles = text2speech.process(entry)
def f1():
utils.delay_print("Bot: "+response)
def f2():
text2speech.play(wavefiles)
p1 = Process(target=f1)
p2 = Process(target=f2)
p1.start()
p2.start()
p1.join()
p2.join()
else:
utils.delay_print("Bot: "+response)
else:
utils.delay_print("Bot: "+response, t=0.01)
print("\n")
conversation_history.append(utterance)
conversation_history.append(response)
if stop_flag == 1:
break
# break
|
#
# (C) Copyright 2003-2010 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Interfaces for flexible API extensions."""
__revision__ = "$Id: error.py 647 2006-08-26 18:27:39Z jajcus $"
__docformat__ = "restructuredtext en"
from pyxmpp.interface import Interface, Attribute
class IPyXMPPHelper(Interface):
"""Base for all interfaces used as PyXMPP helpers."""
class IPresenceHandlersProvider(IPyXMPPHelper):
def get_presence_handlers():
"""Returns iterable over (presence_type, handler[, namespace[, priority]]) tuples.
The tuples will be used as arguments for `Stream.set_presence_handler`."""
class IMessageHandlersProvider(IPyXMPPHelper):
def get_message_handlers():
"""Returns iterable over (message_type, handler[, namespace[, priority]]) tuples.
The tuples will be used as arguments for `Stream.set_message_handler`."""
class IIqHandlersProvider(IPyXMPPHelper):
def get_iq_get_handlers():
"""Returns iterable over (element_name, namespace) tuples.
The tuples will be used as arguments for `Stream.set_iq_get_handler`."""
def get_iq_set_handlers():
"""Returns iterable over (element_name, namespace) tuples.
The tuples will be used as arguments for `Stream.set_iq_set_handler`."""
class IStanzaHandlersProvider(IPresenceHandlersProvider, IMessageHandlersProvider, IIqHandlersProvider):
pass
class IFeaturesProvider(IPyXMPPHelper):
def get_features():
"""Return iterable of namespaces (features) supported, for disco#info
query response."""
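# Illustrative sketch (not part of the original module): an object fulfilling
# IMessageHandlersProvider by returning the tuples described in its docstring.
# The handler body is hypothetical; a real handler would build a reply stanza.
class EchoMessageHandlers(object):
    """Toy provider of a single handler for 'chat' messages."""
    def handle_chat(self, stanza):
        # hypothetical handler; returning True marks the stanza as handled
        return True
    def get_message_handlers(self):
        return [("chat", self.handle_chat)]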
__all__ = [ name for name in dir() if name.startswith("I") and name != "Interface" ]
# vi: sts=4 et sw=4
|
from colormath.color_objects import sRGBColor, LabColor
import colormath.color_conversions as color_conversions
import colormath.color_diff as color_diff
import random
class Color(object):
'''
    A wrapper around LabColor. Originally designed as a subclass of LabColor, but implemented
    as a wrapper because convert_color accepts LabColor itself but not subclasses of it.
'''
def __init__(self, *args, **kwargs):
self.c = LabColor(*args, **kwargs)
def l(self):
return self.c.lab_l
def a(self):
return self.c.lab_a
def b(self):
return self.c.lab_b
def to_rgb(self):
return color_conversions.convert_color(self.c, sRGBColor)
def to_hex(self):
return self.to_rgb().get_rgb_hex()
def to_grayhex(self):
rgb = self.to_rgb().get_value_tuple()
y = self.linear_to_srgb(sum(self.rgb_to_gray_weight[i] * self.srgb_to_linear(rgb[i]) for i in [0,1,2]))
return sRGBColor(y, y, y).get_rgb_hex()
def diff(self, other):
if isinstance(other, LabColor):
return color_diff.delta_e_cie2000(self.c, other)
elif isinstance(other, Color):
return color_diff.delta_e_cie2000(self.c, other.c)
else:
return self.diff(color_conversions.convert_color(other.c, LabColor))
rgb_to_gray_weight = [0.2126, 0.7152, 0.0722] # in linear space
@classmethod
def srgb_to_linear(cls, c):
return c / 12.92 if c < 0.04045 else ((c + 0.055) / 1.055)**2.4
@classmethod
def linear_to_srgb(cls, c):
return c * 12.92 if c < 0.0031308 else 1.055 * (c**(1 / 2.4)) - 0.055
@classmethod
def random_with_fixed_gray(cls, gray):
'''
        Generate three random numbers x, y, z in [0, 1) that satisfy
(x, y, z) . rgb_to_gray_weight = gray
This forms a triangle plane in a three dimensional space; then the value is uniformly given by
R = r1 X + r2 Y + (1-r1-r2) Z
with uniform r1 and r2;
X = (1/0.2126, 0, 0),
Y = (0, 1/0.7152, 0),
Z = (0, 0, 1/0.0722).
'''
if gray < 0:
return [0, 0, 0]
elif gray < cls.rgb_to_gray_weight[2]: # the minimal value, 0.0722
while True:
r1 = random.random()
r2 = random.random()
if r1 + r2 < 1:
return [r1 * gray / cls.rgb_to_gray_weight[0],
r2 * gray / cls.rgb_to_gray_weight[1],
(1-r1-r2) * gray / cls.rgb_to_gray_weight[2]]
elif gray > 1 - cls.rgb_to_gray_weight[2]:
return [1 - v for v in cls.random_with_fixed_gray(1-gray)]
else:
y_min = max(0., (gray - 1 + cls.rgb_to_gray_weight[1]) / cls.rgb_to_gray_weight[1])
y_max = min(1., gray / cls.rgb_to_gray_weight[1])
while True:
z = random.random()
y = random.uniform(y_min, y_max)
x = (gray - y * cls.rgb_to_gray_weight[1] - z * cls.rgb_to_gray_weight[2]) / cls.rgb_to_gray_weight[0]
if not (x < 0 or x > 1):
return [x, y, z]
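    # Quick check of the invariant documented above (illustrative only):
    #   r, g, b = Color.random_with_fixed_gray(0.5)
    #   assert abs(sum(w * c for w, c in zip(Color.rgb_to_gray_weight, (r, g, b))) - 0.5) < 1e-9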
def colorize(self, gray_l=None):
'''
:param gray_l: the value of lab_l when converted to gray
'''
# first convert the gray level to sRGB value
if gray_l is None:
gray_l = self.l()
gray_srgb = color_conversions.convert_color(LabColor(gray_l, 0, 0), sRGBColor).rgb_r
gray_linear = self.srgb_to_linear(gray_srgb)
random_linear = self.random_with_fixed_gray(gray_linear)
random_rgb = [self.linear_to_srgb(c) for c in random_linear]
rgb = sRGBColor(*random_rgb)
self.c = color_conversions.convert_color(rgb, LabColor)
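# Illustrative usage (not part of the original module): start from a neutral
# gray in Lab space, then randomise its chroma while keeping the same gray
# level when converted back.
if __name__ == '__main__':
    c = Color(60, 0, 0)   # L*=60, a*=b*=0, i.e. a pure gray
    c.colorize()          # pick a random colour with the same gray level
    print(c.to_hex(), c.to_grayhex())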
|
def prima(parola):
return parola[0]
def ultima(parola):
return parola[-1]
def mezzo(parola):
return parola[1:-1]
def palindromo(parola):
if len(parola) <= 1:
return True
if prima(parola) != ultima(parola):
return False
    return palindromo(mezzo(parola))
# I added a label string to each print so it is clearer which word was checked for being a palindrome
print("ottetto: " + str(palindromo("ottetto")))
print("andrea: " + str(palindromo("andrea")))
print("otto: " + str(palindromo("otto")))
|
#!/usr/bin/env python
'''
hackflight_companion.py : Companion-board Python code. Runs in
Python2 instead of Python3, so we can install OpenCV without
major hassles.
This file is part of Hackflight.
Hackflight is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Hackflight is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Hackflight. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import cv2
import numpy as np
import threading
from msppg import MSP_Parser, serialize_ATTITUDE_Request, serialize_ALTITUDE_Request, serialize_SET_HEAD
def commsReader(comms_from_client, parser):
while True:
# Read one byte from the client and parse it
bytes = comms_from_client.recv(1)
if len(bytes) > 0:
parser.parse(bytes[0])
def putTextInImage(image, text, x, y, scale, color, thickness=1):
cv2.putText(image, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, scale, color, thickness)
def processImage(image, parser, comms_to_client):
# Blur image to remove noise
frame = cv2.GaussianBlur(image, (3, 3), 0)
# Switch image from BGR colorspace to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Define range of blue color in HSV
bluemin = (100, 50, 10)
bluemax = (255, 255, 255)
# Find where image is in blue range
bluepart = cv2.inRange(hsv, bluemin, bluemax)
    # Find coordinates of blue pixels
y, x = np.where(bluepart)
    # If a significant fraction of the pixels are blue
if len(x) / float(np.prod(bluepart.shape)) > 0.2:
# Find the centroid of the blue component
x,y = np.int(np.mean(x)), np.int(np.mean(y))
# Label the centroid point as water
putTextInImage(image, 'WATER', x, y, 1, (0,255,255), 2)
# If we've just seen water for the first time, send a SET_HEADING message to the client
if not parser.over_water:
new_heading = parser.heading - 180
print('set head: %d' % new_heading)
if not comms_to_client is None:
comms_to_client.send(serialize_SET_HEAD(new_heading))
# Set a flag that we've seen water
parser.over_water = True
# Add text for altitude
labelx = 5
labely = 10
labelw = 270
labelh = 20
labelm = 5 # margin
cv2.rectangle(image, (labelx,labely), (labelx+labelw,labely+labelh), (255,255,255), -1) # filled white rectangle
putTextInImage(image, 'ABL = %3.2f m | Heading = %d' % (parser.altitude/100., parser.heading),
labelx+labelm, labely+labelh-labelm, .5, (255,0,0))
class MyParser(MSP_Parser):
def __init__(self):
MSP_Parser.__init__(self)
self.altitude = 0
self.heading = 0
self.over_water = False
def altitudeHandler(self, altitude, vario):
self.altitude = altitude
def attitudeHandler(self, pitch, roll, yaw):
self.heading = yaw
if __name__ == '__main__':
# Create an MSP parser and messages for telemetry requests
parser = MyParser()
parser.set_ATTITUDE_Handler(parser.attitudeHandler)
parser.set_ALTITUDE_Handler(parser.altitudeHandler)
    # Serialize the telemetry message requests that we'll send to the "firmware"
attitude_request = serialize_ATTITUDE_Request()
altitude_request = serialize_ALTITUDE_Request()
    # More than two command-line arguments means simulation mode. The args are: camera-client
    # port, MSP-to-client port, MSP-from-client port, input image file name, output image file name.
if len(sys.argv) > 2:
from socket_server import serve_socket
# Serve a socket for camera synching, and a socket for comms
camera_client = serve_socket(int(sys.argv[1]))
comms_to_client = serve_socket(int(sys.argv[2]))
comms_from_client = serve_socket(int(sys.argv[3]))
image_from_sim_name = sys.argv[4]
image_to_sim_name = sys.argv[5]
# Run serial comms telemetry reading on its own thread
thread = threading.Thread(target=commsReader, args = (comms_from_client,parser))
thread.daemon = True
thread.start()
while True:
# Receive the camera sync byte from the client
camera_client.recv(1)
# Load the image from the temp file
image = cv2.imread(image_from_sim_name, cv2.IMREAD_COLOR)
# Process it
processImage(image, parser, comms_to_client)
# Write the processed image to a file for the simulator to display
cv2.imwrite(image_to_sim_name, image)
            # Send the telemetry request messages to the client
comms_to_client.send(attitude_request)
comms_to_client.send(altitude_request)
# Fewer than three arguments: live mode or camera-test mode
else:
        commport = sys.argv[1] if len(sys.argv) > 1 else None
cap = cv2.VideoCapture(0)
while True:
success, image = cap.read()
if success:
# Process image
processImage(image, parser, None)
# Test mode; display image
if commport is None:
cv2.imshow('OpenCV', image)
if cv2.waitKey(1) == 27: # ESC
break
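# Quick sanity check of the blue threshold above (illustrative, not part of the
# original script; it can be run without a camera):
#
# frame = np.zeros((120, 160, 3), np.uint8)
# frame[:, 80:] = (255, 0, 0)                 # right half pure blue in BGR
# hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# mask = cv2.inRange(hsv, (100, 50, 10), (255, 255, 255))
# print(mask.mean() / 255.0)                  # ~0.5, i.e. half the pixels count as blue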
|
import ast
import json
p = "{\'voter1': \"{\'a\':1, \'b\':2}\", \'voter2\': \"{\'a\':0, \'b\':1}\"}"
p1 = ast.literal_eval(p)
p2 = {}
for k, v in p1.items():
p2[k] = ast.literal_eval(v)
print(p2)
print('json: ', json.dumps(p2))
print(p)
print(str(p2))
|
"""
Copyright Matt DeMartino (Stravajiaxen)
Licensed under MIT License -- do whatever you want with this, just don't sue me!
This code attempts to solve Project Euler (projecteuler.net) Problem #18 Maximum path sum I
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
   3
  7 4
 2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom of the triangle below:
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
NOTE: As there are only 16384 routes, it is possible to solve this problem by trying every route. However, Problem 67, is the same challenge with a triangle containing one-hundred rows; it cannot be solved by brute force, and requires a clever method! ;o)
"""
import time
import networkx as nx
def main():
triangle = \
'''75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''
# Construct a graph
G = nx.DiGraph()
G.add_node("source")
G.add_node("sink")
triangle = [[int(i) for i in line.split()] for line in triangle.split("\n")]
# Store vals to make it easy to add up the best path at the end
vals = {}
# Constructing the graph edges with weights
for i, row in enumerate(triangle):
for j, val in enumerate(row):
print(val)
G.add_node((i, j))
vals[(i, j)] = val
if i == 0:
G.add_edge("source", (i, j), weight=100-val, num=val) # 100-val lets us make it a minimization problem
else:
if j == 0: # Don't do j-1
G.add_edge((i-1, j), (i, j), weight=100-val, num=val)
if j == i: # Don't do j+1
G.add_edge((i-1, j-1), (i, j), weight=100-val, num=val)
if j!=0 and j!= i: # Add two edges
G.add_edge((i-1, j-1), (i, j), weight=100-val, num=val)
G.add_edge((i-1, j), (i, j), weight=100-val, num=val)
# Connect to the sink
for j, val in enumerate(triangle[-1]):
G.add_edge((len(triangle)-1, j), "sink", weight=100-val, num=val)
path = nx.dijkstra_path(G, "source", "sink")
# Sum up the vals of the edges along the best path.
tot = 0
for edge in path:
if edge != "source" and edge != "sink":
tot += vals[edge]
print(tot)
if __name__ == "__main__":
start_time = time.time()
main()
elapsed_time = time.time() - start_time
print("Elapsed Time: ", elapsed_time)
|
"""
AUTHOR - Atharva Deshpande
GITHUB - https://github.com/AtharvaD11
QUESTION LINK - https://www.codechef.com/LRNDSA01/problems/MULTHREE
"""
HINT - Please learn Modulo Arithmetic first.
***********************************************
for _ in range(int(input())):
k,d0,d1 = map(int,input().split())
s = d0+d1
total = 0
if k== 2:
total = s
else:
        # one full cycle of the repeating last digits: the multiplier of s doubles mod 10 (2, 4, 8, 6)
        c = (2*s)%10 + (4*s)%10 + (6*s)%10 + (8*s)%10
num_cycles = (k-3)//4
total = s +(s%10) + (c*num_cycles)
left_over = (k-3) - (num_cycles*4)
p = 2
for i in range(1,left_over+1):
total += (p*s)%10
p = (p*2)%10
if total%3==0:
print("YES")
else:
print("NO")
|
from secml.optim.optimizers.tests import COptimizerTestCases
from secml.optim.optimizers import COptimizerPGD
class TestCOptimizerPGD(COptimizerTestCases):
"""Unittests for COptimizerPGDLS."""
def test_minimize_3h_camel(self):
"""Test for COptimizer.minimize() method on 3h-camel fun."""
opt_params = {'eta': 1e-1, 'eps': 1e-12}
self._test_minimize(
COptimizerPGD, '3h-camel', opt_params=opt_params)
def test_minimize_beale(self):
"""Test for COptimizer.minimize() method on beale fun."""
opt_params = {'eta': 1e-2, 'eps': 1e-12, 'max_iter': 2000}
self._test_minimize(
COptimizerPGD, 'beale', opt_params=opt_params)
def test_minimize_mc_cormick(self):
"""Test for COptimizer.minimize() method on mc-cormick fun."""
from secml.optim.function import CFunctionMcCormick
from secml.optim.constraints import CConstraintBox
opt_params = {'eta': 1e-1, 'eps': 1e-12,
'bounds': CConstraintBox(*CFunctionMcCormick.bounds())}
self._test_minimize(
COptimizerPGD, 'mc-cormick', opt_params=opt_params)
def test_minimize_rosenbrock(self):
"""Test for COptimizer.minimize() method on rosenbrock fun."""
opt_params = {'eta': 0.002, 'eps': 1e-12, 'max_iter': 8000}
self._test_minimize(
COptimizerPGD, 'rosenbrock', opt_params=opt_params)
def test_constr_bounds(self):
"""Test for COptimizer.minimize() method behaviour
depending on constraint and bounds."""
self._test_constr_bounds(COptimizerPGD)
if __name__ == '__main__':
COptimizerTestCases.main()
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
RegSeg2
"""
from datetime import date
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
PACKAGE_NAME = 'regseg'
__author__ = 'Oscar Esteban'
__email__ = 'code@oscaresteban.es'
__maintainer__ = 'Oscar Esteban'
__copyright__ = ('Copyright %d, %s and Center for Reproducible Neuroscience, '
'Stanford University') % (date.today().year, __author__)
__credits__ = __author__
__license__ = 'MIT License'
__status__ = '3 - Alpha'
__description__ = 'Surface-driven 3D image registration in python'
__longdesc__ = ('RegSeg is an image joint segmentation-registration method that '
'maps surfaces into volumetric, multivariate 3D data. The surfaces '
'must be triangular meshes, and they drive the registration process '
'in a way that the parametric properties of the regions defined by '
'the surfaces are best fitted.')
__url__ = 'http://{}.readthedocs.org/'.format(PACKAGE_NAME)
__download__ = ('https://github.com/oesteban/{}-2/archive/'
'{}.tar.gz').format(PACKAGE_NAME, __version__)
CLASSIFIERS = [
'Development Status :: %s' % __status__,
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Image Recognition',
'License :: OSI Approved :: %s' % __license__,
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
SETUP_REQUIRES = []
REQUIRES = [
'numpy>=1.12.0',
'scikit-learn>=0.19.0',
'scipy',
'six',
'nibabel',
'versioneer',
]
LINKS_REQUIRES = [
]
TESTS_REQUIRES = [
'pytest',
'codecov',
'pytest-xdist',
]
EXTRA_REQUIRES = {
'doc': ['sphinx>=1.5,<1.6', 'sphinx_rtd_theme>=0.2.4', 'sphinx-argparse'],
'tests': TESTS_REQUIRES,
'notebooks': ['ipython', 'jupyter'],
}
# Enable a handle to install all extra dependencies at once
EXTRA_REQUIRES['all'] = [dep for deps in EXTRA_REQUIRES.values() for dep in deps]
|
# Copyright 2020 Toyota Research Institute. All rights reserved.
import MinkowskiEngine as ME
import torch.nn as nn
from packnet_sfm.networks.layers.minkowski import \
sparsify_depth, densify_features, densify_add_features_unc, map_add_features
class MinkConv2D(nn.Module):
"""
Minkowski Convolutional Block
Parameters
----------
in_planes : number of input channels
out_planes : number of output channels
kernel_size : convolutional kernel size
stride : convolutional stride
    with_uncertainty : with uncertainty or not
add_rgb : add RGB information as channels
"""
def __init__(self, in_planes, out_planes, kernel_size, stride,
with_uncertainty=False, add_rgb=False):
super().__init__()
self.layer3 = nn.Sequential(
ME.MinkowskiConvolution(
in_planes, out_planes * 2, kernel_size=kernel_size, stride=1, dimension=2),
ME.MinkowskiBatchNorm(out_planes * 2),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiConvolution(
out_planes * 2, out_planes * 2, kernel_size=kernel_size, stride=1, dimension=2),
ME.MinkowskiBatchNorm(out_planes * 2),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiConvolution(
out_planes * 2, out_planes, kernel_size=kernel_size, stride=1, dimension=2),
)
self.layer2 = nn.Sequential(
ME.MinkowskiConvolution(
in_planes, out_planes * 2, kernel_size=kernel_size, stride=1, dimension=2),
ME.MinkowskiBatchNorm(out_planes * 2),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiConvolution(
out_planes * 2, out_planes, kernel_size=kernel_size, stride=1, dimension=2),
)
self.layer1 = nn.Sequential(
ME.MinkowskiConvolution(
in_planes, out_planes, kernel_size=kernel_size, stride=1, dimension=2),
)
self.layer_final = nn.Sequential(
ME.MinkowskiBatchNorm(out_planes),
ME.MinkowskiReLU(inplace=True)
)
self.pool = None if stride == 1 else ME.MinkowskiMaxPooling(3, stride, dimension=2)
self.add_rgb = add_rgb
self.with_uncertainty = with_uncertainty
if with_uncertainty:
self.unc_layer = nn.Sequential(
ME.MinkowskiConvolution(
out_planes, 1, kernel_size=3, stride=1, dimension=2),
ME.MinkowskiSigmoid()
)
def forward(self, x):
"""
Processes sparse information
Parameters
----------
x : Sparse tensor
Returns
-------
Processed tensor
"""
if self.pool is not None:
x = self.pool(x)
x1 = self.layer1(x)
x2 = self.layer2(x)
x3 = self.layer3(x)
return None, self.layer_final(x1 + x2 + x3)
class MinkowskiEncoder(nn.Module):
"""
Depth completion Minkowski Encoder
Parameters
----------
channels : number of channels
with_uncertainty : with uncertainty or not
add_rgb : add RGB information to depth features or not
"""
def __init__(self, channels, with_uncertainty=False, add_rgb=False):
super().__init__()
self.mconvs = nn.ModuleList()
kernel_sizes = [5, 5] + [3] * (len(channels) - 2)
self.mconvs.append(
MinkConv2D(1, channels[0], kernel_sizes[0], 2,
with_uncertainty=with_uncertainty))
for i in range(0, len(channels) - 1):
self.mconvs.append(
MinkConv2D(channels[i], channels[i+1], kernel_sizes[i+1], 2,
with_uncertainty=with_uncertainty))
self.d = self.n = self.shape = 0
self.with_uncertainty = with_uncertainty
self.add_rgb = add_rgb
self.nr_layers = len(kernel_sizes)
def prep(self, d):
self.d = sparsify_depth(d)
self.shape = d.shape
self.n = 0
def forward(self):
unc, self.d = self.mconvs[self.n](self.d)
self.n += 1
out = densify_features(self.d, self.shape)
return out
# def forward(self, d):
# d = sparsify_depth(d)
# shape = d.shape
# unc, d1 = self.mconvs[0](d)
# unc, d2 = self.mconvs[1](d1)
# unc, d3 = self.mconvs[2](d2)
# unc, d4 = self.mconvs[3](d3)
# out1 = densify_features(d1, shape)
# out2 = densify_features(d2, shape)
# out3 = densify_features(d3, shape)
# out4 = densify_features(d4, shape)
# return out1, out2, out3, out4
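# Illustrative usage sketch (not part of the original file). The tensor sizes, the channel list,
# and the meaning of the returned features are assumptions based on the classes above; running
# it also requires a working MinkowskiEngine installation.
import torch


def _sketch_encoder_usage():
    depth = torch.zeros(1, 1, 64, 64)    # dense depth map with a few valid (non-zero) pixels
    depth[0, 0, 10, 20] = 5.0
    depth[0, 0, 40, 33] = 2.5
    enc = MinkowskiEncoder(channels=[32, 64, 128])
    enc.prep(depth)                       # sparsify once per input
    # one forward() call per encoder stage, each returning densified features
    return [enc.forward() for _ in range(enc.nr_layers)]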
|
from PyQt5.QtCore import pyqtSignal, QThread
__all__ = ['LoadingThread']
class LoadingThread(QThread):
"""This is base class of thread for using with LoadingWrapper
The idea is to move some heavy operations to a special thread and show
progress on the LoadingDialog.
This actually decreases perfomance a bit because of GIL, but improves user
experience"""
updateStatus = pyqtSignal(str) # Update status string
updatePercent = pyqtSignal(int) # Update a percent
updateMaxPercent = pyqtSignal(int) # Update maximum percent
loadingDone = pyqtSignal() # Finish loading
def __init__(self, parent=None):
super().__init__(parent)
self.operation = 'Operation'
self.i = 0
self.interval = -1
def set_interval(self, iter_num):
"""Set maximum number of operations
:param iter_num: Number of operations
"""
self.total = iter_num
if iter_num <= 100:
self.interval = 1
else:
self.interval = int(iter_num / 100)
self.updateMaxPercent.emit(100)
def check_percent(self, iter_):
"""Update percent for current operation number.
Intended to be used after LoadingThread.set_interval
:param iter_: 0 <= iter_ <= iter_num
"""
if self.interval == 1:
self.updatePercent.emit(int(iter_ / self.total * 100))
elif self.interval < 0:
return
else:
self.i += 1
if self.i == self.interval:
self.updatePercent.emit(int(iter_ / self.total * 100))
self.i = 0
def run(self, *args, **kwargs):
raise NotImplementedError(f'{self.run} is not implemented')
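# Illustrative usage sketch (not part of the original module): a minimal subclass that reports
# progress through the signals defined above. The work done in run() and the item list are
# made up for the example.
class ExampleLoadingThread(LoadingThread):
    def __init__(self, items, parent=None):
        super().__init__(parent)
        self.items = items
        self.operation = 'Processing items'

    def run(self):
        self.updateStatus.emit(self.operation)
        self.set_interval(len(self.items))
        for i, item in enumerate(self.items):
            # ... do the heavy work on `item` here ...
            self.check_percent(i)
        self.loadingDone.emit()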
|
# coding=utf-8
"""
DBWriter.py
Describes the class that runs as a separate process and writes data to the database as it is pushed into a global queue.
"""
import multiprocessing
import configparser
import psycopg2
from typing import List, Tuple
from websites import ExtractedArticle
from psycopg2 import sql
class DBWriter(multiprocessing.Process):
"""
This class is responsible for checking the global queue for objects. If any have been placed, it will then write
    them to the database. It runs in its own process.
"""
# ******************************************************************************************************************
def __init__(self, inQueue: multiprocessing.Queue, inConfigObject: configparser.ConfigParser):
"""
Perform our specific setup
:param inQueue: multiprocessing.Queue object to read from
"""
try:
multiprocessing.Process.__init__(self, group=None)
# Flag to check if we created OK.
self.good = True
# Save the global queue
if not inQueue:
self.good = False
# Don't bother running if we don't have a Queue object passed in
return
self._queue = inQueue
# DB Parameters
self._DBHost = str()
self._DBPort = str()
self._DBUser = str()
self._DBPassword = str()
self._DBTable = str()
# DB Objects
self._DBConnection = None
self._DBCursor = None
# Read the configuration
if not self._ReadConfiguration(inConfigObject):
self.good = False
# If we can't read the full configuration, then no need to continue
return
# Create the database object and cursor
if not self._CreateDBObjects():
self.good = False
# If we can't create the DB connection, no need to continue
return
except Exception as e:
print("Exception in DBWriter::__init__: {}".format(e))
self.good = False
# ******************************************************************************************************************
def __del__(self):
"""
Clean up after ourselves
:return: None
"""
try:
# In case we have any commits outstanding.
if self._DBConnection:
self._DBConnection.commit()
# Now close out
if self._DBCursor:
self._DBCursor.close()
# No need to check here, if it's None the except will catch and ignore
self._DBConnection.close()
except Exception as e:
print("Exception in DBWriter::__del__: {}".format(e))
# ******************************************************************************************************************
def _ResetSQLConnection(self) -> bool:
"""
Psycopg2 tends to just stop working sometimes on error, so here we just reset our connection
"""
try:
if self._DBCursor:
self._DBCursor.close()
if self._DBConnection:
self._DBConnection.close()
self._DBConnection = psycopg2.connect(host=self._DBHost,
port=self._DBPort,
dbname=self._DBTable,
user=self._DBUser,
password=self._DBPassword)
self._DBCursor = self._DBConnection.cursor()
return True
except Exception as e:
print("Exception in DBWriter::_ResetSQLConnection: {}".format(e))
return False
# ******************************************************************************************************************
def _ReadConfiguration(self, inConfigObject: configparser.ConfigParser) -> bool:
"""
Use the passed-in ConfigParser object to set ourselves up
:return: True if OK, False otherwise
"""
try:
# Get the top level
DB = inConfigObject["DB"]
# Now set our member variables
self._DBHost = DB["DBHost"]
self._DBPort = DB["DBPort"]
self._DBUser = DB["DBUser"]
self._DBPassword = DB["DBPassword"]
self._DBTable = DB["DBTable"]
return True
except Exception as e:
print("Exception in DBWriter::_ReadConfiguration: {}".format(e))
self.good = False
return False
# ******************************************************************************************************************
def _CreateDBObjects(self) -> bool:
"""
Create the psycopg2 object and cursor for the object
:return: True if successful, False otherwise
"""
try:
self._DBConnection = psycopg2.connect(
"host={} port={} user={} password={} dbname={}".format(self._DBHost,
self._DBPort,
self._DBUser,
self._DBPassword,
self._DBTable))
if not self._DBConnection:
self.good = False
return False
self._DBCursor = self._DBConnection.cursor()
if not self._DBCursor:
self.good = False
return False
return True
except Exception as e:
self.good = False
print("Exception in DBWriter::_CreateDBObjects: {}".format(e))
return False
# ******************************************************************************************************************
def _InsertArticlePeople(self, inArticleID: int, inPeopleID: int):
"""
Insert the passed-in values to the database.
:param inArticleID: integer article identifier
:param inPeopleID: integer person identifier
:return: none
"""
try:
# Do not do anything if nothing passed in
if not inArticleID or not inPeopleID:
return
# Don't do anything if we are debugging
if inArticleID == -1:
return
print("INSERT INTO article_people (article_id, people_id) VALUES ({}, {});".format(inArticleID, inPeopleID))
self._DBCursor.execute("INSERT INTO article_people (article_id, people_id) VALUES (%s, %s);",
(inArticleID, inPeopleID))
self._DBConnection.commit()
except Exception as e:
print("Exception in DBWriter::_InsertArticlePeople: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _InsertArticleLocations(self, inArticleID: int, inLocationID: int):
"""
Insert the passed-in values to the database.
:param inArticleID: integer article identifier
:param inLocationID: integer location identifier
:return: none
"""
try:
# Do not do anything if nothing passed in
if not inArticleID or not inLocationID:
return
# Don't do anything if we are debugging
if inArticleID == -1:
return
print("INSERT INTO article_locations (article_id, location_id) VALUES ({}, {});".format(inArticleID,
inLocationID))
self._DBCursor.execute("INSERT INTO article_locations (article_id, location_id) VALUES (%s, %s);",
(inArticleID, inLocationID))
self._DBConnection.commit()
except Exception as e:
print("Exception in DBWriter::_InsertArticleLocations: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _InsertArticleFacilities(self, inArticleID: int, inFacilityID: int):
"""
Insert the passed-in values to the database.
:param inArticleID: integer article identifier
:param inFacilityID: integer facility identifier
:return: none
"""
try:
# Do not do anything if nothing passed in
if not inArticleID or not inFacilityID:
return
# Don't do anything if we are debugging
if inArticleID == -1:
return
print("INSERT INTO article_facilities (article_id, facility_id) VALUES ({}, {});".format(inArticleID,
inFacilityID))
self._DBCursor.execute("INSERT INTO article_facilities (article_id, facility_id) VALUES (%s, %s);",
(inArticleID, inFacilityID))
self._DBConnection.commit()
except Exception as e:
print("Exception in DBWriter::_InsertArticleFacilities: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _InsertArticleOrganizations(self, inArticleID: int, inOrganizationID: int):
"""
Insert the passed-in values to the database.
:param inArticleID: integer article identifier
:param inOrganizationID: integer organization identifier
:return: none
"""
try:
# Do not do anything if nothing passed in
if not inArticleID or not inOrganizationID:
return
# Don't do anything if we are debugging
if inArticleID == -1:
return
print("INSERT INTO article_organizations (article_id, organization_id) VALUES ({}, {});".format(inArticleID,
inOrganizationID))
self._DBCursor.execute("INSERT INTO article_organizations (article_id, organization_Id) VALUES (%s, %s);",
(inArticleID, inOrganizationID))
self._DBConnection.commit()
except Exception as e:
print("Exception in DBWriter::_InsertArticleOrganizations: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _InsertArticleEvents(self, inArticleID: int, inEventID: int):
"""
Insert the passed-in values to the database.
:param inArticleID: integer article identifier
:param inEventID: integer event identifier
:return: none
"""
try:
# Do not do anything if nothing passed in
if not inArticleID or not inEventID:
return
# Don't do anything if we are debugging
if inArticleID == -1:
return
print("INSERT INTO article_events (article_id, event_id) VALUES ({}, {});".format(inArticleID, inEventID))
self._DBCursor.execute("INSERT INTO article_events (article_id, event_id) VALUES (%s, %s);",
(inArticleID, inEventID))
self._DBConnection.commit()
except Exception as e:
print("Exception in DBWriter::_InsertArticleEvents: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _InsertPerson(self, inPersonName: str) -> int:
"""
        Inserts a person into the database and returns the id of the inserted row
:param inPersonName: string to insert
:return: integer representing the id of the added person, -1 on error
"""
try:
if not inPersonName:
return -1
print("INSERT INTO people (name) VALUES ({}) RETURNING id;".format(inPersonName))
self._DBCursor.execute("INSERT INTO people (name) VALUES (%s) RETURNING id;", (inPersonName,))
self._DBConnection.commit()
result = self._DBCursor.fetchone()
if result:
return result[0]
else:
return -1
except Exception as e:
print("Exception in DBWriter::_InsertPerson: {}".format(e))
self._ResetSQLConnection()
return -1
# ******************************************************************************************************************
def _InsertLocation(self, inLocationName: str) -> int:
"""
        Inserts a location into the database and returns the id of the inserted row
:param inLocationName: string to insert
:return: integer representing the id of the added location, -1 on error
"""
try:
if not inLocationName:
return -1
print("INSERT INTO locations (name) VALUES ({}) RETURNING id;".format(inLocationName))
self._DBCursor.execute("INSERT INTO locations (name) VALUES (%s) RETURNING id;", (inLocationName,))
self._DBConnection.commit()
result = self._DBCursor.fetchone()
if result:
return result[0]
else:
return -1
except Exception as e:
print("Exception in DBWriter::_InsertLocation: {}".format(e))
self._ResetSQLConnection()
return -1
# ******************************************************************************************************************
def _InsertFacility(self, inFacilityName: str) -> int:
"""
        Inserts a facility into the database and returns the id of the inserted row
:param inFacilityName: string to insert
:return: integer representing the id of the added facility, -1 on error
"""
try:
if not inFacilityName:
return -1
print("INSERT INTO facilities (name) VALUES ({}) RETURNING id;".format(inFacilityName))
self._DBCursor.execute("INSERT INTO facilities (name) VALUES (%s) RETURNING id;", (inFacilityName,))
self._DBConnection.commit()
result = self._DBCursor.fetchone()
if result:
return result[0]
else:
return -1
except Exception as e:
print("Exception in DBWriter::_InsertFacility: {}".format(e))
self._ResetSQLConnection()
return -1
# ******************************************************************************************************************
def _InsertOrganization(self, inOrganizationName: str) -> int:
"""
        Inserts an organization into the database and returns the id of the inserted row
:param inOrganizationName: string to insert
:return: integer representing the id of the added organization, -1 on error
"""
try:
if not inOrganizationName:
return -1
print("INSERT INTO organizations (name) VALUES ({}) RETURNING id;".format(inOrganizationName))
self._DBCursor.execute("INSERT INTO organizations (name) VALUES (%s) RETURNING id;",
(inOrganizationName,))
self._DBConnection.commit()
result = self._DBCursor.fetchone()
if result:
return result[0]
else:
return -1
except Exception as e:
print("Exception in DBWriter::_InsertOrganization: {}".format(e))
self._ResetSQLConnection()
return -1
# ******************************************************************************************************************
def _InsertEvent(self, inEventName: str) -> int:
"""
        Inserts an event into the database and returns the id of the inserted row
:param inEventName: string to insert
:return: integer representing the id of the added event, -1 on error
"""
try:
if not inEventName:
return -1
print("INSERT INTO event (name) VALUES ({}) RETURNING id;".format(inEventName))
self._DBCursor.execute("INSERT INTO event (name) VALUES (%s) RETURNING id;", (inEventName,))
self._DBConnection.commit()
result = self._DBCursor.fetchone()
if result:
return result[0]
else:
return -1
except Exception as e:
print("Exception in DBWriter::_InsertEvent: {}".format(e))
self._ResetSQLConnection()
return -1
# ******************************************************************************************************************
def _ProcessPeople(self, inArticleID: int, inPeopleList: list):
"""
Takes the input list of people and article id. Searches the DB for a match for the person name. If found, add
an entry to the ArticlesPeople otherwise add a new person and then add the entry.
:param inArticleID: Integer identifier of the added article
:param inPeopleList: list of people to link to the article
:return:
"""
try:
for person in inPeopleList:
personID = self._GetUniqueCommon(person, "people")
if personID != -1:
self._InsertArticlePeople(inArticleID, personID)
else:
# We could not (reliably) find a single match in the DB. Add the person and then the
# article link.
personID = self._InsertPerson(person)
if personID != -1:
self._InsertArticlePeople(inArticleID, personID)
except Exception as e:
print("Exception in DBWriter::_ProcessPeople: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _ProcessLocations(self, inArticleID: int, inLocationList: list):
"""
Takes the input list of locations and article id. Searches the DB for a match for the location name. If found,
add an entry to the ArticlesLocations, otherwise add a new location and then add the entry.
:param inArticleID: Integer identifier of the added article
:param inLocationList: list of locations to link to the article
:return:
"""
        # FIXME: This needs to be modified to do a better job finding locations (i.e., finish searching for other fields logic, etc.)
adminList = None
print("Locations: *****************************")
print(inLocationList)
try:
for location in inLocationList:
# Run location disambiguation case 1 (check for a unique location result)
locationID = self._GetUniqueCommon(location, "locations")
if locationID != -1:
self._InsertArticleLocations(inArticleID, locationID)
continue
# For Case 2, we first get check the list of locations to see if any of them are countries or states
# If so, we check the other locations to see if they are in the list of countries that we got back.
# Populate adminList if we haven't already
if not adminList:
adminList = self._FindCountries(inLocationList)
# If we handled it here, move to the next location
if self._LocationCaseTwo(inArticleID, location, adminList):
continue
# Are there multiple exact name matches? If so pick the one with the highest population
if self._LocationCaseThree(inArticleID, location):
continue
# So if we get here, we could not (reliably) find a single match in the DB. Add the location and then
# the article link.
locationID = self._InsertLocation(location)
if locationID != -1:
self._InsertArticleLocations(inArticleID, locationID)
except Exception as e:
print("Exception in DBWriter::_ProcessLocations: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _ProcessFacilities(self, inArticleID: int, inFacilityList: list):
"""
Takes the input list of facilities and article id. Searches the DB for a match for the facility name. If found,
add an entry to the ArticlesFacilities, otherwise add a new facility and then add the entry.
:param inArticleID: Integer identifier of the added article
:param inFacilityList: list of facilities to link to the article
:return:
"""
try:
for facility in inFacilityList:
facilityID = self._GetUniqueCommon(facility, "facilities")
if facilityID != -1:
self._InsertArticleFacilities(inArticleID, facilityID)
else:
                    # We could not (reliably) find a single match in the DB. Add the facility and then the
# article link.
facilityID = self._InsertFacility(facility)
if facilityID != -1:
self._InsertArticleFacilities(inArticleID, facilityID)
except Exception as e:
print("Exception in DBWriter::_ProcessFacilities: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _ProcessOrganizations(self, inArticleID: int, inOrganizationList: list):
"""
Takes the input list of organizations and article id. Searches the DB for a match for the organization name.
If found, add an entry to the ArticlesOrganizations, otherwise add a new organization and then add the entry.
:param inArticleID: Integer identifier of the added article
:param inOrganizationList: list of organizations to link to the article
:return:
"""
try:
for organization in inOrganizationList:
organizationID = self._GetUniqueCommon(organization, "organizations")
if organizationID != -1:
self._InsertArticleOrganizations(inArticleID, organizationID)
else:
                    # We could not (reliably) find a single match in the DB. Add the organization and then the
# article link.
organizationID = self._InsertOrganization(organization)
if organizationID != -1:
self._InsertArticleOrganizations(inArticleID, organizationID)
except Exception as e:
print("Exception in DBWriter::_ProcessOrganizations: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def _ProcessEvents(self, inArticleID: int, inEventList: list):
"""
Takes the input list of events and article id. Searches the DB for a match for the event name.
If found, add an entry to the ArticlesEvents, otherwise add a new event and then add the entry.
:param inArticleID: Integer identifier of the added article
:param inEventList: list of events to link to the article
:return:
"""
try:
for event in inEventList:
eventID = self._GetUniqueCommon(event, "event")
if eventID != -1:
self._InsertArticleEvents(inArticleID, eventID)
else:
                    # We could not (reliably) find a single match in the DB. Add the event and then the
# article link.
eventID = self._InsertEvent(event)
if eventID != -1:
self._InsertArticleEvents(inArticleID, eventID)
except Exception as e:
print("Exception in DBWriter::_ProcessEvents: {}".format(e))
self._ResetSQLConnection()
# ******************************************************************************************************************
def WriteEntries(self, inArticleObject: ExtractedArticle):
"""
This is the main part of the thread that pulls from the global queue and then writes it into the database
:return:
"""
try:
# Push the article metadata to the DB and get the assigned identifier for it.
print("""
INSERT INTO news_articles (article_title, article_url, article_text, website)
VALUES ({}, {}, {}, {}) RETURNING id;
""".format(inArticleObject.articleTitle, inArticleObject.articleURL, inArticleObject.articleText,
inArticleObject.website))
self._DBCursor.execute("""
INSERT INTO news_articles (article_title, article_url, article_text, website)
VALUES (%s, %s, %s, %s) RETURNING id;
""",
(inArticleObject.articleTitle, inArticleObject.articleURL,
inArticleObject.articleText, inArticleObject.website))
self._DBConnection.commit()
# Get the ID of the inserted object
articleID = self._DBCursor.fetchone()[0]
# Now populate the article_people table
self._ProcessPeople(articleID, inArticleObject.people)
# Now populate the article_facilities table
self._ProcessFacilities(articleID, inArticleObject.facilities)
# Now populate the article_organizations table
self._ProcessOrganizations(articleID, inArticleObject.organizations)
# Now populate the article_events table
self._ProcessEvents(articleID, inArticleObject.events)
# Now populate the article_locations table
self._ProcessLocations(articleID, inArticleObject.locations)
except Exception as e:
print("Exception in DBWriter::WriteEntries: {}".format(e))
self.good = False
self._ResetSQLConnection()
# ******************************************************************************************************************
def run(self):
"""
Override to handle the main loop of the process
:return:
"""
continueFlag = True
while continueFlag:
articleObject = self._queue.get()
# Check for our poison pill
if articleObject.articleText == "EXITCALLED":
continueFlag = False
continue
self.WriteEntries(articleObject)
# ******************************************************************************************************************
def _LocationCaseTwo(self, inArticleID: int, inLocation: str, inAdminList: List[Tuple]) -> bool:
"""
Check to see if any of the locations are in any of the administrative areas in adminList. We commit IFF we get
a single match for any of the locations.
:param inArticleID: ID of the article we're processing
:param inLocation: Input location string to check
        :param inAdminList: list of tuples with administrative names and their CC2 codes.
:return: True if we wrote the entry to the database
"""
if not inAdminList or not inLocation:
return False
try:
for tempAdmin in inAdminList:
# FIXME: Skip for now if it's a country, may want to add it in the future.
# if inLocation in dict(inAdminList):
# continue
# First check if we get a single match for the location vs the country.
self._DBCursor.execute("SELECT id FROM locations WHERE name = %s AND cc2 = %s;",
(inLocation, tempAdmin[1]))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
self._InsertArticleLocations(inArticleID, locationID[0])
return True
elif self._DBCursor.rowcount > 1:
# Pick the match with the highest population.
self._DBCursor.execute("""
SELECT id, population FROM locations WHERE name = %s AND cc2 = %s AND
population IS NOT NULL ORDER BY population DESC LIMIT 1;
""", (inLocation, tempAdmin[1]))
self._DBConnection.commit()
locationID = self._DBCursor.fetchone()
self._InsertArticleLocations(inArticleID, locationID[0])
return True
# Second, check for a similarity score but we only use it if we get a single result.
self._DBCursor.execute("""
SELECT id FROM locations WHERE SIMILARITY(name, %s) > 0.5 AND cc2 = %s;
""", (inLocation, tempAdmin[1]))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
self._InsertArticleLocations(inArticleID, locationID[0])
return True
# Check aliases
# Second check: Look in the aliases column
nameString = inLocation.replace("'", "''").replace('"', '\\"')
sqlString = "SELECT id FROM locations WHERE aliases @> '{%s}'" % nameString
sqlString = sqlString + " AND cc2 = %s;"
self._DBCursor.execute(sqlString, (tempAdmin[1],))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
                    # Only add if we got a single result (since there are cases of multiple Northern Provinces, etc.)
locationID = self._DBCursor.fetchone()
if locationID:
self._InsertArticleLocations(inArticleID, locationID[0])
return True
except Exception as e:
print("DBWriter::_LocationCaseTwo Exception: {}".format(e))
self._ResetSQLConnection()
return False
# If we got here, we didn't find a perfect match.
return False
# ******************************************************************************************************************
def _LocationCaseThree(self, inArticleID: int, inLocation: str) -> bool:
"""
Perform a search. If we get multiple name matches, pick the one with the highest population.
:param inArticleID: ID of the article we're processing
:param inLocation: Input location string to check
:return: True if we wrote the entry to the database
"""
if not inArticleID or not inLocation:
return False
try:
self._DBCursor.execute("""
SELECT id, population FROM locations WHERE name = %s AND
population IS NOT NULL ORDER BY population DESC;
""", (inLocation, ))
self._DBConnection.commit()
if self._DBCursor.rowcount > 1:
locationID = self._DBCursor.fetchone()
self._InsertArticleLocations(inArticleID, locationID[0])
return True
except Exception as e:
print("DBWriter::_LocationCaseThree Exception: {}".format(e))
self._ResetSQLConnection()
return False
# If we got here, we didn't find any matches.
return False
# ******************************************************************************************************************
def _FindCountries(self, inLocations: list) -> List[Tuple]:
"""
Check the input list and return a new list if the name in the list is in the locations db with a fclasscode of
A.ADM1
:param inLocations: list of location names to check
:return: list of locations that are countries or states
"""
returnList = list()
try:
for location in inLocations:
# First, Is it an exact name match?
self._DBCursor.execute("SELECT name, cc2 from countries where name = %s;", (location,))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
                    # Only add if we got a single result (since there are cases of multiple Northern Provinces, etc.)
locationID = self._DBCursor.fetchone()
if locationID:
returnList.append((locationID[0], locationID[1]))
continue
# Second check: Look in the aliases column
nameString = location.replace("'", "''").replace('"', '\\"')
sqlString = "SELECT name, cc2 FROM countries WHERE aliases @> '{%s}';" % nameString
self._DBCursor.execute(sqlString)
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
                    # Only add if we got a single result (since there are cases of multiple Northern Provinces, etc.)
locationID = self._DBCursor.fetchone()
if locationID:
returnList.append((locationID[0], locationID[1]))
continue
# Third check: Look for a single row that has a similarity > 0.5 (IFF there was a single result)
self._DBCursor.execute("SELECT name, cc2 FROM countries WHERE SIMILARITY(name, %s) > 0.5;",
(location,))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
if locationID:
returnList.append((locationID[0], locationID[1]))
continue
# Fourth check. Check similarity in the aliases column.
self._DBCursor.execute("""
SELECT name, cc2 FROM countries, UNNEST(countries.aliases) p
WHERE SIMILARITY(p, %s) > 0.5;
""", (location,))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
if locationID:
returnList.append((locationID[0], locationID[1]))
continue
except Exception as e:
print("Got exception in DBWriter::_FindCountries: {}".format(e))
return list()
return returnList
# ******************************************************************************************************************
def _GetUniqueCommon(self, inName: str, inTable: str) -> int:
"""
This logic was broken out as several functions use it as part of trying to find a match for an input name, be it
        a person, a place, or a thing.
:param inName: Input name to search for
:param inTable: Input table name for the query
:return: ID of the found name, -1 otherwise
"""
try:
# First check for exact matches
self._DBCursor.execute(sql.SQL("SELECT id FROM {} WHERE name = %s;").format(sql.Identifier(inTable)),
(inName,))
self._DBConnection.commit()
# We got a single result on the location from the Locations DB so just use it.
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
if locationID:
return int(locationID[0])
# Second check: look in the aliases column for an exact match unless it's locations
# Create the SQL string. We're trying to shoehorn a string here so need to do it twice because psycopg2
# sql module does not like having @> in a string.
nameString = inName.replace("'", "''").replace('"', '\\"')
sqlString = "SELECT id FROM {} ".format(inTable)
sqlString = sqlString + "WHERE aliases @> '{%s}';" % nameString
self._DBCursor.execute(sqlString)
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
if locationID:
return int(locationID[0])
# Third check: Look for a single row that has a similarity > 0.5 (IFF there was a single result)
self._DBCursor.execute(sql.SQL("SELECT id FROM {} WHERE SIMILARITY(name, %s) > 0.5;").
format(sql.Identifier(inTable)), (inName, ))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
if locationID:
return int(locationID[0])
#if inTable != "locations":
# Fourth check: Look for a match in aliases with a similarity() > 0.5 (IFF there was a single result)
self._DBCursor.execute(sql.SQL(
"""
SELECT id FROM {}, UNNEST({}.aliases) p WHERE SIMILARITY(p, %s) > 0.5;
""").format(sql.Identifier(inTable), sql.Identifier(inTable)), (inName,))
self._DBConnection.commit()
if self._DBCursor.rowcount == 1:
locationID = self._DBCursor.fetchone()
if locationID:
return int(locationID[0])
except Exception as e:
print("Exception in DBWriter::_GetUniqueCommon: {}".format(e))
return -1
# if we made it here we didn't find anything
return -1
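# Illustrative wiring sketch (not part of the original module). The config keys match
# _ReadConfiguration above and the poison pill matches run(); constructing an ExtractedArticle
# this way is an assumption about the websites module, and the connection values are made up.
if __name__ == '__main__':
    example_config = configparser.ConfigParser()
    example_config.read_dict({'DB': {'DBHost': 'localhost', 'DBPort': '5432', 'DBUser': 'writer',
                                     'DBPassword': 'secret', 'DBTable': 'articles'}})
    article_queue = multiprocessing.Queue()
    writer = DBWriter(article_queue, example_config)
    if writer.good:
        writer.start()
        # ... producer processes put ExtractedArticle objects onto article_queue here ...
        poison = ExtractedArticle()
        poison.articleText = "EXITCALLED"   # poison pill recognized in run()
        article_queue.put(poison)
        writer.join()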
|
#!/usr/bin/env python3
'''
Polychora: Python animation of uniform polychora in 4 and higher dimensions
Author: M. Patrick Kelly
Email: patrickyunen@gmail.com
Last Updated: 12-9-2020
'''
from itertools import product, permutations
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle
dimension = 4
views = {3:-4, 4:-3, 5:-3, 6:-3} #Camera positions for projection
#edge_width = 1 #Hypercube edge width; will be scaled for depth cueing
class Nhypercube():
# Stores vertices and edges of an N-dimensional hypercube
def __init__(self, dimension=4):
points = [-1,1]
self.dimension = dimension
self.vertices = np.asarray(list(product(points, repeat=dimension)))
self.edges = [(j,k)
for j in range(2**dimension)
for k in range(j,2**dimension)
if sum(abs(self.vertices[j] - self.vertices[k])) == 2]
class Dodecaplex():
# Stores vertices and edges of a 120-cell or dodecaplex
# Coordinates calculated in advance and saved to file
def __init__(self, scale=1):
self.vertices = scale*np.load('dodecaplex.npy')
self.edges = []
with open('dodecaplex_edges.txt', 'r') as fh:
for line in fh.readlines():
my_tuple = tuple(int(line.split()[i]) for i in range(2))
self.edges.append(my_tuple)
def get_random_rotation(N, theta):
# Returns N-dimensional rotation matrix of theta radians w/ random orientation
# Two random vectors define a hyperplane of rotation
v1 = np.array([np.random.uniform(-1, 1) for i in range(N)])
v2 = np.array([np.random.uniform(-1, 1) for i in range(N)])
# Use Gram-Schmidt to orthogonalize these vectors
u2 = v2 - (np.dot(v1, v2) / np.dot(v1, v1)) * v1
# Then normalize
normed1 = v1 / np.sqrt((np.dot(v1, v1)))
normed2 = u2 / np.sqrt((np.dot(u2, u2)))
# Plug into the generalized N-dimensional Rodrigues rotation formula:
# R = I + ((n2⨂n1)-(n1⨂n2))sin(α) + ((n1⨂n1)+(n2⨂n2))(cos(α)-1)
M1 = np.identity(N)
M2 = np.sin(theta) * (np.outer(normed2, normed1) - np.outer(normed1, normed2))
M3 = (np.cos(theta) - 1) * (np.outer(normed1, normed1) + np.outer(normed2, normed2))
return M1 + M2 + M3
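def check_rotation(R):
    # Illustrative sanity check (not part of the original script, and not called anywhere):
    # a matrix returned by get_random_rotation should be orthogonal (R @ R.T == I) with
    # determinant +1, i.e. a proper rotation of N-dimensional space.
    N = R.shape[0]
    assert np.allclose(R @ R.T, np.identity(N))
    assert np.isclose(np.linalg.det(R), 1.0)
    return True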
def project_from_N(D, N, vecs):
# Project from dimension N to dimension N-1 w.r.t. camera at D
#Array indexing starts at 0, so decrement dimension by 1
N -= 1
#Convert to homogeneous coordinates
quotient = vecs[:,N] - D
newvecs = (((vecs.T) / quotient).T)
#Restore the original array values from index N on for depth cueing
newvecs[:,N:] = vecs[:,N:]
return newvecs
def plot_line(axes, point1, point2, edge_width):
# Plot line from pt1 to pt2, w/ line thickness scaled for depth cueing
width1 = edge_width / (point1[-1] - views[list(views.keys())[-1]])
width2 = edge_width / (point2[-1] - views[list(views.keys())[-1]])
width_range = np.linspace(width1, width2, 1000)
del_x = np.linspace(point1[0], point2[0], 1000)
del_y = np.linspace(point1[1], point2[1], 1000)
lwidths = width_range
points = np.array([del_x,del_y]).T.reshape(-1,1,2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
axes.add_collection(LineCollection(segments, linewidths=lwidths,color='w', alpha=1.0))
return axes
def plot_vertex(axes, vertex, size=0.01):
# Plot vertex node, w/ node size scaled for depth cueing
c0 = Circle((vertex[0], vertex[1]), size/(vertex[-1] - views[list(views.keys())[-1]]),
fc='r', ec='r', zorder=10)
axes.add_patch(c0)
return axes
# Set up the figure in matplotlib
fig, ax = plt.subplots(1, 3)
fig.set_figheight(6)
fig.set_figwidth(18)
# Set up three axes
for k in range(3):
ax[k].set(adjustable='box', aspect='equal')
ax[k].axes.xaxis.set_visible(False)
ax[k].axes.yaxis.set_visible(False)
ax[k].set_facecolor((0.5, 0.5, 0.5))
# Instantiate a 4-hypercube, a dodecaplex, and a 6-hypercube
hc4 = Nhypercube(4)
ddp = Dodecaplex()
hc6 = Nhypercube(6)
# Cycle through two full rotations
steps = 2
counter = 0
for step in range(steps):
frames = 100
d_theta = 2 * np.pi / (frames)
rot_hc4 = get_random_rotation(4, d_theta)
rot_ddp = get_random_rotation(4, d_theta)
rot_hc6 = get_random_rotation(6, d_theta)
for ind in range(frames):
print(f'{counter}/{steps*frames}') #To indicate progress
# Clear the axes after each frame
ax[0].clear()
ax[1].clear()
ax[2].clear()
ax[0].set_title('Hypercube (4-dimensional)', fontsize=14, fontweight='bold')
ax[1].set_title('Dodecaplex (4-dimensional)', fontsize=14, fontweight='bold')
ax[2].set_title('Hypercube (6-dimensional)', fontsize=14, fontweight='bold')
# Project from dimension N to dimension N - 1.
# Repeat until we reach dimension 2 (the display screen).
pts_hc4 = hc4.vertices
for k in range(4, 2, -1):
pts_hc4 = project_from_N(views[k], k, pts_hc4)
pts_ddp = ddp.vertices
for k in range(4, 2, -1):
pts_ddp = project_from_N(views[k], k, pts_ddp)
pts_hc6 = hc6.vertices
for k in range(6,2,-1):
pts_hc6 = project_from_N(views[k], k, pts_hc6)
# Plot the vertices (only for the 4-hypercube)
        for vertex in range(hc4.vertices.shape[0]):  # circle markers at each projected vertex
plot_vertex(ax[0], pts_hc4[vertex])
# Plot the edges
for j, k in hc4.edges:
plot_line(ax[0], pts_hc4[j,:], pts_hc4[k,:], 5)
for j, k in ddp.edges:
plot_line(ax[1], pts_ddp[j,:], pts_ddp[k,:], 1)
for j, k in hc6.edges:
plot_line(ax[2], pts_hc6[j,:], pts_hc6[k,:], 3)
# Set axes limits
ax[0].axis('equal')
ax[0].axis([-0.4, 0.4, -0.4, 0.4])
ax[1].axis('equal')
ax[1].axis([-1.8, 1.8, -1.8, 1.8])
ax[2].axis('equal')
ax[2].axis([-0.06, 0.06, -0.06, 0.06])
# Create & save the image
plt.savefig('frames/anim{:04d}.png'.format(counter), dpi=100)
counter += 1
# Incremental rotation of each object
hc4.vertices = np.dot(hc4.vertices, rot_hc4)
ddp.vertices = np.dot(ddp.vertices, rot_ddp)
hc6.vertices = np.dot(hc6.vertices, rot_hc6)
|
# Generated by Django 3.0.4 on 2020-03-26 03:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('homechef', '0003_vendor_food_items'),
]
operations = [
migrations.CreateModel(
name='Ingredients',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='fooditem',
name='ingredients',
field=models.ManyToManyField(to='homechef.Ingredients'),
),
]
|
# @Author: Jacob A Rose
# @Date: Tue, March 31st 2020, 1:24 am
# @Email: jacobrose@brown.edu
# @Filename: __init__.py
'''
pyleaves/pyleaves/base/__init__.py
base submodule of the pyleaves package
contains base classes for use throughout other submodules in pyleaves
'''
# import pdb;pdb.set_trace();print(__file__)
from . import base_data_manager
# import pdb;pdb.set_trace();print(__file__)
from . import base_model
# import pdb;pdb.set_trace();print(__file__)
from . import base_trainer
|
#!/usr/bin/env python3
w, a, b = map(int,input().split())
s = abs(a-b)
print([0, s-w][s>w])  # picks s-w when s > w, otherwise 0, i.e. max(0, s - w)
|
from apps.workflow.models import CustomField
from service.base_service import BaseService
from service.common.log_service import auto_log
class WorkflowCustomFieldService(BaseService):
def __init__(self):
pass
@classmethod
@auto_log
def get_workflow_custom_field(cls, workflow_id):
"""
        Get the custom field information for a workflow
:param workflow_id:
:return:
"""
custom_field_queryset = CustomField.objects.filter(workflow_id=workflow_id, is_deleted=0).all()
format_custom_field_dict = {}
for custom_field in custom_field_queryset:
format_custom_field_dict[custom_field.field_key] = dict(workflow_id=custom_field.workflow_id, field_type_id=custom_field.field_type_id,
field_name=custom_field.field_name, order_id=custom_field.order_id,
default_value=custom_field.default_value, description=custom_field.description,
field_template=custom_field.field_template, boolean_field_display=custom_field.boolean_field_display,
field_choice=custom_field.field_choice)
return format_custom_field_dict, ''
|
import ctypes as C
import os
from typing import Final, Sequence, TypeVar
from astruct import typed_struct
from astruct.type_hints import *
from utils import CountedTable, ro_cached_property
from .classoritem import ClassOrItemTable
from .dungeoncategory import DungeonCategoryTable
from .fusioncompat import FusionCompatibilityTable
from .randomevent import RandomEventTable
from .skills import SkillTable
from .title import TitleTable
E = TypeVar('E', bound=AnyCType)
@typed_struct
class StartDatHeader(C.Structure):
_pack_ = 1
file_count: CUInt32 # TODO: this is probably 64 bits
_zero: CUInt32Array[3]
@typed_struct
class RawStartDatFileEntry(C.Structure):
_pack_ = 1
raw_end_offset: CUInt32
filename: CStr[28]
# TODO: combine this with RawStartDatFileEntry?
class StartDatFileEntry:
filename: str
offset: int
size: int
def __init__(self, raw_file: RawStartDatFileEntry, base_offset: int, file_offset: int) -> None:
self.filename = raw_file.filename
self.offset = file_offset
self.size = base_offset + raw_file.raw_end_offset - file_offset
class StartDatArchive:
STANDARD_FILENAME: Final = 'START.DAT'
_buffer: WriteableBuffer
_base_offset: int
_header: StartDatHeader
_raw_files: C.Array[RawStartDatFileEntry]
files: Sequence[StartDatFileEntry]
def __init__(self, buffer: WriteableBuffer, offset: int = 0) -> None:
self._buffer = buffer
self._base_offset = offset
self._header = StartDatHeader.from_buffer(self._buffer, # type: ignore[arg-type]
self._base_offset)
file_entries_offset = self._base_offset + C.sizeof(StartDatHeader)
RawFileEntriesArray = RawStartDatFileEntry * self._header.file_count
self._raw_files = RawFileEntriesArray.from_buffer(self._buffer, # type: ignore[arg-type]
file_entries_offset)
self._make_file_wrappers()
def _make_file_wrappers(self) -> None:
self.files = []
# Entries only record the offset of their final byte, relative to the
# beginning of the data. So the first file begins at the end of the
# file listing:
first_file_data_offset = self._base_offset + \
C.sizeof(self._header) + \
C.sizeof(self._raw_files)
offset = first_file_data_offset
for raw_file in self._raw_files:
wrapper = StartDatFileEntry(raw_file, first_file_data_offset, offset)
offset += wrapper.size
self.files.append(wrapper)
def find_file(self, name: str) -> StartDatFileEntry:
for file in self.files:
if file.filename == name:
return file
raise KeyError(f'File {name!r} not found in archive')
def get_file_as_table(self, filename: str, element_cls: type[E]) -> CountedTable[E]:
file_entry = self.find_file(filename)
return CountedTable(element_cls, self._buffer, file_entry.offset)
def extract_to_directory(self, dirname: str) -> None:
for f in self.files:
print(f'Extracting {f.filename} @ {f.offset:#x}: {f.size} bytes')
with open(os.path.join(dirname, f.filename), 'wb') as o:
o.write(self._buffer[f.offset:f.offset + f.size])
@ro_cached_property
def skilltab(self) -> SkillTable:
file_entry = self.find_file(SkillTable.STANDARD_FILENAME)
return SkillTable(self._buffer, file_entry.offset)
@ro_cached_property
def cattab(self) -> DungeonCategoryTable:
file_entry = self.find_file(DungeonCategoryTable.STANDARD_FILENAME)
return DungeonCategoryTable(self._buffer, file_entry.offset)
@ro_cached_property
def classtab(self) -> ClassOrItemTable:
file_entry = self.find_file(ClassOrItemTable.STANDARD_FILENAME)
return ClassOrItemTable(self._buffer, file_entry.offset)
@ro_cached_property
def compattab(self) -> FusionCompatibilityTable:
file_entry = self.find_file(FusionCompatibilityTable.STANDARD_FILENAME)
return FusionCompatibilityTable(self._buffer, file_entry.offset)
@ro_cached_property
def titletab(self) -> TitleTable:
file_entry = self.find_file(TitleTable.STANDARD_FILENAME)
return TitleTable(self._buffer, file_entry.offset)
@ro_cached_property
def eventtab(self) -> RandomEventTable:
file_entry = self.find_file(RandomEventTable.STANDARD_FILENAME)
return RandomEventTable(self._buffer, file_entry.offset)
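# Illustrative usage sketch (not part of the original module): load START.DAT into a writeable
# buffer and extract its contents. The path is hypothetical and assumed to be in the working
# directory.
if __name__ == '__main__':
    with open(StartDatArchive.STANDARD_FILENAME, 'rb') as fh:
        buf = bytearray(fh.read())       # ctypes .from_buffer requires a writeable buffer
    archive = StartDatArchive(buf)
    print(f'{len(archive.files)} files in archive')
    archive.extract_to_directory('.')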
|
# Generated by Django 3.0.3 on 2020-04-12 04:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customers', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='tel',
field=models.IntegerField(),
),
]
|
import filecmp
import pytest
from sequana.freebayes_vcf_filter import VCF_freebayes
from . import test_dir
sharedir = f"{test_dir}/data/vcf/"
def test_vcf_filter(tmpdir):
path = tmpdir.mkdir("temp")
vcf_output_expected = f"{sharedir}/JB409847.expected.vcf"
v = VCF_freebayes(f"{sharedir}/JB409847.vcf")
filter_dict = {'freebayes_score': 200, 'frequency': 0.85, 'min_depth': 10,
'forward_depth': 3, 'reverse_depth': 3, 'strand_ratio': 0.3}
filter_v = v.filter_vcf(filter_dict)
assert len(filter_v.variants) == 24
with open(path + "/test.vcf", "w") as ft:
filter_v.to_vcf(ft.name)
compare_file = filecmp.cmp(ft.name, vcf_output_expected)
assert compare_file
def test_constructor():
with pytest.raises(OSError):
VCF_freebayes('dummy')
def test_to_csv(tmpdir):
path = tmpdir.mkdir("temp")
filter_dict = {'freebayes_score': 200, 'frequency': 0.85, 'min_depth': 20,
'forward_depth': 3, 'reverse_depth': 3, 'strand_ratio': 0.3}
v = VCF_freebayes(f"{sharedir}/JB409847.expected.vcf")
filter_v = v.filter_vcf(filter_dict)
assert len(filter_v.variants) == 3
with open(path + "/test.csv", "w") as ft:
filter_v.to_csv(ft.name)
def test_variant():
v = VCF_freebayes(f"{sharedir}/JB409847.vcf")
variants = v.get_variants()
assert len(variants) == 64
print(variants[0])
v = VCF_freebayes(f"{sharedir}/test_vcf_snpeff.vcf")
variants = v.get_variants()
assert len(variants) == 775
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='estool',
version='1.0',
description='Implementation of various Evolution Strategies',
py_modules=['config', 'es', 'env', 'model', 'train'],
)
|
import streamlit as st
import altair as alt
import pandas as pd
import json
import numpy as np
from pathlib import Path
from autobrat.classifier import Model
from autobrat.data import load_training_data
from scripts.score import subtaskA, subtaskB, compute_metrics
from scripts.utils import Collection
def get_corpora_list():
return [p for p in Path("/data/").iterdir() if p.is_dir()]
corpora = get_corpora_list()
corpus = st.selectbox("Select corpus", corpora, format_func=lambda p: p.name)
@st.cache
def load_training_logs():
data = []
with open("/data/results_assisted_training.json") as fp:
for line in fp:
data.append(json.loads(line))
return pd.DataFrame(data)
@st.cache
def load_threshold_logs():
data = []
with open("/data/results_threshold.json") as fp:
for line in fp:
d = {}
for k,v in json.loads(line).items():
if isinstance(v, dict):
for k2,v2 in v.items():
d[f"{k}_{k2}"] = v2
else:
d[k] = v
data.append(d)
return pd.DataFrame(data)
@st.cache
def load_collection(corpus):
return load_training_data(corpus)
MISSING_COST = st.sidebar.number_input("MISSING_COST", 0.0, 10.0, 1.0)
SPURIOUS_COST = st.sidebar.number_input("SPURIOUS_COST", 0.0, 10.0, 2.0)
INCORRECT_COST = st.sidebar.number_input("INCORRECT_COST", 0.0, 10.0, 0.5)
CORRECT_COST = st.sidebar.number_input("CORRECT_COST", 0.0, 10.0, 0.25)
PARTIAL_COST = st.sidebar.number_input("PARTIAL_COST", 0.0, 10.0, 0.25)
def compute_metrics2(
data,
missing_cost=MISSING_COST,
spurious_cost=SPURIOUS_COST,
incorrect_cost=INCORRECT_COST,
correct_cost=CORRECT_COST,
partial_cost=PARTIAL_COST,
skipA=False,
skipB=False,
):
metrics = compute_metrics(data, skipA=skipA, skipB=skipB)
correct = 0
partial = 0
incorrect = 0
missing = 0
spurious = 0
if not skipA:
correct += len(data["correct_A"])
incorrect += len(data["incorrect_A"])
partial += len(data["partial_A"])
missing += len(data["missing_A"])
spurious += len(data["spurious_A"])
if not skipB:
correct += len(data["correct_B"])
missing += len(data["missing_B"])
spurious += len(data["spurious_B"])
cost = (
missing_cost * missing
+ spurious_cost * spurious
+ incorrect_cost * incorrect
+ correct_cost * correct
+ partial_cost * partial
)
metrics["cost"] = cost
return metrics
def compute_score(true, predicted):
dataA = subtaskA(true, predicted)
dataB = subtaskB(true, predicted, dataA)
return dict(
subtaskA=compute_metrics2(dataA, skipB=True),
subtaskB=compute_metrics2(dataB, skipA=True),
overall=compute_metrics2(dict(dataA, **dataB)),
)
def load_all():
collection = load_collection(corpus)
training_size = st.number_input(
"Training size",
1,
len(collection.sentences),
int(len(collection.sentences) / 1.3),
)
testing_size = st.number_input(
"Testing size",
1,
len(collection.sentences) - training_size,
min(len(collection.sentences) - training_size, training_size),
)
training_collection = collection[:training_size]
testing_collection = collection[-testing_size:]
return collection, training_collection, testing_collection
callback_msg = st.empty()
callback_progress = st.empty()
def callback(msg, current, total):
callback_msg.markdown(f"{msg}: {current}/{total}")
callback_progress.progress(current / total)
experiment = st.selectbox(
"Experiment",
[
"Entities",
"Similarity",
"Relations",
"Full training",
"Assisted comparison",
"Pre-computed graphs",
],
)
if experiment == "Full training":
collection, training_collection, testing_collection = load_all()
negative_sampling = st.slider("Negative sampling", 0.0, 1.0, 0.25)
# max_entity_uncertainty = st.slider("Max entity uncertainty", 0.0, 10.0, 10.0)
# max_relation_uncertainty = st.slider("Max relation uncertainty", 0.0, 10.0, 10.0)
model = Model(training_collection, callback, negative_sampling=negative_sampling,)
# pool = st.text_area(
# "Sentences to score",
# """Entre los nutrientes se incluyen las proteínas, carbohidratos, grasas, vitaminas, minerales y agua.
# El moho está formado por hongos que pueden encontrarse en exteriores o interiores.
# Puede ser una lumpectomía o una mastectomía.
# Las estatinas son drogas usadas para bajar el colesterol.
# Los síndromes mielodisplásicos son poco comunes.""",
# ).split("\n")
if st.button("Train"):
fp = open("/data/results_threshold_temp.json", "w")
st.write("### Gold score")
blank_collection = testing_collection.clone()
for sentence in blank_collection:
sentence.keyphrases = []
sentence.relations = []
score = compute_score(testing_collection, blank_collection)
st.write(score)
model.train()
for e in np.arange(0, 3, 0.1):
for r in np.arange(0, 3, 0.1):
model.max_entity_uncertainty = e
model.max_relation_uncertainty = r
st.write(f"### Score with uncertainty for entity={e}; relation={r}")
predicted = model.predict(testing_collection.sentences)
score = compute_score(testing_collection, predicted)
# st.write(score)
# st.write(score)
score['entity_threshold'] = e
score['relation_threshold'] = r
fp.write(json.dumps(score) + "\n")
fp.flush()
fp.close()
# if pool:
# for s in pool:
# st.write(model.score_sentence(s, return_dict=True))
elif experiment == "Entities":
collection, training_collection, testing_collection = load_all()
model = Model(None, callback)
i = st.slider("Sentence", 0, len(collection) - 1, 0)
doc, features = model.entity_classifier.feature_sentence(collection[i].text)
st.write(pd.DataFrame(features))
elif experiment == "Relations":
collection, training_collection, testing_collection = load_all()
model = Model(training_collection, callback)
i = st.slider("Sentence", 0, len(collection) - 1, 0)
st.code(training_collection.sentences[i].text)
r = st.selectbox("Relation", training_collection.sentences[i].relations)
features = model.entity_classifier.relation_features(r)
st.write(features)
elif experiment == "Similarity":
collection, training_collection, testing_collection = load_all()
model = Model(training_collection, callback)
model.train_similarity()
correct_0 = 0
correct_5 = 0
for i, sentence in enumerate(training_collection.sentences):
doc, _ = model.entity_classifier.feature_sentence(sentence.text)
tokens = [token.text for token in doc]
inferred_vector = model.doc2vec.infer_vector(tokens)
sims = model.doc2vec.docvecs.most_similar([inferred_vector], topn=5)
j, _ = sims[0]
if j == i:
correct_0 += 1
if i in [sim[0] for sim in sims]:
correct_5 += 1
st.write(
f"Correct={correct_0}, ({correct_0 / len(training_collection):.2f}), Correct(5)={correct_5}, ({correct_5 / len(training_collection):.2f})"
)
i = st.slider("Sentence", 0, len(training_collection) - 1, 0)
    doc, _ = model.entity_classifier.feature_sentence(training_collection.sentences[i].text)
tokens = [token.text for token in doc]
st.code(tokens)
inferred_vector = model.doc2vec.infer_vector(tokens)
st.write(inferred_vector)
sims = model.doc2vec.docvecs.most_similar([inferred_vector], topn=5)
for i, v in sims:
st.code((v, training_collection.sentences[i].text))
elif experiment == "Assisted comparison":
collection, training_collection, testing_collection = load_all()
chart = st.altair_chart(
alt.Chart(pd.DataFrame())
.mark_line()
.encode(
x="batch:Q", y="value:Q", color="type:N", row="metric:N", column="task:N"
)
.properties(width=200, height=100,)
)
def full_training(
training_collection: Collection, testing_collection: Collection, batch_size
):
model = Model(training_collection, callback)
model.train()
predicted = model.predict(testing_collection.sentences)
score = compute_score(testing_collection, predicted)
for batch_end in range(batch_size, len(training_collection), batch_size):
for task in ["overall", "subtaskA", "subtaskB"]:
for metric in ["f1", "precision", "recall"]:
yield [
dict(
batch=batch_end,
value=score[task][metric],
type="full",
metric=metric,
task=task,
),
]
def random_training(
training_collection: Collection, testing_collection: Collection, batch_size
):
for batch_end in range(batch_size, len(training_collection), batch_size):
batch = training_collection[:batch_end]
model = Model(batch, callback)
model.train()
predicted = model.predict(testing_collection)
score = compute_score(testing_collection, predicted)
for task in ["overall", "subtaskA", "subtaskB"]:
for metric in ["f1", "precision", "recall"]:
yield [
dict(
batch=batch_end,
value=score[task][metric],
type="random",
metric=metric,
task=task,
),
]
def assisted_training(
training_collection: Collection,
testing_collection: Collection,
batch_size,
suggest_mode,
):
sentences_pool = set([s.text for s in training_collection.sentences])
training_pool = training_collection[:batch_size]
model = Model(training_pool, callback, suggest_mode=suggest_mode)
model.train()
for batch_end in range(
batch_size, len(training_collection) - batch_size, batch_size
):
suggestion_pool = list(
sentences_pool - set(s.text for s in training_pool.sentences)
)
suggestions = set(model.suggest(suggestion_pool, batch_size))
training_pool.sentences.extend(
s for s in training_collection.sentences if s.text in suggestions
)
model = Model(training_pool, callback)
model.train()
predicted = model.predict(testing_collection)
score = compute_score(testing_collection, predicted)
for task in ["overall", "subtaskA", "subtaskB"]:
for metric in ["f1", "precision", "recall"]:
yield [
dict(
batch=batch_end,
value=score[task][metric],
type=f"assisted-{suggest_mode}",
metric=metric,
task=task,
),
]
batch_size = st.number_input("Batch size", 1, 100, 10)
if st.button("Run"):
all_scores = []
open("/data/results_assisted_training_temp.json", "w").close()
for scores in full_training(
training_collection, testing_collection, batch_size
):
chart.add_rows(scores)
all_scores.extend(scores)
with open("/data/results_assisted_training_temp.json", "a") as fp:
for score in scores:
fp.write(json.dumps(score))
fp.write("\n")
for r1, r2, r3, r4 in zip(
random_training(training_collection, testing_collection, batch_size),
assisted_training(
training_collection, testing_collection, batch_size, "full"
),
assisted_training(
training_collection, testing_collection, batch_size, "entity"
),
assisted_training(
training_collection, testing_collection, batch_size, "relation"
),
):
chart.add_rows(r1 + r2 + r3 + r4)
all_scores.extend(r1 + r2 + r3 + r4)
with open("/data/results_assisted_training_temp.json", "a") as fp:
for score in r1 + r2 + r3 + r4:
fp.write(json.dumps(score))
fp.write("\n")
st.write(pd.DataFrame(all_scores))
elif experiment == "Pre-computed graphs":
data = load_training_logs()
st.write(data.head(100))
models = list(data["type"].unique())
model = st.multiselect("Model", models, models)
metric = st.selectbox("Metric", data["metric"].unique())
task = st.selectbox("Task", data["task"].unique())
batch_min, batch_max = st.slider(
"Batch range", 0, int(data["batch"].max()), (0, int(data["batch"].max()))
)
df = data[
(data["type"].isin(model))
& (data["metric"] == metric)
& (data["task"] == task)
& (data["batch"] >= batch_min)
& (data["batch"] <= batch_max)
].copy()
smooth_factor = st.number_input("Smooth factor", 0, 100, 0)
# TODO: apply smoothing
st.altair_chart(
alt.Chart(df)
.mark_line()
.encode(
x=alt.X("batch", title="Sentences annotated"),
y=alt.Y("value", title=metric.title(), scale=alt.Scale(zero=False)),
color=alt.Color("type", title="Model"),
),
use_container_width=True,
)
target = df[df['type'] == 'full']['value'].max()
steps = [0, 0.8, 0.85, 0.9, 0.95, 1.0]
df['relative'] = df['value'] / target
df['relative_clamp'] = df['relative'].apply(lambda x: max(s for s in steps if x >= s))
df = df[(df['type'] != 'full') & (df['relative_clamp'] >= 0.8)]
st.altair_chart(
alt.Chart(df).mark_bar().encode(
column=alt.Column('relative_clamp:N', title="Relative fitness"),
x=alt.X('type', title=None),
y=alt.Y('min(batch)', stack=False),
color='type',
)
)
st.write(df.groupby(['type', 'relative_clamp']).agg(min_batch=('batch', 'min'), avg_batch=('batch', 'mean')))
data = load_threshold_logs().copy()
baseline = data['overall_cost'][0]
st.write(baseline)
data['relative_improvement'] = (baseline - data['overall_cost']) / baseline
data['relative_improvement_abs'] = data['relative_improvement'].abs()
data['improves'] = data['overall_cost'] < baseline
st.write(data)
st.write("Optimal entity F1: %.3f" % data['subtaskA_f1'].max())
st.write("Optimal relation F1: %.3f" % data['subtaskB_f1'].max())
st.write("Optimal cost improvement: %.3f" % data['relative_improvement'].max())
st.altair_chart(
alt.Chart(data).mark_circle().encode(
x=alt.X('entity_threshold:Q', title="Entity threshold"),
y=alt.Y('relation_threshold:Q', title="Relation threshold"),
size=alt.Size('relative_improvement_abs', legend=None),
color=alt.Color('improves', scale=alt.Scale(range=['red', 'green']), legend=None),
).properties(
width=450,
height=400,
)
)
|
import resources
from dodge.game.state import GameState, GameStatus
from dodge.game.runner import GameRunner
from dodge.config import Config
from dodge.level import SillyLevelBuilder
import dodge.ui as ui
class Game(object):
def __init__(self):
self.config = Config(resources.config)
self.window = ui.UI(self.config)
self.input_handler = ui.InputHandler()
self.current_game_state = None # type: GameState
self.runner = None # type: GameRunner
self.game_status = None # type: GameStatus
def start_new_game(self):
self.current_game_state = GameState(self.config, SillyLevelBuilder)
self.game_status = self.current_game_state.status
level_renderer = ui.LevelRenderer(self.window.console, self.window.panel, self.current_game_state.level,
self.config)
level_renderer.render_all(0)
self.runner = GameRunner(self.current_game_state, self.input_handler, level_renderer, self.window)
self.runner.play_game()
def continue_game(self):
self.runner.play_game()
def main_menu(self):
while True:
# If you died in the last round, print a death screen
if self.game_status is not None and self.game_status.is_status(self.game_status.PLAYER_DEATH):
self.window.display_text('YOU DIED')
choice = self.window.main_menu()
if choice == 0:
self.start_new_game()
            # TODO: Only allow continuing if a game is in progress or a save exists
elif choice == 1:
self.continue_game()
elif choice == 2:
break
game = Game()
game.main_menu()
|
#!/usr/bin/env python3
""" Script for addressing CARRY4 output congestion in elaborated netlists.
Usage:
python3 fix_carry.py < input-netlist-json > output-netlist-json
Description:
In the 7-series SLICEL (and SLICEM) sites, there can be output congestion
if both the CO and O of the CARRY4 are used. This congestion can be
avoided by using a transparent/open latch or register on the output of the
CARRY4.
VPR does not currently support either of those options, so for now, if
both CO and O are used, the CO output is converted into a LUT equation to
recompute the CO output from O, DI and S. See carry_map.v and
clean_carry_map.v for details.
If VPR could emit the transparent/open latch on output congestion, this
would no longer be required. The major problem with transparent latch
support is that it requires constants to be routed to the G/GE/CLR/PRE
ports, which VPR cannot express as a result of packing.
This script identifies CARRY4 chains in the netlist, identifies if there
is output congestion on the O and CO ports, and marks the congestion by
changing CARRY_CO_DIRECT (e.g. directly use the CO port) to CARRY_CO_LUT
(compute the CO value using a LUT equation).
Diagram showing one row of the 7-series CLE, focusing on O/CO congestion.
    This diagram shows that if both the O and CO outputs are needed, one must
    pass through the flip-flop (xFF in the diagram).
CLE Row
+--------------------------------------------------------------------------+
| |
| |
| +---+ |
| | + |
| | + |
| +-------->+ O + |
| CO CHAIN | | + |
| | | +---------------------> xMUX
| ^ | +---->+ CO + |
| | | | | + |
| | | | | + |
| +---------+----------+ | | | + |
| | | | | +---+ |
| | CARRY ROW | | | |
| +--->+ S O +--------+ | xOUTMUX |
| | | | | |
| | | + | |
| +--->+ DI CO +-------+o+--+ |
| | CI CHAIN | + | |
| | | | | |
| +---------+----------+ | | xFFMUX |
| ^ | | |
| | | | +---+ |
| + | | | + |
| | + | + +-----------+ |
| +--+o+--->+ O + | | |
| + | + | xFF | |
| | | +->--D---- Q +------> xQ
| | | + | | |
| +---->+ CO + | | |
| | + +-----------+ |
| | + |
| +---+ |
| |
| |
+--------------------------------------------------------------------------+
This script operates on a slightly different cell structure than a plain CARRY4.
carry_map.v converts the CARRY4 into:
+------------------+ +-----------------+
| | | |
| CO3 +->+ CARRY_CO_DIRECT |
| | | |
| DI3 | +-----------------+
| |
| S3 O3 |
| |
| DI2 | +-----------------+
| | | |
| S2 CO2 +->+ CARRY_CO_DIRECT |
| | | |
| DI1 | +-----------------+
| |
| S1 O2 |
| CARRY4 |
| DI0 (chained) | +-----------------+
| | | |
| S0 CO1 +->+ CARRY_CO_DIRECT |
| | | |
| CYINIT | +-----------------+
| |
+-----------------+ | O1 |
| | | |
+->+ CARRY_COUT_PLUG +->+ CI | +-----------------+
| | | | | | |
| +-----------------+ | CO0 +->+ CARRY_CO_DIRECT |
| | | | |
| | | +-----------------+
+-------------------+ | |
| | O0 |
+------------------+ +-----------------+ | | |
| | | | | +------------------+
| CO3 +->+ CARRY_CO_DIRECT +-+
| | | |
| DI3 | +-----------------+
| |
| S3 O3 |
| |
| DI2 | +-----------------+
| | | |
| S2 CO2 +->+ CARRY_CO_DIRECT |
| | | |
| DI1 | +-----------------+
| |
| S1 O2 |
| CARRY4 |
| DI0 (root) | +-----------------+
| | | |
| S0 CO1 +->+ CARRY_CO_DIRECT |
| | | |
| CYINIT | +-----------------+
| |
| O1 |
| |
| CI | +-----------------+
| | | |
| CO0 +->+ CARRY_CO_DIRECT |
| | | |
| | +-----------------+
| |
| O0 |
| |
+------------------+
Each CARRY4 spans the 4 rows of the SLICEL/SLICEM.
Row 0 is the S0/DI0/O0/CO0 ports, row 1 is S1/DI1/O1/CO1 ports, etc.
So there are five cases the script has to handle:
- No congestion is present between O and CO ->
Do nothing.
- Congestion is present on rows 0-2 and row above is in use ->
Change CARRY_CO_DIRECT to CARRY_CO_LUT.
Routing and LUT delays are incurred in this case.
- Congestion is present on rows 0-2 and row above is not in use ->
Remap CO to O from the row above, and set S on the next row to 0 to
ensure O outputs CI from the row below.
No additional delays for this change.
- Congestion is present on row 3 and CO3 is not connected to another CARRY ->
Change CARRY_CO_DIRECT to CARRY_CO_TOP_POP. This adds 1 dummy layer to
the carry chain to output the CO.
No additional delays for this change.
- Congestion is present on row 3 and CO3 is connected directly to another
CARRY4 ->
Change CARRY_CO_DIRECT to CARRY_CO_LUT *and* change the chained
CARRY_COUT_PLUG to be directly connected to the previous CO3.
Routing and LUT delays are incurred in this case.
Diagram for this case:
+-------------------+ +-----------------+
| | | |
| CO3 +->+ CARRY_CO_DIRECT |
| | | |
| DI3 | +-----------------+
| |
| S3 O3 |
| |
| DI2 | +-----------------+
| | | |
| S2 CO2 +->+ CARRY_CO_DIRECT |
| | | |
| DI1 | +-----------------+
| |
| S1 O2 |
| CARRY4 |
| DI0 (chained) | +-----------------+
| | | |
| S0 CO1 +->+ CARRY_CO_DIRECT |
| | | |
| CYINIT | +-----------------+
| |
+-----------------+ | O1 |
| | | |
+->+ CARRY_COUT_PLUG +--->+ CI | +-----------------+
| | | | | | |
| +-----------------+ | CO0 +->+ CARRY_CO_DIRECT |
| | | | |
+-------------------+ | +-----------------+ | | +-----------------+
| | | | | | |
| CO3 +--+->+ CARRY_CO_LUT +-+ | O0 |
| | | | | | |
| DI3 | +-----------------+ | +-------------------+
| | |
| S3 O3 | +------>
| |
| DI2 | +-----------------+
| | | |
| S2 CO2 +---->+ CARRY_CO_DIRECT |
| | | |
| DI1 | +-----------------+
| |
| S1 O2 |
| CARRY4 |
| DI0 (root) | +-----------------+
| | | |
| S0 CO1 +---->+ CARRY_CO_DIRECT |
| | | |
| CYINIT | +-----------------+
| |
| O1 |
| |
| CI | +-----------------+
| | | |
| CO0 +---->+ CARRY_CO_DIRECT |
| | | |
| | +-----------------+
| |
| O0 |
| |
+-------------------+
After this script is run, clean_carry_map.v is used to convert CARRY_CO_DIRECT
into a direct connection, and CARRY_CO_LUT is mapped to a LUT to compute the
carry output.
"""
import json
import sys
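# Illustrative sketch (not part of the original flow of this script): the boolean
# relation that lets CARRY_CO_LUT recompute CO from O, DI and S.  For one carry row,
#   O  = S XOR CI            (sum output)
#   CO = CI if S else DI     (carry output, i.e. the carry mux)
# so CI = O XOR S and CO can be rebuilt from O, DI and S alone.  The exact LUT
# mapping lives in carry_map.v / clean_carry_map.v; the helper below is only an
# assumption-labelled illustration and is not used anywhere in this script.
def _co_from_o_di_s(o, di, s):
    ci = o ^ s
    return ci if s else di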
def find_top_module(design):
"""
Looks for the top-level module in the design. Returns its name. Throws
an exception if none was found.
"""
for name, module in design["modules"].items():
attrs = module["attributes"]
if "top" in attrs and int(attrs["top"]) == 1:
return name
raise RuntimeError("No top-level module found in the design!")
def find_carry4_chains(design, top_module, bit_to_cells):
""" Identify CARRY4 carry chains starting from the root CARRY4.
All non-root CARRY4 cells should end up as part of a chain, otherwise
an assertion is raised.
Arguments:
design (dict) - "design" field from Yosys JSON format
top_module (str) - Name of top module.
bit_to_cells (dict) - Map of net bit identifier and cell information.
            Computed in "create_bit_to_cell_map".
Returns:
list of list of strings - List of CARRY4 chains. Each chain is a list
of cellnames. The cells are listed in chain order, starting from
the root.
"""
cells = design["modules"][top_module]["cells"]
used_carry4s = set()
root_carry4s = []
nonroot_carry4s = {}
for cellname in cells:
cell = cells[cellname]
if cell["type"] != "CARRY4_VPR":
continue
connections = cell["connections"]
if "CIN" in connections:
cin_connections = connections["CIN"]
assert len(cin_connections) == 1
            # Go to the driver of CIN; it should be a CARRY_COUT_PLUG.
plug_cellname, port, bit_idx = bit_to_cells[cin_connections[0]][0]
plug_cell = cells[plug_cellname]
assert plug_cell["type"] == "CARRY_COUT_PLUG", plug_cellname
assert port == "COUT"
plug_connections = plug_cell["connections"]
cin_connections = plug_connections["CIN"]
assert len(cin_connections) == 1
            # Go to the driver of CIN; it should be a CARRY_CO_DIRECT.
direct_cellname, port, bit_idx = bit_to_cells[cin_connections[0]
][0]
direct_cell = cells[direct_cellname]
assert direct_cell["type"] == "CARRY_CO_DIRECT", direct_cellname
assert port == "OUT"
direct_connections = direct_cell["connections"]
co_connections = direct_connections["CO"]
assert len(co_connections) == 1
nonroot_carry4s[co_connections[0]] = cellname
else:
used_carry4s.add(cellname)
root_carry4s.append(cellname)
# Walk from each root CARRY4 to each child CARRY4 module.
chains = []
for cellname in root_carry4s:
chain = [cellname]
while True:
# Follow CO3 to the next CARRY4, if any.
cell = cells[cellname]
connections = cell["connections"]
co3_connections = connections.get("CO3", None)
if co3_connections is None:
# No next CARRY4, stop here.
break
found_next_link = False
for connection in co3_connections:
next_cellname = nonroot_carry4s.get(connection, None)
if next_cellname is not None:
cellname = next_cellname
used_carry4s.add(cellname)
chain.append(cellname)
found_next_link = True
break
if not found_next_link:
break
chains.append(chain)
# Make sure all non-root CARRY4's got used.
for bit, cellname in nonroot_carry4s.items():
assert cellname in used_carry4s, (bit, cellname)
return chains
def create_bit_to_cell_map(design, top_module):
""" Create map from net bit identifier to cell information.
Arguments:
design (dict) - "design" field from Yosys JSON format
top_module (str) - Name of top module.
Returns:
bit_to_cells (dict) - Map of net bit identifier and cell information.
    The map keys are the net bit identifiers used to mark which net a cell port
    is connected to.  The map values are a list of cell ports that are in the
    net.  The first element of the list is the driver port, and the remaining
    elements are sink ports.
    The list elements are 3-tuples with:
        cellname (str) - The name of the cell this port belongs to.
        port (str) - The name of the port this element is connected to.
        bit_idx (int) - For multi-bit ports, a 0-based index into the port.
"""
bit_to_cells = {}
cells = design["modules"][top_module]["cells"]
for cellname in cells:
cell = cells[cellname]
port_directions = cell["port_directions"]
for port, connections in cell["connections"].items():
is_output = port_directions[port] == "output"
for bit_idx, bit in enumerate(connections):
list_of_cells = bit_to_cells.get(bit, None)
if list_of_cells is None:
list_of_cells = [None]
bit_to_cells[bit] = list_of_cells
if is_output:
# First element of list of cells is net driver.
assert list_of_cells[0] is None, (
bit, list_of_cells[0], cellname
)
list_of_cells[0] = (cellname, port, bit_idx)
else:
list_of_cells.append((cellname, port, bit_idx))
return bit_to_cells
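# Example of the resulting map shape (hypothetical cell names, illustration only):
# the first tuple in each list is the driver, the remaining tuples are sinks.
#   bit_to_cells[42] == [("carry_a", "CO3", 0),   # driver
#                        ("plug_b",  "CIN", 0)]   # sink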
def is_bit_used(bit_to_cells, bit):
""" Is the net bit specified used by any sinks? """
list_of_cells = bit_to_cells[bit]
return len(list_of_cells) > 1
def is_bit_used_other_than_carry4_cin(design, top_module, bit, bit_to_cells):
""" Is the net bit specified used by any sinks other than a carry chain? """
cells = design["modules"][top_module]["cells"]
list_of_cells = bit_to_cells[bit]
assert len(list_of_cells) == 2, bit
direct_cellname, port, _ = list_of_cells[1]
direct_cell = cells[direct_cellname]
assert direct_cell['type'] == "CARRY_CO_DIRECT"
assert port == "CO"
# Follow to output
connections = direct_cell["connections"]["OUT"]
assert len(connections) == 1
for cellname, port, bit_idx in bit_to_cells[connections[0]][1:]:
cell = cells[cellname]
if cell["type"] == "CARRY_COUT_PLUG" and port == "CIN":
continue
else:
return True, direct_cellname
return False, direct_cellname
def create_bit_to_net_map(design, top_module):
""" Create map from net bit identifier to net information.
Arguments:
design (dict) - "design" field from Yosys JSON format
top_module (str) - Name of top module.
Returns:
bit_to_nets (dict) - Map of net bit identifier to net information.
"""
bit_to_nets = {}
nets = design["modules"][top_module]["netnames"]
for net in nets:
for bit_idx, bit in enumerate(nets[net]["bits"]):
bit_to_nets[bit] = (net, bit_idx)
return bit_to_nets
def fixup_cin(design, top_module, bit_to_cells, co_bit, direct_cellname):
""" Move connection from CARRY_CO_LUT.OUT -> CARRY_COUT_PLUG.CIN to
directly to preceeding CARRY4.
"""
cells = design["modules"][top_module]["cells"]
direct_cell = cells[direct_cellname]
assert direct_cell["type"] == "CARRY_CO_LUT"
# Follow to output
connections = direct_cell["connections"]["OUT"]
assert len(connections) == 1
for cellname, port, bit_idx in bit_to_cells[connections[0]][1:]:
cell = cells[cellname]
if cell["type"] == "CARRY_COUT_PLUG" and port == "CIN":
assert bit_idx == 0
cells[cellname]["connections"]["CIN"][0] = co_bit
def fixup_congested_rows(design, top_module, bit_to_cells, bit_to_nets, chain):
""" Walk the specified carry chain, and identify if any outputs are congested.
Arguments:
design (dict) - "design" field from Yosys JSON format
top_module (str) - Name of top module.
bit_to_cells (dict) - Map of net bit identifier and cell information.
            Computed in "create_bit_to_cell_map".
bit_to_nets (dict) - Map of net bit identifier to net information.
            Computed in "create_bit_to_net_map".
chain (list of str) - List of cells in the carry chain.
"""
cells = design["modules"][top_module]["cells"]
O_ports = ["O0", "O1", "O2", "O3"]
CO_ports = ["CO0", "CO1", "CO2", "CO3"]
def check_if_rest_of_carry4_is_unused(cellname, cell_idx):
assert cell_idx < len(O_ports)
cell = cells[cellname]
connections = cell["connections"]
for o, co in zip(O_ports[cell_idx:], CO_ports[cell_idx:]):
o_conns = connections[o]
assert len(o_conns) == 1
o_bit = o_conns[0]
if is_bit_used(bit_to_cells, o_bit):
return False
co_conns = connections[co]
assert len(co_conns) == 1
co_bit = co_conns[0]
if is_bit_used(bit_to_cells, co_bit):
return False
return True
# Carry chain is congested if both O and CO is used at the same level.
# CO to next element in the chain is fine.
for chain_idx, cellname in enumerate(chain):
cell = cells[cellname]
connections = cell["connections"]
for cell_idx, (o, co) in enumerate(zip(O_ports, CO_ports)):
o_conns = connections[o]
assert len(o_conns) == 1
o_bit = o_conns[0]
co_conns = connections[co]
assert len(co_conns) == 1
co_bit = co_conns[0]
is_o_used = is_bit_used(bit_to_cells, o_bit)
is_co_used, direct_cellname = is_bit_used_other_than_carry4_cin(
design, top_module, co_bit, bit_to_cells
)
if is_o_used and is_co_used:
# Output at this row is congested.
direct_cell = cells[direct_cellname]
if co == 'CO3' and chain_idx == len(chain) - 1:
# This congestion is on the top of the carry chain,
# emit a dummy layer to the chain.
direct_cell["type"] = "CARRY_CO_TOP_POP"
assert int(direct_cell["parameters"]["TOP_OF_CHAIN"]) == 1
# If this is the last CARRY4 in the chain, see if the
# remaining part of the chain is idle.
elif chain_idx == len(chain) - 1 and \
check_if_rest_of_carry4_is_unused(cellname, cell_idx + 1):
# Because the rest of the CARRY4 is idle, it is safe to
# use the next row up to output the top of the carry.
connections["S{}".format(cell_idx + 1)] = ["1'b0"]
next_o_conns = connections[O_ports[cell_idx + 1]]
assert len(next_o_conns) == 1
direct_cell["connections"]["CO"][0] = next_o_conns[0]
netname, bit_idx = bit_to_nets[next_o_conns[0]]
assert bit_idx == 0
# Update annotation that this net is now in use.
net = design["module"][top_module]["netnames"][netname]
assert net["attributes"].get("unused_bits", None) == "0 "
del net["attributes"]["unused_bits"]
else:
                    # The previous two strategies (use another layer of carry)
# only work for the top of the chain. This appears to be
# in the middle of the chain, so just spill it out to a
# LUT, and fixup the direct carry chain (if any).
direct_cell["type"] = "CARRY_CO_LUT"
fixup_cin(
design, top_module, bit_to_cells, co_bit,
direct_cellname
)
def main():
design = json.load(sys.stdin)
top_module = find_top_module(design)
bit_to_cells = create_bit_to_cell_map(design, top_module)
bit_to_nets = create_bit_to_net_map(design, top_module)
for chain in find_carry4_chains(design, top_module, bit_to_cells):
fixup_congested_rows(
design, top_module, bit_to_cells, bit_to_nets, chain
)
json.dump(design, sys.stdout, indent=2)
if __name__ == "__main__":
main()
|
# Why does this file exist, and why not put this in `__main__`?
#
# You might be tempted to import things from `__main__` later,
# but that will cause problems: the code will get executed twice:
#
# - When you run `python -m pawabot` python will execute
# `__main__.py` as a script. That means there won't be any
# `pawabot.__main__` in `sys.modules`.
# - When you import `__main__` it will get executed again (as a module) because
# there's no `pawabot.__main__` in `sys.modules`.
"""Module that contains the command line application."""
import argparse
import logging
import os
import sys
from pathlib import Path
from typing import List, Optional
from loguru import logger
from privibot import User
from privibot import callbacks as privcallbacks
from privibot import init
from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler, Updater
from . import callbacks
from .utils import get_data_dir
DATA_DIR = get_data_dir()
def get_parser() -> argparse.ArgumentParser:
"""
Return the CLI argument parser.
Returns:
An argparse parser.
"""
parser = argparse.ArgumentParser(prog="pawabot")
subparsers = parser.add_subparsers(dest="subcommand", title="Commands", metavar="", prog="pawabot")
subcommand_help = "Show this help message and exit."
global_options = parser.add_argument_group(title="Global options")
global_options.add_argument(
"-L",
"--log-level",
dest="log_level",
default="INFO",
help="Log level to use",
choices=("TRACE", "DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"),
type=str.upper,
)
# global_options.add_argument(
# "-P", "--log-path", dest="log_path", default=None, help="Log path to use. Can be a directory or a file."
# )
def create_subparser(command, text, **kwargs):
sub = subparsers.add_parser(command, add_help=False, help=text, description=text, **kwargs)
sub.add_argument("-h", "--help", action="help", help=subcommand_help)
return sub
create_subparser("run", "Run the bot.")
create_admin = create_subparser("create-admin", "Create an administrator in the database.")
create_admin.add_argument("-i", "--uid", dest="uid", help="Telegram user id.")
create_admin.add_argument("-u", "--username", dest="username", help="Telegram user name.")
create_user = create_subparser("create-user", "Create a user in the database.")
create_user.add_argument("-i", "--uid", dest="uid", help="Telegram user id.")
create_user.add_argument("-u", "--username", dest="username", help="Telegram user name.")
create_user.add_argument("-a", "--admin", action="store_true", dest="admin", help="Give admin access.")
create_subparser("list-users", "List registered users.")
# delete_users = subparser("delete-users", "Delete users by ID.")
# delete_users.add_argument("uids", nargs="+", dest="uids", help="IDs of the users to delete.")
# TODO: list-privileges
# TODO: grant
# TODO: revoke
return parser
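# Example invocations built from the sub-commands defined above (illustrative
# only; the ids and usernames are made up):
#   pawabot run
#   pawabot create-admin -i 123456 -u alice
#   pawabot create-user -i 654321 -u bob --admin
#   pawabot list-users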
def main(args: Optional[List[str]] = None) -> int:
"""
Run the main program.
This function is executed when you type `pawabot` or `python -m pawabot`.
Arguments:
args: Arguments passed from the command line.
Returns:
An exit code.
"""
parser = get_parser()
args = parser.parse_args(args=args)
def log_level_to_name(level):
for log_name in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
if level == getattr(logging, log_name):
return log_name
class InterceptHandler(logging.Handler):
def emit(self, record):
# Retrieve context where the logging call occurred, this happens to be in the 6th frame upward
logger_opt = logger.opt(depth=6, exception=record.exc_info)
logger_opt.log(log_level_to_name(record.levelno), record.getMessage())
log_level = args.log_level
logger.configure(
handlers=[
{
"sink": sys.stdout,
"format": "{time:YYYY-MM-DD HH:mm:ss.SSS} | <lvl>{level:<8}</lvl> | {message}",
"level": log_level,
}
]
)
logging.basicConfig(handlers=[InterceptHandler()], level=0)
init(db_path="sqlite:///" + str(DATA_DIR / "db.sqlite3"))
# with open("owner_id.txt") as stream:
# OWNER_ID = stream.read().rstrip("\n")
if args.subcommand == "create-admin":
User.create(uid=args.uid, username=args.username, is_admin=True)
return 0
elif args.subcommand == "create-user":
User.create(uid=args.uid, username=args.username, is_admin=args.admin)
return 0
elif args.subcommand == "list-users":
print(f"{'ID':>10} {'USERNAME':<20} ADMIN")
print("---------------------------------------")
for user in User.all():
print(f"{user.uid:>10} {user.username:<20} {user.is_admin}")
# TODO: also show privileges
return 0
# elif args.subcommand == "delete-users":
# for uid in args.uids:
# User
elif args.subcommand == "run":
if "BOT_TOKEN" in os.environ:
bot_token = os.environ.get("BOT_TOKEN")
else:
with (Path.home() / ".config" / "pawabot" / "bot_token.txt").open() as stream:
bot_token = stream.read().rstrip("\n")
updater = Updater(token=bot_token, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", callbacks.start))
dispatcher.add_handler(CommandHandler("help", callbacks.help))
dispatcher.add_handler(CommandHandler("myID", callbacks.my_id))
dispatcher.add_handler(CommandHandler("myPrivileges", privcallbacks.my_privileges))
dispatcher.add_handler(CommandHandler("requestAccess", privcallbacks.request_access))
dispatcher.add_handler(CommandHandler("grant", privcallbacks.grant, pass_args=True))
dispatcher.add_handler(CommandHandler("revoke", privcallbacks.revoke, pass_args=True))
handler_search = CommandHandler("search", callbacks.search, pass_args=True)
handler_search_pattern = MessageHandler(Filters.text, callbacks.search_pattern)
handler_search_select = MessageHandler(Filters.regex(r"^([1-9][0-9]*\+?|Cancel)$"), callbacks.search_select)
dispatcher.add_handler(
ConversationHandler(
entry_points=[handler_search],
states={
callbacks.STATE.SEARCH.PATTERN: [handler_search_pattern],
callbacks.STATE.SEARCH.SELECT: [handler_search_select],
},
fallbacks=[CommandHandler("cancel", callbacks.cancel)],
)
)
dispatcher.add_handler(handler_search)
# dispatcher.add_handler(InlineQueryHandler(callbacks.inline_search))
dispatcher.add_handler(MessageHandler(Filters.regex(callbacks.MAGNET_RE), callbacks.parse_magnet))
dispatcher.add_handler(CommandHandler("test", callbacks.test))
dispatcher.add_handler(MessageHandler(Filters.command, callbacks.unknown_command))
dispatcher.add_handler(MessageHandler(Filters.text, callbacks.unknown))
logging.info("Starting Bot")
updater.start_polling()
logging.info("Putting Bot in idle mode")
updater.idle()
return 0
else:
print(parser.format_help(), file=sys.stderr)
return 1
|
import torch
import torch.nn as nn
from torch.autograd import Function
import padding._C as _C
class PaddingFunction(Function):
@staticmethod
def forward(ctx, x, pad_h=1, pad_w=0, flag=False):
ctx.constant = pad_h, pad_w, flag
if not x.is_contiguous():
x = x.contiguous()
if x.is_cuda:
out = _C.padh_gpu_forward(x, pad_h, pad_w, flag)
else:
out = _C.padh_cpu_forward(x, pad_h, pad_w, flag)
return out
@staticmethod
def backward(ctx, grad_output):
pad_h, pad_w, flag = ctx.constant
if not grad_output.is_contiguous():
grad_output = grad_output.contiguous()
if grad_output.is_cuda:
out = _C.padh_gpu_backward(grad_output, pad_h, pad_w, flag)
else:
out = _C.padh_cpu_backward(grad_output, pad_h, pad_w, flag)
return out, None, None, None
pad = PaddingFunction.apply
class Padding(nn.Module):
def __init__(self, pad_h=1, pad_w=0, onesided=False):
super(Padding, self).__init__()
self.pad_h = pad_h
self.pad_w = pad_w
self.onesided = onesided
def forward(self, x):
return pad(x, self.pad_h, self.pad_w, self.onesided)
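# Usage sketch (illustration only; assumes the compiled `padding._C` extension
# imported at the top of this file is available):
#   layer = Padding(pad_h=2, pad_w=0, onesided=False)
#   y = layer(torch.randn(1, 3, 32, 32))   # pads along H via the custom op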
class CropFunction(Function):
@staticmethod
def forward(ctx, x, r, pooled_h=1, pooled_w=1, first=True):
off = 1 if first else 0
height, width = x.size(2-off), x.size(3-off)
ctx.constant = height, width, first
ctx.save_for_backward(r)
if not x.is_contiguous():
x = x.contiguous()
if x.is_cuda:
out = _C.crop_gpu_forward(x, r, pooled_h, pooled_w, first)
else:
out = _C.crop_cpu_forward(x, r, pooled_h, pooled_w, first)
return out
@staticmethod
def backward(ctx, grad_output):
height, width, first = ctx.constant
r, = ctx.saved_tensors
if not grad_output.is_contiguous():
grad_output = grad_output.contiguous()
if grad_output.is_cuda:
out = _C.crop_gpu_backward(grad_output, r, height, width, first)
else:
out = _C.crop_cpu_backward(grad_output, r, height, width, first)
return out, None, None, None, None
crop = CropFunction.apply
class Conv2DFunction(Function):
@staticmethod
def forward(ctx, x, weight, bias, stride, groups, padh, padw, onesided):
ctx.constant = padh, padw, onesided, stride, groups
        ctx.save_for_backward(x, weight)
if not x.is_contiguous():
x = x.contiguous()
if x.is_cuda:
out = _C.conv2d_gpu_forward(
x, weight, bias,
padh, padw, onesided,
stride, groups)
else:
raise ValueError('CPU OP is not supported')
return out
@staticmethod
def backward(ctx, grad_output):
padh, padw, onesided, stride, groups = ctx.constant
x, weight = ctx.saved_tensors
if not grad_output.is_contiguous():
grad_output = grad_output.contiguous()
if grad_output.is_cuda:
#grad_input, grad_weight, grad_bias
ret = _C.conv2d_gpu_backward(
grad_output, x, weight,
padh, padw, onesided,
stride, groups)
else:
raise ValueError('CPU OP is not supported')
ret = ret + (None, )*5
return ret
circular_conv2d = Conv2DFunction.apply
class Svf2DFunction(Function):
@staticmethod
def forward(ctx, x, r, weight, pooled_height, pooled_width, first):
off = 1 if first else 0
height, width = x.size(2-off), x.size(3-off)
        ctx.constant = height, width, pooled_height, pooled_width, first
        ctx.save_for_backward(x, r, weight)
if not x.is_contiguous():
x = x.contiguous()
if x.is_cuda:
out = _C.svf2d_gpu_forward(
x, r, weight, pooled_height, pooled_width, first)
else:
raise ValueError('CPU OP is not supported')
return out
@staticmethod
def backward(ctx, grad_output):
        height, width, pooled_height, pooled_width, first = ctx.constant
x, r, weight = ctx.saved_tensors
if not grad_output.is_contiguous():
grad_output = grad_output.contiguous()
if grad_output.is_cuda:
#grad_input, grad_weight
ret = _C.svf2d_gpu_backward(
grad_output,
x, r, weight,
height, width, pooled_height, pooled_width, first)
else:
raise ValueError('CPU OP is not supported')
ret = ret + (None, )*5
return ret
circular_svf2d = Svf2DFunction.apply
|
import tensorflow as tf # tensorflow import
import numpy as np # linear algebra library for vector/matrix numerical computation in Python
import skimage.data # skimage is a Python library for image processing
from PIL import Image, ImageDraw, ImageFont # PIL provides image processing and graphics functionality for the Python interpreter
import math # standard mathematical functions
from tensorflow.python.platform import gfile # TensorFlow's file I/O, equivalent to open()
import scipy.misc # miscellaneous scipy functions https://docs.scipy.org/doc/scipy/reference/misc.html
from utils.vector import cross # cross-product helper
IMAGE_HEIGHT = 256 # image height in pixels
IMAGE_WIDTH = 256 # image width in pixels
# *****************************************************************************************************
# rigid_transform_3D(A,B): given two sets of corresponding coordinates A and B, returns the matching R and t.
# get_pc_transformation2(p1,p2): uses rigid_transform_3D(p1, p2); given p1 and p2, returns R and t together with p1_2, the prediction obtained by applying them to p1.
# Depth2Points3D_transformed_vector(Dlambda, indices, Rt, Ki, cen, origin, scaling): given depth values (Dlambda), the pixel indices of the predefined points, and the camera parameters Rt, Ki, cen and the scaling factor, returns the reconstructed 3D points.
# part_transformation2(i_limit,PC1,PC2,p): uses get_pc_transformation2(p1, p2) to obtain p1_2, and returns it together with p2p, the slice of PC2 belonging to part p.
# transform_depth_PCs_dp_based2(C,R,Rt,cen,K,Ki,origin,scaling,d_i,d_j,i_r1_c1_r2_c2,i_limit): returns the actual coordinates at time instant j and the coordinates predicted by the warping function.
# reproject(point3D, K,R,C): reprojects 3D coordinates back into 2D.
# compute_dp_tr_3d_2d_loss2(d_i,d_j,i_r1_c1_r2_c2,i_limit,C,R,Rt,cen,K,Ki,origin,scaling): returns the loss between the 3D coordinates and the loss between their 2D reprojections, together with the predicted and actual coordinates.
# *****************************************************************************************************
def rigid_transform_3D(A,B):# B is p predicted by the warping function, A is p at the i-th instant; this returns the matching R and t
    A = tf.transpose(A) #3*N, tf.transpose transposes the matrix
    B = tf.transpose(B) #3*N, B = R*A + t
    num_rows = tf.shape(B)[0] #3, tf.shape returns the shape of the input tensor as a 1-d integer tensor; [0] is the number of rows
    num_cols = tf.shape(B)[1] #N, [1] is the number of columns
    centroid_A = tf.reshape(tf.reduce_mean(A,1),[3,1]) #3*1, the 1*3 row means reshaped to 3*1
    centroid_B = tf.reshape(tf.reduce_mean(B,1),[3,1]) #3*1
    one_row = tf.ones([1,num_cols], tf.float32) # 1*N, tf.ones creates a tensor with every element set to 1
    Amean = tf.concat([one_row*centroid_A[0,0],one_row*centroid_A[1,0],one_row*centroid_A[2,0]],0) #3*N, each row filled with the corresponding (scalar) component of centroid_A, stacked row-wise with tf.concat(...,0)
    Bmean = tf.concat([one_row*centroid_B[0,0],one_row*centroid_B[1,0],one_row*centroid_B[2,0]],0) #3*N, same as above
    Am = tf.subtract(A , Amean)# subtract each row's mean from every element of that row of A
    Bm = tf.subtract(B , Bmean)# subtract each row's mean from every element of that row of B
    H = tf.matmul(Am , tf.transpose(Bm))#(3*N)*(N*3)=3*3
    S, U, V = tf.linalg.svd(H)# SVD decomposition of H
    R = tf.matmul(V,tf.transpose(U))# rotation
    t = tf.matmul(R*(-1),centroid_A) + centroid_B# translation; the derivation of R and t is detailed in http://graphics.stanford.edu/~smr/ICP/comparison/eggert_comparison_mva97.pdf
    return R,t
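# Minimal NumPy sketch of the same closed-form rigid alignment (illustration
# only; it is not used by the TensorFlow graph in this file). It solves for the
# R, t minimising ||R*A + t - B|| for N x 3 point sets A and B; like the
# TensorFlow version above, the reflection/sign case is not handled.
def _rigid_transform_3D_np(A, B):
    cA, cB = A.mean(0), B.mean(0)          # centroids of the two point sets
    H = (A - cA).T @ (B - cB)              # 3 x 3 cross-covariance
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T                         # rotation
    t = cB - R @ cA                        # translation
    return R, t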
def get_pc_transformation2(p1,p2):# p1 is p at the i-th instant, p2 is p at the j-th instant predicted by the warping function
    R,t = rigid_transform_3D(p1, p2)# compute R and t
    one_row = tf.ones([1,tf.shape(p1)[0]],tf.float32) # 1*N; p1 is N*3, so tf.shape(p1)[0] is N and this is a 1*N matrix of ones
    tmat = tf.concat([one_row*t[0,0],one_row*t[1,0],one_row*t[2,0]],0) #3*N; t is 3*1, and each row of tmat repeats the corresponding component of t N times
    p1_2 = tf.transpose(tf.matmul(R,tf.transpose(p1)) + tmat) #N*3, (R*p1^T + tmat)^T, i.e. the prediction of p at the j-th instant recomputed with the fitted R and t
    return R,t, p1_2# the new R and t, and the prediction of p at the j-th instant obtained with them
# *****************************************************************************************************
def Depth2Points3D_transformed_vector(Dlambda, indices , Rt, Ki, cen, origin, scaling):# reconstruct 3D points from depth
    num_of_points = tf.shape(Dlambda)[0] #N, the number of rows of Dlambda, i.e. the number of points
    num_of_batches = 1 # batch size
    num_of_points_in_each_batch = tf.cast(tf.divide(num_of_points,num_of_batches),tf.int32)# number of points handled per batch, cast to int32
    Dlambda_t = tf.reshape(Dlambda,[1,num_of_points]) # 1 x N, the N*1 depth vector reshaped (effectively transposed) to 1*N
    Dlambda3 = tf.concat([Dlambda_t,Dlambda_t],0)# 2 x N, the same row stacked twice
    Dlambda3 = tf.concat([Dlambda3,Dlambda_t],0) # 3 x N, stacked once more
    idx = tf.cast(indices, tf.float32)# the point indices cast to float32
    row_of_ones = tf.ones([1, num_of_points], tf.float32) # 1 x N matrix of ones
    # dividing xy and the batch number
    bxy = idx # N x 2 (row, column pairs)
    xy = tf.transpose(tf.reverse(bxy,[1])) # 2 x N, reverse the columns (row, col -> x, y) with tf.reverse, then transpose
    # tiling the scaling to match the data
    scaling2 = tf.reshape(scaling, [num_of_batches,1])# scaling reshaped to [1,1]
    tiled_scaling = tf.tile(scaling2, [tf.constant(1),num_of_points_in_each_batch])# tf.tile repeats the tensor, giving a 1xN matrix [scaling2, scaling2, ..., scaling2]
    scaling_row = tf.reshape(tiled_scaling,[1,num_of_points])# 1xN, the tiled scaling reshaped
    scaling_2_rows = tf.concat([scaling_row,scaling_row],0)# 2xN after concatenation
    # scaling the input
    scaled_xy = tf.multiply(xy, scaling_2_rows)# 2xN, tf.multiply is element-wise, so every coordinate is multiplied by the scaling factor
    # dividing the origin 0 and origin 1 of the origin
    origin0 = origin[...,0]# take index 0 along the last axis
    origin0 = tf.reshape(origin0,[num_of_batches,1])# origin0 reshaped to [1,1]
    origin1 = origin[...,1]# take index 1 along the last axis
    origin1 = tf.reshape(origin1,[num_of_batches,1])# origin1 reshaped to [1,1], effectively a scalar
    # tiling the origin0 to match the data
    tiled_origin0= tf.tile(origin0, [tf.constant(1),num_of_points_in_each_batch])# 1xN, [origin0, origin0, ..., origin0]
    origin0_row = tf.reshape(tiled_origin0,[1,num_of_points])# reshaped to 1xN (unchanged)
    # tiling the origin1 to match the data
    tiled_origin1= tf.tile(origin1, [tf.constant(1),num_of_points_in_each_batch])# 1xN, [origin1, origin1, ..., origin1]
    origin1_row = tf.reshape(tiled_origin1,[1,num_of_points])# reshaped to 1xN (unchanged)
    # concatenating origin0 and origin1 tiled
    origin_2_rows = tf.concat([origin0_row,origin1_row],0)# origin0_row and origin1_row stacked row-wise, so 2xN
    # computing the translated and scaled xy
    xy_translated_scaled = tf.add(scaled_xy ,origin_2_rows) # 2 x N, scaled_xy plus the origin element-wise; these are the image coordinates
    xy1 = tf.concat([xy_translated_scaled,row_of_ones],0)# 3xN with a bottom row of ones, i.e. the homogeneous representation
    cen1 = tf.multiply(row_of_ones,cen[0])# 1xN with every element equal to cen[0]
    cen2 = tf.multiply(row_of_ones,cen[1])# 1xN with every element equal to cen[1]
    cen3 = tf.multiply(row_of_ones,cen[2])# 1xN with every element equal to cen[2]
    cen_mat = tf.concat([cen1,cen2],0)
    cen_mat = tf.concat([cen_mat,cen3],0)# 3xN camera-centre matrix whose rows are cen[0], cen[1] and cen[2]
    Rt_Ki = tf.matmul(Rt,Ki)# Rt is just the identity matrix here; Ki is the inverse of the camera intrinsic matrix K
    Rt_Ki_xy1 = tf.matmul(Rt_Ki,xy1)# the homogeneous image coordinates back-projected through the inverse intrinsics
    point3D = tf.add(tf.multiply(Dlambda3,Rt_Ki_xy1),cen_mat)# 3xN; each ray is scaled by its depth (Dlambda3) and the camera centre is added, giving the reconstructed 3D points
    #DONE
    return tf.transpose(point3D)# transpose 3xN to Nx3 and return
# *****************************************************************************************************
def part_transformation2(i_limit,PC1,PC2,p):
    strp = i_limit[p,1]# index of the first point belonging to part p (the parts are the predefined groups of points representing each transformation)
    endp = i_limit[p,2]+1# one past the index of the last point belonging to part p
    p2p = tf.zeros([],dtype=tf.float32)# tensor initialised to zeros
    p1_2 = tf.zeros([],dtype=tf.float32)# tensor initialised to zeros
    p1 = PC1[strp:endp,:]# rows strp..endp-1, all columns: the 3D coordinates of part p at the i-th time instant
    p2 = PC2[strp:endp,:]# rows strp..endp-1, all columns: p at the j-th time instant as predicted by the warping function
    _,_,p1_2 = get_pc_transformation2(p1,p2)# p1_2 is the prediction of p at the j-th instant recomputed with the fitted R and t (R and t themselves are discarded)
    p2p = PC2[strp:endp,:]# rows strp..endp-1, all columns: the original warping-function prediction, not the refitted one
    return p2p, p1_2# p2p: the original prediction, p1_2: the prediction using the fitted R and t
# *****************************************************************************************************
def transform_depth_PCs_dp_based2(C,R,Rt,cen,K,Ki,origin,scaling,d_i,d_j,i_r1_c1_r2_c2,i_limit):
    d1 = d_i[0,...,0]# depth at the i-th time instant
    d2 = d_j[0,...,0]# depth at the j-th time instant
    r1 = i_r1_c1_r2_c2[:,1]-1; c1 = i_r1_c1_r2_c2[:,2]-1;# r1 is the 2nd column of i_r1_c1_r2_c2, c1 the 3rd
    r2 = i_r1_c1_r2_c2[:,3]-1; c2 = i_r1_c1_r2_c2[:,4]-1;# r2 is the 4th column of i_r1_c1_r2_c2, c2 the 5th
    n = tf.shape(i_r1_c1_r2_c2)[0]# n is the number of rows of i_r1_c1_r2_c2
    r1 = tf.reshape(r1,[n,1]); c1 = tf.reshape(c1,[n,1]);# reshape to nx1
    r2 = tf.reshape(r2,[n,1]); c2 = tf.reshape(c2,[n,1]);# reshape to nx1
    indices1 = tf.concat([r1,c1],1) #N*2, concatenated column-wise
    indices2 = tf.concat([r2,c2],1) #N*2, concatenated column-wise
    lambda1 = tf.gather_nd(d1,indices1); # gather the depth values of d1 at indices1
    lambda2 = tf.gather_nd(d2,indices2); # gather the depth values of d2 at indices2
    PC1 = Depth2Points3D_transformed_vector(lambda1, indices1 , Rt, Ki, cen, origin, scaling)# Nx3 3D reconstruction at the i-th instant
    PC2 = Depth2Points3D_transformed_vector(lambda2, indices2 , Rt, Ki, cen, origin, scaling)# Nx3 3D reconstruction at the j-th instant
    # Per-part rigid alignment: part 0 initialises the outputs, and parts 1..23 are appended row-wise with tf.concat.
    PC2p, PC1_2 = part_transformation2(i_limit,PC1,PC2,0)
    for p in range(1, 24):
        p2p, p1_2 = part_transformation2(i_limit,PC1,PC2,p)
        PC2p = tf.concat([PC2p,p2p],0)
        PC1_2 = tf.concat([PC1_2,p1_2],0)
    return PC2p, PC1_2 # all actual 3D coordinates at the j-th instant and all warping-based predictions, concatenated over the parts
# *****************************************************************************************************
def reproject(point3D, K,R,C):
# point3D is N*3 and M is 3*4
# xy is N*2
M = tf.matmul(K,R)
    M = tf.matmul(M,C)  # overall, M = K*R*C
point3D = tf.transpose(point3D)
num_of_points = tf.shape(point3D)[1]
row_of_ones = tf.ones([1, num_of_points], tf.float32)
xyz1 = tf.concat([point3D,row_of_ones],0)
xyS = tf.matmul(M, xyz1)
S = xyS[2,...]
S = tf.reshape(S,[1,num_of_points])
S2 = tf.concat([S,S],0)
S3 = tf.concat([S2,S],0)
xy1 = tf.truediv(xyS, S3)
xy = xy1[0:2,...]
xy = tf.transpose(xy)
x = xy[...,0]; x=tf.reshape(x,[num_of_points,1])
y = xy[...,1]; y=tf.reshape(y,[num_of_points,1])
rc = tf.concat([y,x],1)
return xy,rc
# *****************************************************************************************************
def compute_dp_tr_3d_2d_loss2(d_i,d_j,i_r1_c1_r2_c2,i_limit,C,R,Rt,cen,K,Ki,origin,scaling):
PC2p, PC1_2 = transform_depth_PCs_dp_based2(C,R,Rt,cen,K,Ki,origin,scaling,d_i,d_j,i_r1_c1_r2_c2,i_limit)
d = tf.subtract(PC2p, PC1_2)
err_vec = tf.sqrt(tf.reduce_sum(tf.square(d),1));
loss3d = tf.reduce_mean(err_vec)
x2,_ = reproject(PC2p, K,R,C)
x1_2,_ = reproject(PC1_2, K,R,C)
d = tf.subtract(x2, x1_2)
err_vec = tf.sqrt(tf.reduce_sum(tf.square(d),1));
loss2d = tf.reduce_mean(err_vec)
return loss3d, loss2d,PC2p, PC1_2
# *****************************************************************************************************
|
"""
==================================
Input and output (:mod:`pyrad.io`)
==================================
.. currentmodule:: pyrad.io
Functions to read and write data and configuration files.
Reading configuration files
===========================
.. autosummary::
:toctree: generated/
read_config
Reading radar data
==================
.. autosummary::
:toctree: generated/
get_data
Reading cosmo data
==================
.. autosummary::
:toctree: generated/
cosmo2radar_data
cosmo2radar_coord
hzt2radar_data
hzt2radar_coord
get_cosmo_fields
get_iso0_field
read_cosmo_data
read_cosmo_coord
read_hzt_data
read_iso0_mf_data
iso2radar_data
get_iso0_ref
Reading DEM data
==================
.. autosummary::
:toctree: generated/
dem2radar_data
dem2radar_coord
read_idrisi_data
read_idrisi_metadata
Reading other data
==================
.. autosummary::
:toctree: generated/
read_proc_periods
read_last_state
read_status
read_rad4alp_cosmo
read_rad4alp_vis
read_excess_gates
read_colocated_gates
read_colocated_data
read_timeseries
read_ts_cum
read_monitoring_ts
read_intercomp_scores_ts
get_sensor_data
read_smn
read_smn2
read_disdro_scattering
read_sun_hits
read_sun_hits_multiple_days
read_sun_retrieval
read_solar_flux
read_selfconsistency
read_antenna_pattern
read_meteorage
read_lightning
read_lightning_traj
read_lightning_all
read_trt_scores
read_trt_data
read_trt_traj_data
read_trt_thundertracking_traj_data
read_trt_cell_lightning
read_trt_info_all
read_trt_info_all2
read_trt_info
read_trt_info2
read_thundertracking_info
read_rhi_profile
read_histogram
read_quantiles
read_profile_ts
read_histogram_ts
read_quantiles_ts
read_ml_ts
read_windmills_data
Writing data
==================
.. autosummary::
:toctree: generated/
write_proc_periods
write_ts_lightning
send_msg
write_alarm_msg
write_last_state
write_smn
write_trt_info
write_trt_thundertracking_data
write_trt_cell_data
write_trt_cell_scores
write_trt_cell_lightning
write_trt_rpc
write_rhi_profile
write_field_coverage
write_cdf
write_histogram
write_quantiles
write_ts_polar_data
write_ts_grid_data
write_ts_cum
write_ts_stats
write_monitoring_ts
write_excess_gates
write_intercomp_scores_ts
write_colocated_gates
write_colocated_data
write_colocated_data_time_avg
write_sun_hits
write_sun_retrieval
write_fixed_angle
Auxiliary functions
===================
.. autosummary::
:toctree: generated/
get_rad4alp_prod_fname
map_hydro
map_Doppler
get_save_dir
make_filename
generate_field_name_str
get_fieldname_pyart
get_fieldname_cosmo
get_field_unit
get_file_list
get_rad4alp_dir
get_rad4alp_grid_dir
get_trtfile_list
get_new_rainbow_file_name
get_datatype_fields
get_dataset_fields
get_datetime
find_raw_cosmo_file
find_hzt_file
find_iso0_file
_get_datetime
Trajectory
==========
.. autosummary::
:toctree: generated/
Trajectory
TimeSeries
==========
.. autosummary::
:toctree: generated/
TimeSeries
"""
from .config import read_config
from .read_data_radar import get_data, add_field, interpol_field
from .read_data_cosmo import read_cosmo_data, read_cosmo_coord
from .read_data_cosmo import cosmo2radar_data, cosmo2radar_coord
from .read_data_cosmo import get_cosmo_fields
from .read_data_dem import read_idrisi_data, read_idrisi_metadata
from .read_data_hzt import read_hzt_data, hzt2radar_data, hzt2radar_coord
from .read_data_hzt import get_iso0_field
from .read_data_iso0_mf import read_iso0_mf_data, iso2radar_data, get_iso0_ref
from .read_data_other import read_status, read_rad4alp_cosmo, read_rad4alp_vis
from .read_data_other import read_timeseries, read_monitoring_ts, read_ts_cum
from .read_data_other import read_intercomp_scores_ts, read_quantiles
from .read_data_other import read_selfconsistency, read_colocated_gates
from .read_data_other import read_colocated_data, read_antenna_pattern
from .read_data_other import read_last_state, read_rhi_profile
from .read_data_other import read_excess_gates, read_histogram
from .read_data_other import read_profile_ts, read_histogram_ts
from .read_data_other import read_quantiles_ts, read_ml_ts, read_proc_periods
from .read_data_sensor import read_lightning, read_lightning_traj
from .read_data_sensor import get_sensor_data, read_smn, read_smn2
from .read_data_sensor import read_disdro_scattering, read_trt_data
from .read_data_sensor import read_trt_traj_data, read_lightning_all
from .read_data_sensor import read_trt_scores, read_trt_cell_lightning
from .read_data_sensor import read_meteorage, read_trt_info_all, read_trt_info
from .read_data_sensor import read_thundertracking_info, read_windmills_data
from .read_data_sensor import read_trt_info2, read_trt_info_all2
from .read_data_sensor import read_trt_thundertracking_traj_data
from .read_data_sun import read_sun_hits_multiple_days, read_sun_hits
from .read_data_sun import read_sun_retrieval, read_solar_flux
from .write_data import write_smn, write_ts_polar_data, write_ts_cum
from .write_data import write_monitoring_ts, write_intercomp_scores_ts
from .write_data import write_sun_hits, write_sun_retrieval
from .write_data import write_colocated_gates, write_colocated_data
from .write_data import write_colocated_data_time_avg, write_cdf
from .write_data import write_rhi_profile, write_field_coverage
from .write_data import write_last_state, write_alarm_msg, send_msg
from .write_data import write_excess_gates, write_trt_cell_data
from .write_data import write_histogram, write_quantiles, write_ts_lightning
from .write_data import write_trt_cell_scores, write_trt_cell_lightning
from .write_data import write_trt_info, write_fixed_angle, write_proc_periods
from .write_data import write_trt_thundertracking_data, write_ts_grid_data
from .write_data import write_trt_rpc, write_ts_stats
from .io_aux import get_save_dir, make_filename, get_new_rainbow_file_name
from .io_aux import get_datetime, get_dataset_fields, map_hydro, map_Doppler
from .io_aux import get_file_list, get_trtfile_list, get_datatype_fields
from .io_aux import get_fieldname_pyart, get_field_unit, get_fieldname_cosmo
from .io_aux import generate_field_name_str, find_raw_cosmo_file
from .io_aux import find_hzt_file, _get_datetime, get_rad4alp_prod_fname
from .io_aux import get_rad4alp_dir, get_rad4alp_grid_dir, find_iso0_file
from .trajectory import Trajectory
from .timeseries import TimeSeries
__all__ = [s for s in dir() if not s.startswith('_')]
|
import numpy as np
import os
from PySide import QtGui, QtCore
import sharppy.sharptab as tab
import sharppy.databases.inset_data as inset_data
from sharppy.sharptab.constants import *
## routine written by Kelton Halbert and Greg Blumberg
## keltonhalbert@ou.edu and wblumberg@ou.edu
__all__ = ['backgroundSTPEF', 'plotSTPEF']
class backgroundSTPEF(QtGui.QFrame):
'''
    Draw the background frame and lines for the conditional STP/EF tornado probability plot frame
'''
def __init__(self):
super(backgroundSTPEF, self).__init__()
self.initUI()
def initUI(self):
## window configuration settings,
        ## such as padding, width, height, and
## min/max plot axes
self.setStyleSheet("QFrame {"
" background-color: rgb(0, 0, 0);"
" border-width: 1px;"
" border-style: solid;"
" border-color: #3399CC;}")
if self.physicalDpiX() > 75:
fsize = 10
else:
fsize = 11
self.plot_font = QtGui.QFont('Helvetica', fsize + 1)
self.box_font = QtGui.QFont('Helvetica', fsize)
self.plot_metrics = QtGui.QFontMetrics( self.plot_font )
self.box_metrics = QtGui.QFontMetrics(self.box_font)
self.plot_height = self.plot_metrics.xHeight() + 5
self.box_height = self.box_metrics.xHeight() + 5
self.lpad = 0.; self.rpad = 0.
self.tpad = 25.; self.bpad = 15.
self.wid = self.size().width() - self.rpad
self.hgt = self.size().height() - self.bpad
self.tlx = self.rpad; self.tly = self.tpad
self.brx = self.wid; self.bry = self.hgt
self.probmax = 70.; self.probmin = 0.
self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)
self.plotBitMap.fill(QtCore.Qt.black)
self.plotBackground()
def resizeEvent(self, e):
'''
Handles the event the window is resized
'''
self.initUI()
def plotBackground(self):
'''
Handles painting the frame.
'''
## initialize a painter object and draw the frame
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
self.draw_frame(qp)
qp.end()
def setBlackPen(self, qp):
color = QtGui.QColor('#000000')
color.setAlphaF(.5)
pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
qp.setPen(pen)
qp.setBrush(brush)
return qp
def draw_frame(self, qp):
'''
Draw the background frame.
qp: QtGui.QPainter object
'''
## set a new pen to draw with
EF1_color = "#006600"
EF2_color = "#FFCC33"
EF3_color = "#FF0000"
EF4_color = "#FF00FF"
pen = QtGui.QPen(QtCore.Qt.white, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.plot_font)
rect1 = QtCore.QRectF(1.5, 2, self.brx, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'Conditional Tornado Probs based on STPC')
qp.setFont(QtGui.QFont('Helvetica', 9))
color = QtGui.QColor(EF1_color)
pen = QtGui.QPen(color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect1 = QtCore.QRectF(self.stpc_to_pix(.2), 2 + self.plot_height, 10, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'EF1+')
color = QtGui.QColor(EF2_color)
pen = QtGui.QPen(color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect1 = QtCore.QRectF(self.stpc_to_pix(1.1), 2 + self.plot_height, 10, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'EF2+')
color = QtGui.QColor(EF3_color)
pen = QtGui.QPen(color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect1 = QtCore.QRectF(self.stpc_to_pix(3.1), 2 + self.plot_height, 10, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'EF3+')
color = QtGui.QColor(EF4_color)
pen = QtGui.QPen(color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect1 = QtCore.QRectF(self.stpc_to_pix(6.1), 2 + self.plot_height, 10, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'EF4+')
pen = QtGui.QPen(QtCore.Qt.blue, 1, QtCore.Qt.DashLine)
qp.setPen(pen)
ytick_fontsize = 10
y_ticks_font = QtGui.QFont('Helvetica', ytick_fontsize)
qp.setFont(y_ticks_font)
efstp_inset_data = inset_data.condSTPData()
texts = efstp_inset_data['ytexts']
spacing = self.bry / 10.
y_ticks = np.arange(self.tpad, self.bry+spacing, spacing)
for i in xrange(len(y_ticks)):
pen = QtGui.QPen(QtGui.QColor("#0080FF"), 1, QtCore.Qt.DashLine)
qp.setPen(pen)
try:
qp.drawLine(self.tlx, self.prob_to_pix(int(texts[i])), self.brx, self.prob_to_pix(int(texts[i])))
except:
continue
color = QtGui.QColor('#000000')
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
ypos = spacing*(i+1) - (spacing/4.)
ypos = self.prob_to_pix(int(texts[i])) - ytick_fontsize/2
rect = QtCore.QRect(self.tlx, ypos, 20, ytick_fontsize)
pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
width = self.brx / 12
spacing = self.brx / 12
center = np.arange(spacing, self.brx, spacing) - width/2.
texts = efstp_inset_data['xticks']
# Draw the x tick marks
qp.setFont(QtGui.QFont('Helvetica', 8))
for i in xrange(np.asarray(texts).shape[0]):
color = QtGui.QColor('#000000')
color.setAlpha(0)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
rect = QtCore.QRectF(center[i], self.prob_to_pix(-2), width, 4)
# Change to a white pen to draw the text below the box and whisker plot
pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
# Draw the EF1+ stuff
ef1 = efstp_inset_data['EF1+']
color = QtGui.QColor(EF1_color)
pen = QtGui.QPen(color, 3, QtCore.Qt.SolidLine)
qp.setPen(pen)
for i in xrange(1, np.asarray(texts).shape[0], 1):
qp.drawLine(center[i-1] + width/2, self.prob_to_pix(ef1[i-1]), center[i] + width/2, self.prob_to_pix(ef1[i]))
# Draw the EF2+ stuff
ef1 = efstp_inset_data['EF2+']
color = QtGui.QColor(EF2_color)
pen = QtGui.QPen(color, 3, QtCore.Qt.SolidLine)
qp.setPen(pen)
for i in xrange(1, np.asarray(texts).shape[0], 1):
qp.drawLine(center[i-1] + width/2, self.prob_to_pix(ef1[i-1]), center[i] + width/2, self.prob_to_pix(ef1[i]))
# Draw the EF3+ stuff
ef1 = efstp_inset_data['EF3+']
color = QtGui.QColor(EF3_color)
pen = QtGui.QPen(color, 3, QtCore.Qt.SolidLine)
qp.setPen(pen)
for i in xrange(1, np.asarray(texts).shape[0], 1):
qp.drawLine(center[i-1] + width/2, self.prob_to_pix(ef1[i-1]), center[i] + width/2, self.prob_to_pix(ef1[i]))
# Draw the EF4+ stuff
ef1 = efstp_inset_data['EF4+']
color = QtGui.QColor(EF4_color)
pen = QtGui.QPen(color, 3, QtCore.Qt.SolidLine)
qp.setPen(pen)
for i in xrange(1, np.asarray(texts).shape[0], 1):
qp.drawLine(center[i-1] + width/2, self.prob_to_pix(ef1[i-1]), center[i] + width/2, self.prob_to_pix(ef1[i]))
def prob_to_pix(self, prob):
scl1 = self.probmax - self.probmin
scl2 = self.probmin + prob
return self.bry - (scl2 / scl1) * (self.bry - self.tpad)
def stpc_to_pix(self, stpc):
spacing = self.brx / 12
center = np.arange(spacing, self.brx, spacing)
if stpc == 0:
i = 0
elif stpc >= 0.01 and stpc < .5:
i = 1
elif stpc >= .5 and stpc < 1:
i = 2
elif stpc >= 1 and stpc < 2:
i = 3
elif stpc >= 2 and stpc < 3:
i = 4
elif stpc >= 3 and stpc < 4:
i = 5
elif stpc >= 4 and stpc < 6:
i = 6
elif stpc >= 6 and stpc < 8:
i = 7
elif stpc >= 8 and stpc < 10:
i = 8
elif stpc >= 10 and stpc < 12:
i = 9
else:
i = 10
return center[i]
class plotSTPEF(backgroundSTPEF):
'''
Plot the data on the frame. Inherits the background class that
plots the frame.
'''
def __init__(self):
super(plotSTPEF, self).__init__()
self.prof = None
def setProf(self, prof):
self.prof = prof
self.stpc = prof.stp_cin
self.clearData()
self.plotBackground()
self.plotData()
self.update()
def resizeEvent(self, e):
'''
Handles when the window is resized
'''
super(plotSTPEF, self).resizeEvent(e)
self.plotData()
def paintEvent(self, e):
super(plotSTPEF, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.drawPixmap(1, 1, self.plotBitMap)
qp.end()
def clearData(self):
'''
Handles the clearing of the pixmap
in the frame.
'''
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.plotBitMap.fill(QtCore.Qt.black)
def plotData(self):
'''
Handles painting on the frame
'''
if self.prof is None:
return
## this function handles painting the plot
        ## create a new painter object
qp = QtGui.QPainter()
self.draw_stp(qp)
def draw_stp(self, qp):
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
stpc_pix = self.stpc_to_pix(self.stpc)
pen = QtGui.QPen(QtGui.QColor("#FFFFFF"), 1.5, QtCore.Qt.DotLine)
qp.setPen(pen)
qp.drawLine(stpc_pix, self.prob_to_pix(0), stpc_pix, self.prob_to_pix(70))
qp.end()
|
import json
import re
from random import choice
from textwrap import wrap
from typing import Dict, List, Tuple, Union
from PIL import Image, ImageDraw, ImageFont
from .type_interfaces import GraphicInfo, GraphicSettings
from .validation import __validate_text_loaded
def __load_quotes_txt(file_path: str) -> List[Tuple[str]]:
"""Scrape quotes from a given TXT file. Titles need to be wrapped
by square brackets ([]) and the respective quote needs to come in
the next line.
Parameters
----------
file_path : str
Path to the .txt file with lyrics/quotes.
Returns
-------
List[Tuple[str]]
List of tuples that contain the title and text of each quote loaded.
"""
# Load the source file contents as a single string
with open(file_path, "r", encoding="utf-8") as source_file:
contents = "".join(source_file.readlines())
# Get all the titles
pattern_titles = r"\[(.*?)\]"
titles = re.findall(pattern_titles, contents)
# Get all the quotes
pattern_quotes = r"^([^\[].*?[^\]])$"
quotes = re.findall(pattern_quotes, contents, flags=re.MULTILINE)
# Validate the loaded titles and quotes
__validate_text_loaded(titles, quotes)
# Pair the titles with the respective quotes in two-item lists
titles_quotes = list(zip(titles, quotes))
return titles_quotes
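# Example of the .txt layout assumed by the regexes above (hypothetical file contents):
#   [Song Title]
#   Some quote or lyric line
#   [Another Title]
#   Another quote
# __load_quotes_txt() on such a file would return
#   [('Song Title', 'Some quote or lyric line'), ('Another Title', 'Another quote')]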
def __load_text_json(file_path: str) -> Dict[str, str]:
"""Load quotes from a JSON file, i.e., load a JSON objects that maps titles to quotes/lyrics.
Parameters
----------
file_path : str
Path to the .json file with lyrics/quotes.
Returns
-------
Dict[str, str]
Dictionary that maps titles to the respective quote/lyrics.
"""
with open(file_path, "r", encoding="utf-8") as json_file:
json_quotes = json.load(json_file)
return json_quotes
def parse_json_settings(file_path: str) -> GraphicSettings:
"""Load a the `graphic_settings` from a JSON file.
Parameters
----------
file_path : str
Path to the .json file with lyrics/quotes.
Returns
-------
GraphicSettings
A dictionary of settings for a graphic.
"""
with open(file_path, "r", encoding="utf-8") as json_file:
json_settings = json.load(json_file)
return json_settings
def __update_title_counts(quotes: List[Tuple[str]]) -> Dict[str, str]:
"""Given a list of lists of titles and quotes loaded from a .txt file, update the titles with the respective frequencies.
Parameters
----------
quotes : List[Tuple[str]]
List of tuples that contain the title and quote of each graphic.
Returns
-------
Dict[str, str]
Dictionary that maps titles to the corresponding lyrics/quote.
"""
# Freqs of each unique quote
title_freqs = {}
# Dictionary with the updated titles mapped to the corresponding quotes
updated_quotes = {}
# Loop through the loaded quotes to update titles with their frequencies
for quote in quotes:
title = quote[0]
text = quote[1]
# If this quote title has been seen before, update the title with its current frequency
if title in title_freqs:
# Update the title frequency
title_freqs[title] += 1
# Update the title with its current frequency
updated_title = f"{title} {str(title_freqs[title])}"
# Add the updated quote information to the dictionary
updated_quotes[updated_title] = text
# If this is the first time seeing the quote, simply use it as is
else:
title_freqs[title] = 1
updated_quotes[title] = text
return updated_quotes
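# Example (hypothetical input): repeated titles get a running count appended, e.g.
#   __update_title_counts([('Intro', 'line a'), ('Intro', 'line b')])
#   -> {'Intro': 'line a', 'Intro 2': 'line b'}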
def get_ready_text(file_path: str) -> Dict[str, str]:
"""Load quotes/lyrics from a source file, .txt or .json, and update the corresponding
quotes/lyrics' titles with their frequency (in the case of the former option).
Parameters
----------
file_path : str
Path to the .txt or .json file.
Returns
-------
Dict[str, str]
A mapping of the loaded titles to the respective quote/lyrics.
"""
# Get the file extension and load the quotes accordingly (from a TXT or JSON)
file_ext = file_path.split(".")[-1]
    # TXT files need to be loaded and have their titles updated (so there are
    # no duplicate titles)
if file_ext == "txt":
titles_quotes = __load_quotes_txt(file_path)
# And update the titles with their frequencies
titles_quotes_ready = __update_title_counts(titles_quotes)
    # Since JSON objects can't have duplicate keys, it is assumed that the
    # titles are already unique in some way
elif file_ext == "json":
titles_quotes_ready = __load_text_json(file_path)
return titles_quotes_ready
|
#!/usr/bin/env python3
__author__ = "tooraj_jahangiri"
__email__ = "toorajjahangiri@gmail.com"
from base64 import b64encode, b64decode
from pointerdic import PointerDic
from time import perf_counter
from random import choice
def main() -> int:
"""
PointerDic Main prg: [check(class): ./pointerdic.py]
I did not use argparse or sys.argv
ALL INPUT CMD SUPPORT:
/I: change intro [list]
/F: update focus [int]
/R: reversed map [int]
>>: focus up [int] , default= 1
<<: focus down [int] , default= 1
+: add items to map [list]
-: sub items from map [list]
/reset: reset to default
----
en: encrypt data
de: decrypt data
pattern:/support command/
/All command support/-> [cmd][value]
/Only 'en' & 'de'/-> [cmd]
example:/use command/
All available -> '/F 3' |mean update focus to 3
en & de -> 'en' |mean set to encrypt mode for one[1] period encrypt
"""
# set PointerDic class
print("/...STARTING.../\t[W&8]")
remap = [chr(i) for i in range(33, 127)] # make list value ASCII 33-127
print("@/>:[MAKE INTRO]", end="\n")
foc = choice([num for num in range(0, len(remap))]) # chose focus
print(f"@/>:[CHOSE FOCUS]=[{foc}]", end="\n")
cls_active = PointerDic(remap, focus=foc) # init class
print("@/>:[POINTER DIC]", end="\n")
trc_e = str.maketrans(cls_active.map['A']) # make encrypt translator 'Alpha'
trc_d = str.maketrans(cls_active.map['B']) # make decrypt translator 'Beta'
print("@/>:[STRING TRANSLATOR]", end="\n")
# remove used value
del remap, foc
print("*Hint: if u need Help type ['help'] or ['/?'].\tQuit is ['exit'] or ['/*'].", end="\n")
order = {
"/I": ('cls_active.change_intro', list, 'change intro char list. type[list]'), # intro
"/F": ('cls_active.update_map', int, 'change focus. type[int]'), # focus
"/R": ('cls_active.__reversed__', int, 'reversed skeleton. optional focus type[int]'), # reverse
">>": ('cls_active.__rshift__', int, 'rshift focus default(focus + 1). type[int]'), # rshift
"<<": ('cls_active.__lshift__', int, 'lshift focus default(focus - 1). type[int]'), # lshift
"+": ('cls_active.__add__', list, 'add new val in intro. type[list]'), # add
"-": ('cls_active.__sub__', list, 'sub val from intro. type[list]'), # sub
"/reset": ('cls_active.reset', lambda x: None, 'reset class.'), # reset
}
cmd = {
# encrypt use base64 > translate pointerdic > encrypted value
"en": (lambda x: x.encode('utf-8'), b64encode, lambda x: x.decode('ASCII').translate(trc_e), 'encrypt data.'),
# decrypt translate pointerdic > base64 > source
"de": (lambda x: x.translate(trc_d), b64decode, lambda x: x.decode('utf-8'), 'decrypt data'),
}
d_inp: [int, str] = lambda x: 1 if x == " " or x == [] else x # input check if need: set
while True:
icm, *inp = str(input("/... >: ")).split(" ") # get command and value
inp = inp[0] if len(inp) == 1 else inp # if 1 value get value else all value
t0 = perf_counter # add time 0
if icm in ("exit", "/*"):
break
elif icm in ("help", "/?"):
t0 = t0()
new = {}
new.update(order)
new.update(cmd)
print("Command\t\tAction")
for k, v in new.items():
print(f"[key]=[{k}]\t[Hint]=[{v[-1]}]", end='\n')
print("Help: ['help'] or ['/?'].\nExit: ['exit'] or ['/*']")
del new, k, v
elif icm in order:
t0 = t0() # add time 0
print(f"order [ {icm} ]\t{inp}")
get_order = f"{order[icm][0]}({order[icm][1](d_inp(inp))})"
exec(get_order)
print(f"result:\n[{order[icm][2]}]\t->G_Hash:[{hash(''.join(cls_active.map['G'].keys()))}]<-")
del get_order, inp
elif icm in cmd:
t0 = t0() # add time 0
print(f"command [ {icm} ]")
check = (1, [], " ", "")
val = str(input('??>: ')) if inp in check else ''.join(inp)
get_cmd = cmd[icm][0](val)
mk_cm = cmd[icm][1](get_cmd)
result = cmd[icm][2](mk_cm)
print(f"result:\n\n->[{result}]<-", end='\n\n')
del val, inp, get_cmd, mk_cm, result
else:
print(ValueError(f"command [ {icm} ] is not exist !"))
total_time = perf_counter() - t0
print(f"!!PROSES TIME: [{total_time:.5f}]\n-->> Pointer_Focus: / {cls_active.focus} /")
return 0
if __name__ == '__main__':
exit(main())
|
from flask import abort, Blueprint, g, make_response, render_template
from portfolio.minify import render_minified
from portfolio.projects import Project
from portfolio.sitemap import Sitemap
site = Blueprint('site', __name__, static_folder='static')
projects = Project()
# home
@site.route('/')
def index():
return render_minified('home.html', projects=projects.ordered())
# project pages
@site.route('/<key>/')
def portfolio(key):
# check if project exists
if not projects.exist(key):
return abort(404)
# load project info
project = projects.get(key)
g.title = project['title']
g.cover = project['cover']
g.tagline = project['tagline']
return render_minified('{}.html'.format(key),
project=project,
suggestions=projects.suggestion(key, 6))
# seo and browser
@site.route('/robots.txt')
def robots():
response = make_response('User-agent: *\nDisallow:')
response.headers['Content-Type'] = 'text/plain'
return response
@site.route('/sitemap.xml')
def sitemap():
info = Sitemap(project_list=projects.order)
xml = render_template('sitemap.xml', pages=info.pages)
response = make_response(xml)
response.headers['Content-Type'] = 'application/xml'
return response
@site.route('/favicon.ico')
def favicon():
return site.send_static_file('imgs/favicon/favicon.ico')
# title and seo info auto generator
@site.context_processor
def title():
# basic values
name = 'Mabel Lazzarin'
about = "{}'s Portfolio | UX & Visual Designer".format(name)
image = 'cover.jpg'
# load page specific values
subtitle = g.get('title', None)
tagline = g.get('tagline', None)
title = '{} | {}'.format(subtitle, name) if subtitle else name
description = tagline if tagline else about
cover = g.get('cover', image)
# set page class
page_class = 'home' if name == title else 'project'
# return values
return {'name': name,
'title': title,
'description': description,
'cover': cover,
'page_class': page_class}
|
from app import app, command_system
from app.scheduledb import ScheduleDB
def auto_posting_off(uid, key, arg=""):
    # If the user is not in the database yet, show them a registration prompt
try:
with ScheduleDB(app.config) as db:
user = db.find_user(uid)
if not user or user[0] == '':
message = "Вас ещё нет в базе данных, поэтому пройдите простую процедуру регистрации:\n"
message += 'Введите команду(без кавычек):\n\nрегистрация "название вуза" "факультет" "группа"\n\n'
message += 'Если вы допустите ошибку, то просто наберите команду заново.\n'
return message
except BaseException as e:
return 'Случилось что то странное, попробуйте ввести команду заново'
try:
with ScheduleDB(app.config) as db:
user = db.find_user(uid)
if user:
if db.set_auto_post_time(uid, None, None):
return 'Автоматическая отправка расписания успешно отключена'
else:
return 'Случилось что то странное, попробуйте ввести команду заново'
else:
return 'Вас ещё нет в базе данных, поэтому пройдите простую процедуру регистрации'
except BaseException as e:
return 'Случилось что то странное, попробуйте ввести команду заново'
auto_posting_off_command = command_system.Command()
auto_posting_off_command.keys = ['ap off', 'автопостинг off', '/auto_posting_off', 'auto_posting_off']
auto_posting_off_command.description = 'Выключение автоматической отправки расписания'
auto_posting_off_command.process = auto_posting_off
|
"""Filters for Kerneladmin."""
def gvariant(lst):
"""Turn list of strings to gvariant list."""
assert isinstance(lst, list), "{} is not a list".format(lst)
content = ", ".join(
"'{}'".format(l) for l in lst
)
return '[{}]'.format(content)
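# Usage sketch (assumed): each element is quoted and the result wrapped in
# GVariant array syntax, e.g.
#   gvariant(['user-theme', 'dash-to-dock'])  ->  "['user-theme', 'dash-to-dock']"
# In a playbook this could appear as "{{ my_list | gvariant }}" (hypothetical task).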
class FilterModule:
"""FilterModule."""
def filters(self):
"""Return all jinja filters."""
return {
'gvariant': gvariant
}
|
#
# PySNMP MIB module DLINK-3100-MIR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLINK-3100-MIR-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:33:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
rnd, = mibBuilder.importSymbols("DLINK-3100-MIB", "rnd")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, Counter32, iso, Unsigned32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Bits, Integer32, MibIdentifier, ModuleIdentity, ObjectIdentity, IpAddress, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter32", "iso", "Unsigned32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Bits", "Integer32", "MibIdentifier", "ModuleIdentity", "ObjectIdentity", "IpAddress", "Counter64")
RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString")
rlMir = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61))
rlMir.setRevisions(('2007-01-02 00:00',))
if mibBuilder.loadTexts: rlMir.setLastUpdated('200701020000Z')
if mibBuilder.loadTexts: rlMir.setOrganization('Dlink, Inc. Dlink Semiconductor, Inc.')
rlMirMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMirMibVersion.setStatus('current')
rlMirMaxNumOfMRIsAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 2), Integer32().clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirMaxNumOfMRIsAfterReset.setStatus('current')
rlMirMaxNumOfMRIs = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMirMaxNumOfMRIs.setStatus('current')
rlMirCurMriNum = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirCurMriNum.setStatus('current')
rlMirInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 5), )
if mibBuilder.loadTexts: rlMirInterfaceTable.setStatus('current')
rlMirInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 5, 1), ).setIndexNames((0, "DLINK-3100-MIR-MIB", "rlMirInterfaceIfIndex"))
if mibBuilder.loadTexts: rlMirInterfaceEntry.setStatus('current')
rlMirInterfaceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 5, 1, 1), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirInterfaceIfIndex.setStatus('current')
rlMirInterfaceMrid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 5, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirInterfaceMrid.setStatus('current')
rlMirVlanBaseReservedPortsTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 6), )
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsTable.setStatus('current')
rlMirVlanBaseReservedPortsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 6, 1), ).setIndexNames((0, "DLINK-3100-MIR-MIB", "rlMirVlanBaseReservedPortsIfIndex"))
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsEntry.setStatus('current')
rlMirVlanBaseReservedPortsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 6, 1, 1), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsIfIndex.setStatus('current')
rlMirVlanBaseReservedPortsStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 6, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsStatus.setStatus('current')
rlMirVlanBaseLogicalPortsTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 7), )
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsTable.setStatus('current')
rlMirVlanBaseLogicalPortsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 7, 1), ).setIndexNames((0, "DLINK-3100-MIR-MIB", "rlMirVlanBaseLogicalPortsIfIndex"))
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsEntry.setStatus('current')
rlMirVlanBaseLogicalPortsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 7, 1, 1), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsIfIndex.setStatus('current')
rlMirVlanBaseLogicalPortsReservedIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 7, 1, 2), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsReservedIfIndex.setStatus('current')
rlMirVlanBaseLogicalPortsVlanTag = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsVlanTag.setStatus('current')
rlMirVlanBaseLogicalPortsStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 61, 7, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsStatus.setStatus('current')
mibBuilder.exportSymbols("DLINK-3100-MIR-MIB", rlMirCurMriNum=rlMirCurMriNum, rlMirVlanBaseLogicalPortsEntry=rlMirVlanBaseLogicalPortsEntry, rlMirVlanBaseReservedPortsIfIndex=rlMirVlanBaseReservedPortsIfIndex, rlMirVlanBaseLogicalPortsReservedIfIndex=rlMirVlanBaseLogicalPortsReservedIfIndex, rlMirMaxNumOfMRIsAfterReset=rlMirMaxNumOfMRIsAfterReset, rlMirInterfaceTable=rlMirInterfaceTable, rlMirInterfaceIfIndex=rlMirInterfaceIfIndex, rlMirVlanBaseLogicalPortsStatus=rlMirVlanBaseLogicalPortsStatus, rlMirVlanBaseLogicalPortsIfIndex=rlMirVlanBaseLogicalPortsIfIndex, rlMirInterfaceEntry=rlMirInterfaceEntry, PYSNMP_MODULE_ID=rlMir, rlMirVlanBaseReservedPortsStatus=rlMirVlanBaseReservedPortsStatus, rlMirMaxNumOfMRIs=rlMirMaxNumOfMRIs, rlMirMibVersion=rlMirMibVersion, rlMirVlanBaseLogicalPortsVlanTag=rlMirVlanBaseLogicalPortsVlanTag, rlMirVlanBaseReservedPortsTable=rlMirVlanBaseReservedPortsTable, rlMirVlanBaseReservedPortsEntry=rlMirVlanBaseReservedPortsEntry, rlMirVlanBaseLogicalPortsTable=rlMirVlanBaseLogicalPortsTable, rlMir=rlMir, rlMirInterfaceMrid=rlMirInterfaceMrid)
|
import re
from bs4 import BeautifulSoup
from LimeSoup.lime_soup import Soup, RuleIngredient
from LimeSoup.parser.parser_paper_wiley import ParserPaper
__author__ = 'Zach Jensen'
__maintainer__ = ''
__email__ = 'zjensen@mit.edu'
__version__ = '0.3.0'
class WileyRemoveTagsSmallSub(RuleIngredient):
@staticmethod
def _parse(html_str):
"""
        Deal with spaces in the sub and small tags and then remove them.
"""
parser = ParserPaper(html_str, parser_type='html.parser', debugging=True)
rules = [{'name':'i'},
{'name':'sub'},
{'name':'sup'},
{'name':'b'},
{'name':'em'}]
parser.operation_tag_remove_space(rules)
        # Remove all span tags that are inside of a paragraph 'p'
parser.strip_tags(rules)
tags = parser.soup.find_all(**{'name': 'p'})
for tag in tags:
tags_inside_paragraph = tag.find_all(**{'name': 'span'})
for tag_inside_paragraph in tags_inside_paragraph:
tag_inside_paragraph.replace_with_children()
        # Remove specific span tags that are inside of a span or p
parser.strip_tags(rules)
tags = parser.soup.find_all(**{'name': re.compile('span|p')})
for tag in tags:
for rule in rules:
tags_inside_paragraph = tag.find_all(**rule)
for tag_inside_paragraph in tags_inside_paragraph:
tag_inside_paragraph.replace_with_children()
        # Recreate the ParserPaper to work around a bug in BeautifulSoup
html_str = str(parser.soup)
parser = ParserPaper(html_str, parser_type='html.parser', debugging=False)
return parser.raw_html
class WileyRemoveTrash(RuleIngredient):
@staticmethod
def _parse(html_str):
list_remove = [
{'name': 'div', 'class': 'loa-wrappers loa_authors hidden-xs'},
{'name':'div', 'class':'article-header__authors-container'}, # Authors X
{'name': 'div', 'id': 'art-admin'}, # Data rec./accept.
{'name': 'div', 'class': 'article-section__inline-figure'},
{'name': 'section', 'class':'article-section article-section--inline-figure'},
{'name':'figure'}, # Figures X
{'name': 'div', 'id': 'crossmark-content'}, # Another Logo
{'name': 'code'}, # Codes inside the HTML
{'name': 'div', 'class': 'article-table-content'}, # Remove table X
{'name': 'header', 'class': 'page-header'}, # Navigation links X
{'name': 'div', 'class':'page-footer'},
{'name':'div', 'class':'js-module notification'},
{'name':'img'},
{'name':'aside'},
{'name':'div', 'class':'issue-header js-module'},
{'name':'span', 'class':'article-header__category article-category'},
{'name':'article-header__meta-info-container'},
{'name':'a', 'class':'figZoom'},
{'name':'ul', 'class':'meta-navigation u-list-plain'},
{'name':'div', 'id':'js-article-nav'},
{'name':'section', 'id':'pdf-section'},
{'name':'section', 'class':'article-footer__section article-section'},
{'name':'div', 'class':'l-article-support'},
{'name':'footer', 'role':'contentinfo'},
{'name':'div', 'data-module':'infoplane'},
{'name':'header', 'role':'banner'},
{'name':'header', 'class':'journal-header'},
{'name':'div', 'class':'article-header__meta-info-container'},
{'name':'div', 'class':'article-header__references-container'},
{'name':'section', 'id':'footer-article-info'},
{'name':'section', 'id':'related-content'},
{'name':'section', 'id':'footer-citing'},
{'name':'section', 'id':'footer-support-info'},
{'name':'ul', 'class':'article-section__references-list-additional u-horizontal-list'}, # Remove Footnote X
{'name':'a', 'class':'bibLink tab-link'}, # remove references
{'name':'a', 'class':'link__reference js-link__reference'},
{'name':'div', 'class':'loa-wrapper loa-authors hidden-xs'},
{'name':'div', 'class':'rlist--inline loa comma visible-xs mobile-authors loa-authors-trunc'},
{'name':'div', 'class':'readCube-sharing hidden'},
{'name':'div', 'class':'modal__header'},
{'name':'div', 'class':'modal__body'},
{'name':'div', 'class':'readCube-sharing hidden'},
{'name':'div', 'class':'ux-modal-container readCube-sharing__modal'},
{'name':'div', 'class':'share__block dropBlock__holder fixed'},
{'name':'div', 'class':'article-citation'},
{'name':'span', 'class':'inline-equation__label'},
{'name':'div', 'class':'accordion article-accordion'},
]
parser = ParserPaper(html_str, parser_type='html.parser', debugging=False)
parser.remove_tags(rules=list_remove)
parser.remove_tag(
rules=[{'name': 'p', 'class': 'bold italic', 'string': parser.compile('First published on')}]
)
return parser.raw_html
class WileyCreateTags(RuleIngredient):
@staticmethod
def _parse(html_str):
        # This creates a standard naming scheme for section tags
parser = ParserPaper(html_str, parser_type='html.parser', debugging=False)
parser.create_tag_sections()
return parser.raw_html
class WileyCreateTagAbstract(RuleIngredient):
@staticmethod
def _parse(html_str):
# Create tag from selection function in ParserPaper
parser = ParserPaper(html_str, parser_type='html.parser', debugging=False)
parser.create_tag_from_selection(
rule={'name': 'p', 'class': 'abstract'},
name_new_tag='h2'
)
# Guess introductions
#parser.create_tag_to_paragraphs_inside_tag(
# # rule={'name': 'section_h1'},
# name_new_tag='h2',
# name_section='Introduction(guess)'
# )
return parser.raw_html
class WileyReplaceDivTag(RuleIngredient):
@staticmethod
def _parse(html_str):
parser = ParserPaper(html_str, parser_type='html.parser', debugging=False)
rules = [{'name': 'div'}]
parser.strip_tags(rules)
rules = [{'name': 'span', 'id': parser.compile('^sect[0-9]+$')}] # some span are heading
_ = parser.strip_tags(rules)
return parser.raw_html
class WileyCollect(RuleIngredient):
@staticmethod
def _parse(html_str):
soup = BeautifulSoup(html_str, 'html.parser')
parser = ParserPaper(html_str, parser_type='html.parser', debugging=False)
# Collect information from the paper using ParserPaper
keywords = soup.find_all(attrs={'name':'citation_keywords'})
keys = []
for key in keywords:
keys.append(parser.format_text(key.get('content')))
journal_name = soup.find(attrs={'name':'citation_journal_title'})
journal_name = parser.format_text(journal_name.get('content'))
doi = soup.find(attrs={'name':'citation_doi'})
doi = doi.get('content')
title = soup.find(attrs={'name':'citation_title'})
title = parser.format_text(title.get('content'))
# Create tag from selection function in ParserPaper
data = list()
"""
Can deal with multiple Titles in the same paper
"""
parser.deal_with_sections()
data = parser.data_sections
index2 = max(len(data)-1,1)
check = ['Abstract', 'Acknowledgements', 'Experimental Section', 'Supporting Information']
no_sections = True
for d in data:
if d['name'] not in check:
no_sections = False
if no_sections:
section = soup.find_all('section')
for sect in section:
if (sect.get('class') is not None and ('article-section__full' in sect.get('class') or
(isinstance(sect.get('class'), list) and len(sect.get('class'))>1 and 'article-body-section' in sect.get('class')[1]))):
paragraphs = sect.find_all('p')
for p in paragraphs:
skip = False
ul = p.find('ul')
if ul is not None and ul.get('class') is not None and 'rlist' in ul.get('class'):
skip = True
pars = p.parents
for par in pars:
if (par.get('class') is not None and ('supporting' in par.get('class') or 'references' in par.get('class') or
'citedby' in par.get('class'))):
skip = True
for d in data:
for c in d['content']:
if isinstance(c, str):
if c == parser.format_text(p.text):
skip = True
elif isinstance(c, dict):
d2 = c['content']
for c2 in d2:
if isinstance(c2, str):
if c2 == parser.format_text(p.text):
skip = True
elif isinstance(c2, dict):
d3 = c2['content']
for c3 in d3:
if c3 == parser.format_text(p.text):
skip = True
if not skip:
text = parser.format_text(p.text)
# text = ''.join(filter(lambda x: x in string.printable, text)) Can be useful for formating but can remove characters
if text[-1] != '.':
index = text.rfind('.')
text = text[:index+1]
if text == data[-1]['content'][0]:
continue
obj = {
'type':'section_h2',
'name':'',
'content':[text]
}
data.insert(-1*index2, obj)
obj = {
'DOI': doi,
'Title': title,
'Keywords': keys,
'Journal': journal_name,
'Sections': data
}
return obj
WileySoup = Soup(parser_version=__version__)
WileySoup.add_ingredient(WileyRemoveTagsSmallSub())
WileySoup.add_ingredient(WileyRemoveTrash())
WileySoup.add_ingredient(WileyCreateTags())
# WileySoup.add_ingredient(WileyCreateTagAbstract())
WileySoup.add_ingredient(WileyReplaceDivTag())
WileySoup.add_ingredient(WileyCollect())
|
def hw1(inp, out):
    # For every line of the input file, write "true" if the line contains every
    # lowercase letter a-z at least once (i.e. it is a pangram), else "false".
    f = open(inp, 'r')
    l = open(out, 'w')
    # read the lines first: a file object has no len(), so collect them in a list
    inp_arr = []
    for line in f:
        inp_arr.append(line)
    # one counter per ASCII code (0..122) for every input line
    out_arr = [[0 for i in range(0, 123)] for i in range(len(inp_arr))]
    for i in range(0, len(inp_arr)):
        for j in inp_arr[i]:
            if ord(j) < 123:
                out_arr[i][ord(j)] += 1
        flag = True
        for j in range(97, 123):
            if out_arr[i][j] < 1:
                flag = False
                break
        if flag:
            l.write("true\n")
        else:
            l.write("false\n")
    f.close()
    l.close()
|
import numpy as np
import math
import trimesh
import open3d as o3d
def reconstruct_surface(input_point_pos):
point_cloud = o3d.geometry.PointCloud()
point_cloud.points = o3d.utility.Vector3dVector(input_point_pos)
point_cloud.estimate_normals()
point_cloud.orient_normals_consistent_tangent_plane(10)
distances = point_cloud.compute_nearest_neighbor_distance()
avg_dist = np.mean(distances)
radius = 3 * avg_dist
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(point_cloud ,o3d.utility.DoubleVector([radius, radius * 2]))
return mesh
def in_segment(point, start_point, end_point):
v_start = start_point - point
v_end = end_point - point
if np.dot(v_start, v_end) > 0:
return False
else:
return True
def rotate_according_to_origin(points, center_pos, r_mat):
# move to origin
points_origin = points - center_pos
# rotate
points_rotate = np.dot(points_origin, np.transpose(r_mat))
# move back
points_back = points_rotate + center_pos
return points_back
def rotation_matrix_from_vectors(src, des):
    # Rodrigues' formula: rotation matrix that maps the direction of src onto des
    a, b = (src / np.linalg.norm(src)).reshape(3), (des / np.linalg.norm(des)).reshape(3)
    v = np.cross(a, b)
    c = np.dot(a, b)
    s = np.linalg.norm(v)
    if s == 0.:
        # parallel vectors: the formula below would divide by zero
        if c > 0.:
            return np.eye(3)
        perp = np.cross(a, [1., 0., 0.]) if abs(a[0]) < 0.9 else np.cross(a, [0., 1., 0.])
        perp = perp / np.linalg.norm(perp)
        return 2. * np.outer(perp, perp) - np.eye(3)  # 180 degree rotation about perp
    kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
    return rotation_matrix
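# Quick sanity check (sketch): map the x-axis onto the y-axis.
#   R = rotation_matrix_from_vectors(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#   np.dot(R, [1., 0., 0.])  # ~ [0., 1., 0.]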
def compute_angle(v1, v2):
    dot_product = np.clip(np.dot(v1, v2), -1.0, 1.0)  # clip to avoid NaN from float error on unit vectors
angle = np.arccos(dot_product)
return angle
def vector_length(vector):
return np.sqrt(np.sum(vector**2))
def normalized_vector(vector):
return vector/vector_length(vector)
def two_points_distances(p1, p2):
vector_p1_p2 = p2 - p1
distance = vector_length(vector_p1_p2)
return distance
def align_points_to_plane(line,
rec,
ori_normal,
des_normal,
align_end_points_vector,
align_end_points_center):
# compute center
rec_center = (rec[0] + rec[2]) / 2.
# translate to the origin
line = line - rec_center
rec = rec - rec_center
# rotate to align plane
# check if the normal is same direction
r_mat_to_plane = rotation_matrix_from_vectors(ori_normal, des_normal)
line = np.dot(line, np.transpose(r_mat_to_plane))
rec = np.dot(rec, np.transpose(r_mat_to_plane))
# rotate to align start, end point
vector_start_end = line[len(line) - 1] - line[0]
r_mat_to_vector = rotation_matrix_from_vectors(vector_start_end, align_end_points_vector)
line = np.dot(line, np.transpose(r_mat_to_vector))
rec = np.dot(rec, np.transpose(r_mat_to_vector))
# translate to the position according to the center of end points
translate_vector = align_end_points_center - (line[0] + line[len(line) - 1])/2.
line = line + translate_vector
rec = rec + translate_vector
return line, rec, r_mat_to_plane, r_mat_to_vector
def scale_points(points, x_factor, y_factor, z_factor):
s_mat = np.array([[x_factor, 0., 0.], [0., y_factor, 0.], [0., 0., z_factor]])
points = np.dot(points, np.transpose(s_mat))
return points
def screen_pos_to_world_ray(mouseX, mouseY, # Mouse position, in pixels, from top-left corner of the window
screenWidth, screenHeight, # Window size, in pixels
ProjectionMatrix, # Camera parameters (ratio, field of view, near and far planes)
ViewMatrix, # Camera position and orientation
):
InverseProjectionMatrix = np.linalg.inv(ProjectionMatrix)
InverseViewMatrix = np.linalg.inv(ViewMatrix)
# Transform into normalised device coordinates
x = (2.0 * float(mouseX)) / float(screenWidth) - 1.0
y = 1.0 - (2.0 * float(mouseY)) / float(screenHeight)
# 4d Homogeneous Clip Coordinates
ray_clip = np.array([x, y, -1.0, 1.0])
# 4d Eye (Camera) Coordinates
ray_eye = np.matmul(InverseProjectionMatrix, ray_clip)
ray_eye = np.array([ray_eye[0], ray_eye[1], -1.0, 0.0])
# 4d World Coordinates
ray_world = np.matmul(InverseViewMatrix, ray_eye)[:3]
ray_world = ray_world / np.sqrt(np.sum(ray_world**2)) # normalize
return ray_world
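# Usage sketch (hypothetical matrices and camera position): turn a mouse click
# into a picking ray and intersect it with a ground plane.
#   ray_dir = screen_pos_to_world_ray(mx, my, 800, 600, proj_mat, view_mat)
#   hit, point, t = ray_plane_hit_detection(np.zeros(3), np.array([0., 1., 0.]),
#                                           camera_pos, ray_dir)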
def world_pos_to_screen_pos(worldPos, # Mouse position, in pixels, from top-left corner of the window
screenWidth, screenHeight, # Window size, in pixels
ProjectionMatrix, # Camera parameters (ratio, field of view, near and far planes)
ViewMatrix, # Camera position and orientation
):
# 4d Eye (Camera) Coordinates
world_pos = np.array([worldPos[0], worldPos[1], worldPos[2], 1.])
camera_pos = np.matmul(ViewMatrix, world_pos)
# 4d Homogeneous Clip Coordinates
clip_pos = np.matmul(ProjectionMatrix, camera_pos)
# Transform into normalised device coordinates
# divide w
ndc_pos = clip_pos
if clip_pos[3] != 0:
ndc_pos = clip_pos / clip_pos[3]
# screen pos
screen_x = (ndc_pos[0] + 1.0) * float(screenWidth) / 2.0
screen_y = (ndc_pos[1] - 1.0) * float(screenHeight) / -2.0
screen_pos = [screen_x, screen_y]
return screen_pos
# ray v.s. faces, faces number can be 1
def ray_mesh_face_hit_detection(ray_origin, ray_dir, vertices):
hit = False
if len(vertices) == 0:
        return False, []  # keep the (hit, hit_point) return shape consistent
face_mesh = trimesh.Trimesh(vertices=vertices, faces=[[0, 1, 2, 3]])
hit_point, _, _ = face_mesh.ray.intersects_location([ray_origin], [ray_dir], multiple_hits=False)
if len(hit_point) != 0:
hit = True
return hit, hit_point
def ray_plane_hit_detection(plane_point, plane_normal, ray_origin, ray_dir):
hit = True
hit_point = None
t = (np.dot(plane_point, plane_normal) - np.dot(ray_origin, plane_normal)) / np.dot(ray_dir, plane_normal)
if t < 0. or t > 10.:
hit = False
t = None
else:
hit_point = ray_origin + ray_dir*t
return hit, hit_point, t
# vertices
# 0................3
# . .
# . .
# . .
# . .
# 1 ...............2
#
def ray_plane_hit_detection_with_boundary(ray_origin, ray_dir, vertices):
hit = False
hit_point = None
vector_x = vertices[3] - vertices[0]
vector_y = vertices[1] - vertices[0]
vector_z = np.cross(vector_x, vector_y)
hit_plane, hit_point, t = ray_plane_hit_detection(plane_point=vertices[0],
plane_normal=vector_z,
ray_origin=ray_origin ,
ray_dir=ray_dir)
if hit_plane:
# check if the hit point is inside the rectangle
check_vector = hit_point - vertices[0]
x_length = np.sqrt(np.sum(vector_x**2))
y_length = np.sqrt(np.sum(vector_y**2))
normalize_x = vector_x/x_length
normalize_y = vector_y/y_length
# compute the projection on x, y axis
x_proj_length = np.dot(check_vector, normalize_x)
y_proj_length = np.dot(check_vector, normalize_y)
if (x_proj_length > 0 and y_proj_length > 0) \
and (x_proj_length < x_length and y_proj_length < y_length):
hit = True
return hit, hit_point, t
# ray v.s. point cloud
def ray_point_cloud_hit_detection(ray_origin, ray_dir, point_cloud_positions, hit_radius):
valid_hit_point_id = []
point_ts = np.array([ ray_point_hit_detection(point_pos, ray_origin, ray_dir, hit_radius = hit_radius) for point_pos in point_cloud_positions ])
hit_point_id = np.argmin(point_ts)
# if no hit
if point_ts[hit_point_id] == 1000.:
return valid_hit_point_id
else:
valid_hit_point_id.append(hit_point_id)
return valid_hit_point_id
def ray_point_hit_detection(point_pos, ray_origin, ray_dir, hit_radius):
b = np.dot(ray_dir, (ray_origin - point_pos))
c = np.dot((ray_origin - point_pos), (ray_origin - point_pos)) - hit_radius*hit_radius
check_hit_value = b*b - c
# if no hit
if check_hit_value < 0.:
return 1000.
elif check_hit_value > 0.:
t_plus = -b + math.sqrt(check_hit_value)
t_minus = -b - math.sqrt(check_hit_value)
return min(t_plus, t_minus)
else:
return -b
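# Note on the math above: with a unit-length ray direction d, origin o and a
# sphere of radius r centred at p, |o + t*d - p|^2 = r^2 expands to
#   t^2 + 2*b*t + c = 0,   b = d.(o - p),   c = |o - p|^2 - r^2,
# so the closest hit is t = -b - sqrt(b^2 - c), and b^2 - c < 0 means no hit
# (signalled here by the sentinel value 1000.).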
# ray detection with the surface of a circle
def ray_circle_hit_detection(plane_center, plane_normal, ray_origin, ray_dir, hit_radius, thickness):
hit = False
hit_point = None
# implement ray plane hit detection first with the circle plane
hit_plane, hit_point, t = ray_plane_hit_detection(plane_center, plane_normal, ray_origin, ray_dir)
if hit_plane:
# check the distance between the hit point and the center
vector_center_hit = hit_point - plane_center
distance = np.sqrt(np.sum(vector_center_hit**2))
if distance < (hit_radius + thickness) and distance > (hit_radius - thickness):
hit = True
return hit, hit_point, t
def ray_axis_hit_detection(axis_start, axis_end, axis, ray_origin, ray_dir, thickness):
hit = False
hit_point_on_axis = None
ray_t = None
# find the nearest points of two lines
# compute the vector that is prependicular to two lines
n = np.cross(axis, ray_dir)
# compute a plane by line1 and n, the nearest point on line2 is on the plane
n1 = np.cross(axis, n)
# compute a plane by line2 and n, the nearest point on line1 is on the plane
n2 = np.cross(ray_dir, n)
# find the intersection point of ray1 and the plane of line2
hit1, hit_point1, t1 = ray_plane_hit_detection(ray_origin, n2, axis_start, axis)
# find the intersection point of ray2 and the plane of line1
hit2, hit_point2, t2 = ray_plane_hit_detection(axis_start, n1, ray_origin, ray_dir)
if hit1 and hit2:
vector_hit1_hit2 = hit_point2 - hit_point1
vector_hit1_hit2_length = np.sqrt(np.sum(vector_hit1_hit2**2))
if vector_hit1_hit2_length < thickness and in_segment(hit_point1, axis_start, axis_end):
hit_point_on_axis = hit_point1
ray_t = t2
hit = True
return hit, hit_point_on_axis, ray_t
#
def fix_rec(rec):
print("fix_rec")
rec = np.array(rec)
fixed_rec = []
for corner_id, corner in enumerate(rec):
last_id = corner_id - 1
next_id = corner_id + 1
if corner_id == 0:
last_id = 3
elif corner_id == 3:
next_id = 0
last_corner = rec[last_id]
next_corner = rec[next_id]
last_vector = last_corner - corner
next_vector = next_corner - corner
last_length = vector_length(last_vector)
next_length = vector_length(next_vector)
cos_value = np.dot(normalized_vector(last_vector), normalized_vector(next_vector))
if cos_value < 0.:
print('cos')
if next_length > last_length:
projection_length = abs(last_length*cos_value)
point = corner - normalized_vector(next_vector)*projection_length
fixed_rec.append(point)
else:
projection_length = abs(next_length*cos_value)
point = corner - normalized_vector(last_vector)*projection_length
fixed_rec.append(point)
else:
fixed_rec.append(corner)
return fixed_rec
|
# coding: utf-8
from . import views
from django.conf.urls import url
urlpatterns = [
url(
r'^create/(?P<app_label>\w+)/(?P<model>\w+)/(?P<obj_id>\d+)/$',
views.ExampleCreateView.as_view(),
name='create'
),
url(
r'^update/(?P<pk>\d+)/$',
views.ExampleUpdateView.as_view(),
name='update'
),
]
|
import os
from boa_test.tests.boa_test import BoaTest
from boa.compiler import Compiler
from neo.Prompt.Commands.BuildNRun import TestBuild
from neo.VM.ExecutionEngine import ExecutionEngine
from mock import patch
from neo.Settings import settings
from logging import DEBUG, INFO
import binascii
class StringIn(str):
def __eq__(self, other):
return self in other
class TestUnclosedWhileLoop(BoaTest):
engine = ExecutionEngine()
script = None
@classmethod
def setUpClass(cls):
super(TestUnclosedWhileLoop, cls).setUpClass()
# the following script is a simple contract that is basically `while True`
cls.script = binascii.unhexlify(b'00c56b620000')
settings.set_loglevel(DEBUG)
@classmethod
def tearDownClass(cls):
super(TestUnclosedWhileLoop, cls).tearDownClass()
settings.set_loglevel(INFO)
@patch('logzero.logger.debug')
def test_unclosed_loop_script(self, mocked_logger):
tx, results, total_ops, engine = TestBuild(self.script, [], self.GetWallet1(), '', 'ff')
mocked_logger.assert_called_with(StringIn('Too many free operations processed'))
|
from app.logic_gates.gates import Gates
gates = {
1: 'not',
2: 'or',
3: 'and',
4: 'nor',
5: 'nand',
6: 'xor'
}
class LogicGates:
def __init__(self) -> None:
self.get_option()
self.get_value()
self.result()
def show_gates(self) -> None:
for key in gates:
print(f'[{key}] - {gates[key]}')
def get_option(self) -> None:
print('Selecione a porta:')
self.show_gates()
option = int(input('>>> '))
if option not in gates.keys():
print('Opção inválida!')
else:
self.option = gates[option]
def get_value(self) -> None:
print(f'Digite o valor de entrada:')
self.value = input('>>> ')
def result(self) -> None:
gate = Gates()
if self.option == 'not':
print(f'Resultado: {gate.not_gate(int(self.value))}')
elif self.option == 'or':
print(f'Resultado: {gate.or_gate(self.value)}')
elif self.option == 'and':
print(f'Resultado: {gate.and_gate(self.value)}')
elif self.option == 'nor':
print(f'Resultado: {gate.nor_gate(self.value)}')
elif self.option == 'nand':
print(f'Resultado: {gate.nand_gate(self.value)}')
elif self.option == 'xor':
print(f'Resultado: {gate.xor_gate(self.value)}')
|
from __future__ import print_function
import os
import sys
import math
import pickle
import boto3
import numpy as np
import kg
import pandas as pd
# from tqdm import tqdm
import time
import argparse
import json
import logging
import re
import dglke
# tqdm.pandas()
# pandarallel.initialize(progress_bar=True)
# bucket = os.environ.get("BUCKET_NAME", " ")
# raw_data_folder = os.environ.get("RAW_DATA", " ")
# logger = logging.getLogger()
# logger.setLevel(logging.INFO)
# tqdm_notebook().pandas()
print("dglke version:", dglke.__version__)
########################################
# Sync data from S3
########################################
def sync_s3(file_name_list, s3_folder, local_folder):
for f in file_name_list:
print("file preparation: download src key {} to dst key {}".format(os.path.join(
s3_folder, f), os.path.join(local_folder, f)))
s3client.download_file(bucket, os.path.join(
s3_folder, f), os.path.join(local_folder, f))
def write_to_s3(filename, bucket, key):
print("upload s3://{}/{}".format(bucket, key))
with open(filename, 'rb') as f: # Read in binary mode
# return s3client.upload_fileobj(f, bucket, key)
return s3client.put_object(
ACL='bucket-owner-full-control',
Bucket=bucket,
Key=key,
Body=f
)
def write_str_to_s3(content, bucket, key):
print("write s3://{}/{}, content={}".format(bucket, key, content))
s3client.put_object(Body=str(content).encode(
"utf8"), Bucket=bucket, Key=key, ACL='bucket-owner-full-control')
region = None
param_path = os.path.join('/opt/ml/', 'input/config/hyperparameters.json')
if os.path.exists(param_path):
print("load param from {}".format(param_path))
with open(param_path) as f:
hp = json.load(f)
bucket = hp['bucket']
prefix = hp['prefix']
region = hp.get("region")
else:
parser = argparse.ArgumentParser()
parser.add_argument('--bucket', type=str)
parser.add_argument('--prefix', type=str)
parser.add_argument("--region", type=str, help="aws region")
args, _ = parser.parse_known_args()
bucket = args.bucket
prefix = args.prefix
if args.region:
region = args.region
if region:
print("region:", region)
boto3.setup_default_session(region_name=region)
if prefix.endswith("/"):
prefix = prefix[:-1]
print("bucket={}".format(bucket))
print("prefix='{}'".format(prefix))
s3client = boto3.client('s3')
out_s3_path = "s3://{}/{}/feature/content/inverted-list".format(bucket, prefix)
local_folder = 'info'
if not os.path.exists(local_folder):
os.makedirs(local_folder)
# prepare model for batch process
meta_file_prefix = "{}/model/meta_files".format(prefix)
os.environ['GRAPH_BUCKET'] = bucket
os.environ['KG_DBPEDIA_KEY'] = '{}/kg_dbpedia.txt'.format(meta_file_prefix)
os.environ['KG_ENTITY_KEY'] = '{}/entities_dbpedia.dict'.format(
meta_file_prefix)
os.environ['KG_RELATION_KEY'] = '{}/relations_dbpedia.dict'.format(
meta_file_prefix)
os.environ['KG_DBPEDIA_TRAIN_KEY'] = '{}/kg_dbpedia_train.txt'.format(
meta_file_prefix)
os.environ['KG_ENTITY_TRAIN_KEY'] = '{}/entities_dbpedia_train.dict'.format(
meta_file_prefix)
os.environ['KG_RELATION_TRAIN_KEY'] = '{}/relations_dbpedia_train.dict'.format(
meta_file_prefix)
os.environ['KG_ENTITY_INDUSTRY_KEY'] = '{}/entity_industry.txt'.format(
meta_file_prefix)
os.environ['KG_VOCAB_KEY'] = '{}/vocab.json'.format(meta_file_prefix)
os.environ['DATA_INPUT_KEY'] = ''
os.environ['TRAIN_OUTPUT_KEY'] = '{}/model/rank/content/dkn_embedding_latest/'.format(
prefix)
kg_path = os.environ['GRAPH_BUCKET']
dbpedia_key = os.environ['KG_DBPEDIA_KEY']
entity_key = os.environ['KG_ENTITY_KEY']
relation_key = os.environ['KG_RELATION_KEY']
dbpedia_train_key = os.environ['KG_DBPEDIA_TRAIN_KEY']
entity_train_key = os.environ['KG_ENTITY_TRAIN_KEY']
relation_train_key = os.environ['KG_RELATION_TRAIN_KEY']
entity_industry_key = os.environ['KG_ENTITY_INDUSTRY_KEY']
vocab_key = os.environ['KG_VOCAB_KEY']
data_input_key = os.environ['DATA_INPUT_KEY']
train_output_key = os.environ['TRAIN_OUTPUT_KEY']
env = {
'GRAPH_BUCKET': kg_path,
'KG_DBPEDIA_KEY': dbpedia_key,
'KG_ENTITY_KEY': entity_key,
'KG_RELATION_KEY': relation_key,
'KG_DBPEDIA_TRAIN_KEY': dbpedia_train_key,
'KG_ENTITY_TRAIN_KEY': entity_train_key,
'KG_RELATION_TRAIN_KEY': relation_train_key,
'KG_ENTITY_INDUSTRY_KEY': entity_industry_key,
'KG_VOCAB_KEY': vocab_key,
'DATA_INPUT_KEY': data_input_key,
'TRAIN_OUTPUT_KEY': train_output_key
}
print("Kg env: {}".format(env))
graph = kg.Kg(env, region=region) # Where we keep the model when it's loaded
# model = encoding.encoding(graph, env)
graph.train()
# graph.train(max_step=2000)
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from fn_cb_protection.util.bit9_client import CbProtectClient, escape
from resilient_lib import validate_fields
log = logging.getLogger(__name__)
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'bit9_file_rule_query"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts["fn_cb_protection"]
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts["fn_cb_protection"]
@function("bit9_file_rule_query")
def _bit9_file_rule_query_function(self, event, *args, **kwargs):
"""Function: Return file rules that match the given criteria."""
try:
validate_fields(["bit9_query"], kwargs)
# Get the function parameters:
bit9_query = kwargs.get("bit9_query") # text
log.info(u"bit9_query: %s", bit9_query)
# Query example: 'id:6' (see https://<server>/api/bit9platform/v1 for details)
bit9_client = CbProtectClient(self.options)
results = bit9_client.query_file_rule(bit9_query)
# Query results should be a list
if isinstance(results, list):
log.info("%d results", len(results))
results = {
"count": len(results),
"items": results
}
log.debug(results)
else:
log.warn(u"Expected a list but received:")
log.warn(results)
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception as err:
log.error(err)
yield FunctionError(err)
|
import argparse
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np # won't need this when data on 3rd dose for 12-17 year olds becomes available
import os
from vaccine_dataprep_Swedentots import (
first_two_vacc_dose_lan,
third_vacc_dose_lan,
fourth_vacc_dose,
)
aparser = argparse.ArgumentParser(description="Generate text insert json")
aparser.add_argument("--output-dir", nargs="?", default="vaccine_plots",
help="Output directory where the files will be saved")
args = aparser.parse_args()
## Need 3 sets of data - for one dose, two doses, and three doses
# Don't have population size data for these age groups (at least right now), so can't do population level calculations
## data for 3rd dose is held separately - work with data for 1st 2 doses first
first_two_vacc_dose_lan = first_two_vacc_dose_lan[(first_two_vacc_dose_lan["Region"] == "Sweden")]
# Need to change terminology used for the '90 or older' age group
first_two_vacc_dose_lan = first_two_vacc_dose_lan.replace("90 eller äldre", "90+")
# We drop the 'totals' in the dataset as we don't want them
first_two_vacc_dose_lan.drop(
first_two_vacc_dose_lan[(first_two_vacc_dose_lan["Åldersgrupp"] == "Totalt")].index,
inplace=True,
)
# Recalculate as a percentage for each age group.
first_two_vacc_dose_lan["Procent vaccinerade"] = (
first_two_vacc_dose_lan["Andel vaccinerade"] * 100
)
# Separate data for one and two doses
# one dose
one_dose = first_two_vacc_dose_lan[
(first_two_vacc_dose_lan["Vaccinationsstatus"] == "Minst 1 dos")
]
one_dose = one_dose[["Åldersgrupp", "Procent vaccinerade", "Vaccinationsstatus"]]
one_dose.reset_index(drop=True, inplace=True)
# data for two doses
two_doses = first_two_vacc_dose_lan[
(first_two_vacc_dose_lan["Vaccinationsstatus"] == "Minst 2 doser")
]
two_doses = two_doses[["Åldersgrupp", "Procent vaccinerade", "Vaccinationsstatus"]]
two_doses.reset_index(drop=True, inplace=True)
## Sort data for three doses. Note - data only currently available for 18+ (from 12 for 1 & 2 dose)
# Limit data to just Sweden and modify for the 90+ age group
third_vacc_dose_lan = third_vacc_dose_lan[(third_vacc_dose_lan["Region"] == "Sweden")]
third_vacc_dose_lan = third_vacc_dose_lan.replace("90 eller äldre", "90+")
# Calculate values as percentages
third_vacc_dose_lan.drop(
third_vacc_dose_lan[(third_vacc_dose_lan["Åldersgrupp"] == "Totalt")].index,
inplace=True,
)
third_vacc_dose_lan["Procent vaccinerade"] = (
third_vacc_dose_lan["Andel vaccinerade"] * 100
)
third_vacc_dose_lan = third_vacc_dose_lan[
["Åldersgrupp", "Procent vaccinerade", "Vaccinationsstatus"]
]
# For now, we need to add two age categories for the third dose (12-15, 16-17)
## REMOVE THIS ROW WHEN THESE AGE CATEGORIES ARE AVAILABLE FOR THIRD DOSE DATA
top_row = pd.DataFrame(
{
"Åldersgrupp": ["12-15", "16-17"],
"Procent vaccinerade": [np.nan, np.nan],
"Vaccinationsstatus": ["3 doser", "3 doser"],
}
)
third_dose = pd.concat([top_row, third_vacc_dose_lan]).reset_index(drop=True)
# Add fourth dose (already given as percentages from dataprep, so no recalculation needed)
# We do need to add placeholder rows for the missing age groups (until more are added) and rename the 90+ group
# Also need to eliminate the 'Totalt' row
fourth_vacc_dose = fourth_vacc_dose.replace("90 eller äldre", "90+")
# REMOVE BELOW AS MORE AGE CATEGORIES ARE ADDED
top_row_fourth = pd.DataFrame(
{
"Åldersgrupp": [
"12-15",
"16-17",
"18-29",
"30-39",
"40-49",
"50-59",
"60-69",
],
"Procent vaccinerade": [
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"Vaccinationsstatus": [
"4 doser",
"4 doser",
"4 doser",
"4 doser",
"4 doser",
"4 doser",
"4 doser",
],
}
)
fourth_dose = pd.concat([top_row_fourth, fourth_vacc_dose]).reset_index(drop=True)
fourth_dose = fourth_dose[fourth_dose.Åldersgrupp != "Totalt"]
fourth_dose = fourth_dose[fourth_dose.Åldersgrupp != "65-69"]
## Prepare dataframe for heatmap (all data in one place)
heatmap_data = pd.concat(
[one_dose, two_doses, third_dose, fourth_dose],
axis=0,
)
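# Map the Swedish dose-status labels onto plain dose counts used as the heatmap x-axis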
heatmap_data["Vaccinationsstatus"] = heatmap_data["Vaccinationsstatus"].replace(
{
"Minst 1 dos": "1",
"Minst 2 doser": "2",
"3 doser": "3",
"4 doser": "4",
}
)
## Make heatmap figures (one small for front of portal, and one larger for page)
## Same data will be included in both
colours = px.colors.diverging.RdBu
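# Diverging red-blue palette; the discrete colorscale below assigns one colour per 10% band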
fig_small = go.Figure(
data=go.Heatmap(
z=heatmap_data["Procent vaccinerade"],
zmin=0,
zmax=100,
x=heatmap_data["Vaccinationsstatus"],
y=heatmap_data["Åldersgrupp"],
xgap=1,
ygap=1,
colorbar={
"title": "<b>Percentage of <br>Population Vaccinated<br> </b>",
"yanchor": "top",
"y": 1.0,
"lenmode": "fraction",
"len": 0.95,
"tickvals": [
5,
15,
25,
35,
45,
55,
65,
75,
85,
95,
],
"ticktext": [
"00.00-9.99%",
"10.00-19.99%",
"20.00-29.99%",
"30.00-39.99%",
"40.00-49.99%",
"50.00-59.99%",
"60.00-69.99%",
"70.00-79.99%",
"80.00-89.99%",
"90.00-100.00%",
],
},
colorscale=[
[0.0, colours[10]],
[0.1, colours[10]],
[0.1, colours[9]],
[0.2, colours[9]],
[0.2, colours[8]],
[0.3, colours[8]],
[0.3, colours[7]],
[0.4, colours[7]],
[0.4, colours[6]],
[0.5, colours[6]],
[0.5, "rgb(255,255,204)"],
[0.6, "rgb(255,255,204)"],
[0.6, colours[4]],
[0.7, colours[4]],
[0.7, colours[3]],
[0.8, colours[3]],
[0.8, colours[2]],
[0.9, colours[2]],
[0.9, colours[1]],
[1.0, colours[1]],
],
hovertemplate="<extra></extra>Vaccine Doses Received: %{x} <br>Age Category: %{y}<br>Percentage Vaccinated: %{z:.2f}%",
)
)
fig_small.update_layout(
hoverlabel={
"bgcolor": "white",
"font_size": 12,
}
)
fig_small.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
fig_small.update_layout(
title=" ",
plot_bgcolor="white",
yaxis={
"title": "<b>Age Group</b>",
"linecolor": "black",
},
font={"size": 12},
# width=2000, # Don't set width/height, it's set in Portal
# height=300, # It's the legend length and font that make this heatmap 'small'
xaxis={
"title": "<b>Doses Received</b>",
"tickangle": 0,
"zeroline": True,
"linecolor": "black",
},
)
# fig_small.show()
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
fig_small.write_json(os.path.join(args.output_dir, "vaccine_heatmap_small.json"))
# fig_small.write_image("Plots/vaccine_heatmap_small.png")
# Now make the larger version
fig = go.Figure(
data=go.Heatmap(
z=heatmap_data["Procent vaccinerade"],
zmin=0,
zmax=100,
x=heatmap_data["Vaccinationsstatus"],
y=heatmap_data["Åldersgrupp"],
xgap=1,
ygap=1,
colorbar={
"title": "<b>Percentage of <br>Population Vaccinated<br> </b>",
"yanchor": "top",
"y": 1.0,
"lenmode": "fraction",
"len": 0.5,
"tickvals": [
5,
15,
25,
35,
45,
55,
65,
75,
85,
95,
],
"ticktext": [
"00.00-9.99%",
"10.00-19.99%",
"20.00-29.99%",
"30.00-39.99%",
"40.00-49.99%",
"50.00-59.99%",
"60.00-69.99%",
"70.00-79.99%",
"80.00-89.99%",
"90.00-100.00%",
],
},
colorscale=[
[0.0, colours[10]],
[0.1, colours[10]],
[0.1, colours[9]],
[0.2, colours[9]],
[0.2, colours[8]],
[0.3, colours[8]],
[0.3, colours[7]],
[0.4, colours[7]],
[0.4, colours[6]],
[0.5, colours[6]],
[0.5, "rgb(255,255,204)"],
[0.6, "rgb(255,255,204)"],
[0.6, colours[4]],
[0.7, colours[4]],
[0.7, colours[3]],
[0.8, colours[3]],
[0.8, colours[2]],
[0.9, colours[2]],
[0.9, colours[1]],
[1.0, colours[1]],
],
hovertemplate="<extra></extra>Vaccine Doses Received: %{x} <br>Age Category: %{y}<br>Percentage Vaccinated: %{z:.2f}%",
)
)
fig.update_layout(
hoverlabel={
"bgcolor": "white",
"font_size": 14,
}
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
fig.update_layout(
title=" ",
plot_bgcolor="white",
yaxis={
"title": "<b>Age Group</b>",
"linecolor": "black",
},
font={"size": 14},
# width=2000, # width/height not set - will depend on portal space
# height=1000, # it's the legend length and font etc. that make this 'larger'
xaxis={
"title": "<b>Doses Received</b>",
"tickangle": 0,
"zeroline": True,
"linecolor": "black",
},
)
# fig.show()
fig.write_json(os.path.join(args.output_dir, "vaccine_heatmap.json"))
# fig.write_image("Plots/vaccine_heatmap.png")
|
class DatoolsError(Exception):
pass
|
"Legacy code. To be updated or depreciated."
from pyrvea.EAs.RVEA import RVEA
from pyrvea.OtherTools.ReferenceVectors import ReferenceVectors
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyrvea.Population.Population import Population
class slowRVEA(RVEA):
"""RVEA variant that impliments slow reference vector movement."""
def __init__(self, population: "Population", ea_parameters):
"""Initialize a Base Decomposition EA.
This will call methods to set up the parameters of RVEA, create
Reference Vectors, and (as of Feb 2019) run the first iteration of RVEA.
Parameters
----------
population : "Population"
This variable is updated as evolution takes place
        ea_parameters : dict
Takes the EA parameters
Returns
-------
Population:
Returns the Population after evolution.
"""
if ea_parameters:
self.params = self.set_params(population, **ea_parameters)
else:
self.params = self.set_params(population)
# if population.individuals.shape[0] == 0:
# create_new_individuals(pop_size=self.params["population_size"])
# # print("Using BaseDecompositionEA init")
# self._next_iteration(population)
def set_params(
self,
population: "Population",
generations_per_iteration: int = 10,
iterations: int = 10,
Alpha: float = 2,
ref_point: list = None,
old_point: list = None,
**kwargs
):
"""Set up the parameters. Save in RVEA.params. Note, this should be
changed to align with the current structure.
Parameters
----------
population : Population
Population object
Alpha : float
The alpha parameter of APD selection.
        ref_point : list
            The reference point towards which the reference vectors slowly adapt.
        old_point : list
            The previous reference point used when creating the vectors.
Returns
-------
"""
ref_vectors = ReferenceVectors(
number_of_objectives=population.problem.num_of_objectives,
creation_type="Sparse_Focused",
ref_point=old_point,
)
if ref_point is None:
ref_point = ref_vectors.values[0]
rveaparams = {
"reference_vectors": ref_vectors,
"population_size": ref_vectors.number_of_vectors,
"generations": generations_per_iteration,
"iterations": iterations,
"Alpha": Alpha,
"current_iteration_gen_count": 0,
"current_iteration_count": 0,
"current_total_gen_count": 0,
"total_generations": iterations * generations_per_iteration,
"ref_point": ref_point,
}
rveaparams.update(kwargs)
return rveaparams
def _run_interruption(self, population: "Population"):
self.params["reference_vectors"].slow_interactive_adapt(
self.params["ref_point"]
)
|
from hashlib import sha256
from http import HTTPStatus
import grequests
import pytest
from eth_utils import decode_hex, encode_hex, to_bytes, to_checksum_address, to_hex
from raiden.api.rest import APIServer
from raiden.constants import UINT64_MAX
from raiden.settings import DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS
from raiden.tests.integration.api.rest.utils import (
api_url_for,
assert_payment_conflict,
assert_payment_secret_and_hash,
assert_proper_response,
assert_response_with_error,
get_json_response,
)
from raiden.tests.utils import factories
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.transfer import watch_for_unlock_failures
from raiden.utils.secrethash import sha256_secrethash
from raiden.utils.typing import Secret
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_target_error(
api_server_test_instance: APIServer, raiden_network, token_addresses
):
_, app1 = raiden_network
amount = 200
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
# stop app1 to force an error
app1.stop()
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": str(identifier)},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CONFLICT)
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments(
api_server_test_instance: APIServer, raiden_network, token_addresses, deposit
):
_, app1 = raiden_network
amount = 100
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
our_address = api_server_test_instance.rest_api.raiden_api.address
payment = {
"initiator_address": to_checksum_address(our_address),
"target_address": to_checksum_address(target_address),
"token_address": to_checksum_address(token_address),
"amount": str(amount),
"identifier": str(identifier),
}
# Test a normal payment
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": str(identifier)},
)
with watch_for_unlock_failures(*raiden_network):
response = request.send().response
assert_proper_response(response)
json_response = get_json_response(response)
assert_payment_secret_and_hash(json_response, payment)
# Test a payment without providing an identifier
payment["amount"] = "1"
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": "1"},
)
with watch_for_unlock_failures(*raiden_network):
response = request.send().response
assert_proper_response(response)
json_response = get_json_response(response)
assert_payment_secret_and_hash(json_response, payment)
# Test that trying out a payment with an amount higher than what is available returns an error
payment["amount"] = str(deposit)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(deposit)},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CONFLICT)
# Test that querying the internal events resource works
limit = 5
request = grequests.get(
api_url_for(
api_server_test_instance, "raideninternaleventsresource", limit=limit, offset=0
)
)
response = request.send().response
assert_proper_response(response)
events = response.json()
assert len(events) == limit
assert all("TimestampedEvent" in event for event in events)
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_secret_hash_errors(
api_server_test_instance: APIServer, raiden_network, token_addresses
):
_, app1 = raiden_network
amount = 200
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
secret = to_hex(factories.make_secret())
bad_secret = "Not Hex String. 0x78c8d676e2f2399aa2a015f3433a2083c55003591a0f3f33"
bad_secret_hash = "Not Hex String. 0x78c8d676e2f2399aa2a015f3433a2083c55003591a0f3f33"
short_secret = "0x123"
short_secret_hash = "Short secret hash"
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": str(identifier), "secret": short_secret},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.BAD_REQUEST)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": str(identifier), "secret": bad_secret},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.BAD_REQUEST)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"secret_hash": short_secret_hash,
},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.BAD_REQUEST)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"secret_hash": bad_secret_hash,
},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.BAD_REQUEST)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"secret": secret,
"secret_hash": secret,
},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CONFLICT)
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_with_secret_no_hash(
api_server_test_instance: APIServer, raiden_network, token_addresses
):
_, app1 = raiden_network
amount = 100
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
secret = to_hex(factories.make_secret())
our_address = api_server_test_instance.rest_api.raiden_api.address
payment = {
"initiator_address": to_checksum_address(our_address),
"target_address": to_checksum_address(target_address),
"token_address": to_checksum_address(token_address),
"amount": str(amount),
"identifier": str(identifier),
}
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": str(identifier), "secret": secret},
)
with watch_for_unlock_failures(*raiden_network):
response = request.send().response
assert_proper_response(response)
json_response = get_json_response(response)
assert_payment_secret_and_hash(json_response, payment)
assert secret == json_response["secret"]
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_with_hash_no_secret(
api_server_test_instance, raiden_network, token_addresses
):
_, app1 = raiden_network
amount = 200
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
secret = to_hex(factories.make_secret())
secret_hash = to_hex(sha256(to_bytes(hexstr=secret)).digest())
our_address = api_server_test_instance.rest_api.raiden_api.address
payment = {
"initiator_address": to_checksum_address(our_address),
"target_address": to_checksum_address(target_address),
"token_address": to_checksum_address(token_address),
"amount": str(amount),
"identifier": str(identifier),
}
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": str(identifier), "secret_hash": secret_hash},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CONFLICT)
assert payment == payment
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("resolver_ports", [[None, 8000]])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_with_resolver(
api_server_test_instance: APIServer,
raiden_network,
token_addresses,
resolvers, # pylint: disable=unused-argument
):
_, app1 = raiden_network
amount = 100
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
secret = factories.make_secret()
secret_hash = sha256_secrethash(secret)
our_address = api_server_test_instance.rest_api.raiden_api.address
payment = {
"initiator_address": to_checksum_address(our_address),
"target_address": to_checksum_address(target_address),
"token_address": to_checksum_address(token_address),
"amount": str(amount),
"identifier": str(identifier),
}
# payment with secret_hash when both resolver and initiator don't have the secret
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"secret_hash": encode_hex(secret_hash),
},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CONFLICT)
assert payment == payment
# payment with secret where the resolver doesn't have the secret. Should work.
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": str(identifier), "secret": encode_hex(secret)},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.OK)
assert payment == payment
# payment with secret_hash where the resolver has the secret. Should work.
secret = Secret(
decode_hex("0x2ff886d47b156de00d4cad5d8c332706692b5b572adfe35e6d2f65e92906806e")
)
secret_hash = sha256_secrethash(secret)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"secret_hash": encode_hex(secret_hash),
},
)
with watch_for_unlock_failures(*raiden_network):
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.OK)
assert payment == payment
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_with_secret_and_hash(
api_server_test_instance: APIServer, raiden_network, token_addresses
):
_, app1 = raiden_network
amount = 100
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
secret = to_hex(factories.make_secret())
secret_hash = to_hex(sha256(to_bytes(hexstr=secret)).digest())
our_address = api_server_test_instance.rest_api.raiden_api.address
payment = {
"initiator_address": to_checksum_address(our_address),
"target_address": to_checksum_address(target_address),
"token_address": to_checksum_address(token_address),
"amount": str(amount),
"identifier": str(identifier),
}
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"secret": secret,
"secret_hash": secret_hash,
},
)
with watch_for_unlock_failures(*raiden_network):
response = request.send().response
assert_proper_response(response)
json_response = get_json_response(response)
assert_payment_secret_and_hash(json_response, payment)
assert secret == json_response["secret"]
assert secret_hash == json_response["secret_hash"]
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_conflicts(
api_server_test_instance: APIServer, raiden_network, token_addresses
):
_, app1 = raiden_network
token_address = token_addresses[0]
target_address = app1.raiden.address
payment_url = api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
)
# two different transfers (different amounts) with same identifier at the same time:
# payment conflict
responses = grequests.map(
[
grequests.post(payment_url, json={"amount": "10", "identifier": "11"}),
grequests.post(payment_url, json={"amount": "11", "identifier": "11"}),
]
)
assert_payment_conflict(responses)
# same request sent twice, e. g. when it is retried: no conflict
responses = grequests.map(
[
grequests.post(payment_url, json={"amount": "10", "identifier": "73"}),
grequests.post(payment_url, json={"amount": "10", "identifier": "73"}),
]
)
assert all(response.status_code == HTTPStatus.OK for response in responses)
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_with_lock_timeout(
api_server_test_instance: APIServer, raiden_network, token_addresses
):
_, app1 = raiden_network
amount = 100
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
number_of_nodes = 2
reveal_timeout = number_of_nodes * 4 + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS
settle_timeout = 39
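    # The requests below probe the accepted lock_timeout range: a value equal to the reveal timeout is rejected,
    # while values between twice the reveal timeout and the settle timeout are accepted.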
# try lock_timeout = reveal_timeout - should not work
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"lock_timeout": str(reveal_timeout),
},
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
# try lock_timeout = reveal_timeout * 2 - should work.
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"lock_timeout": str(2 * reveal_timeout),
},
)
with watch_for_unlock_failures(*raiden_network):
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.OK)
# try lock_timeout = settle_timeout - should work.
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(identifier),
"lock_timeout": str(settle_timeout),
},
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.OK)
# try lock_timeout = settle_timeout+1 - should not work.
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": amount, "identifier": identifier, "lock_timeout": settle_timeout + 1},
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("enable_rest_api", [True])
def test_api_payments_with_invalid_input(
api_server_test_instance: APIServer, raiden_network, token_addresses
):
_, app1 = raiden_network
amount = 100
token_address = token_addresses[0]
target_address = app1.raiden.address
settle_timeout = 39
# Invalid identifier being 0 or negative
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": "0", "lock_timeout": str(settle_timeout)},
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={"amount": str(amount), "identifier": "-1", "lock_timeout": str(settle_timeout)},
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
request = grequests.post(
api_url_for(
api_server_test_instance,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={
"amount": str(amount),
"identifier": str(UINT64_MAX + 1),
"lock_timeout": str(settle_timeout),
},
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
|
import numpy as np
from itertools import combinations
from collections import OrderedDict
from module.network_trainer import NetworkTrainer  # Version "pure Python"
# from module.c_network_trainer import NetworkTrainer
from module.save import BackUp, Database
from multiprocessing import Pool
from time import time
from os import path, mkdir
class DataManager(object):
def __init__(self, file_name="dataset_290416_3output", explanans_size=52, explanandum_size=3):
self.folder_path = "data"
self.file_path = "{}/{}.txt".format(self.folder_path, file_name)
self.explanans_size = explanans_size
self.explanandum_size = explanandum_size
self.data = self.import_txt()
def import_txt(self):
print("Import txt file.")
data = np.loadtxt(self.file_path)
return data
def format_data(self):
# Center reduce for explanans, normalize for explanandum
data = np.zeros(self.data.shape[0], dtype=[('x', float, self.explanans_size),
('y', float, self.explanandum_size)])
data["x"] = Format.center_reduce(self.data[:, :self.explanans_size])
data["y"] = Format.normalize(self.data[:, self.explanans_size:])
self.data = data
def import_data(self, explanans=None, explanandum=None, individuals=None):
# Select piece of data
if explanans is None:
explanans = np.arange(self.explanans_size)
if explanandum is None:
explanandum = np.arange(self.explanandum_size)
if individuals is None:
individuals = np.arange(self.data.shape[0])
data = np.zeros(len(individuals), dtype=[('x', float, len(explanans)),
('y', float, len(explanandum))])
data["x"] = self.data['x'][np.asarray(individuals)][:, explanans]
if len(explanandum) == 1:
data["y"] = self.data['y'][np.asarray(individuals)][:, np.asarray(explanandum)].T
else:
data["y"] = self.data['y'][np.asarray(individuals)][:, np.asarray(explanandum)]
return data
class SamplesCreator(object):
@classmethod
def combinations_samples(cls, n):
print("Compute combinations for samples...")
print("Number of individuals: {}.".format(n))
print("6 ind for learning, 4 ind for testing, 3 ind for validation")
indexes_list = []
ind = np.arange(n)
val = np.random.choice(ind, 3, replace=False)
remaining_ind = np.setdiff1d(ind, val)
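        # individuals with index below 7 are treated as controls, the remaining ones as sick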
ctrl_index = []
sick_index = []
for i in range(len(remaining_ind)):
if remaining_ind[i] < 7:
ctrl_index.append(remaining_ind[i])
elif remaining_ind[i] >= 7:
sick_index.append(remaining_ind[i])
else:
raise Exception('Error combination split')
ctrl_comb = [i for i in combinations(ctrl_index, 3)]
sick_comb = [i for i in combinations(sick_index, 3)]
for i in ctrl_comb:
for j in sick_comb:
indexes_list.append({'learning': np.concatenate((i, j)),
'testing': np.concatenate((np.setdiff1d(ctrl_index, i),
np.setdiff1d(sick_index, j))),
'validation': val})
print("Done.")
return indexes_list
class Format(object):
@classmethod
def normalize(cls, data, new_range=1, new_min=-0.5):
if len(data.shape) == 1:
vmin, vmax = data.min(), data.max()
formatted_data = new_range * (data - vmin) / (vmax - vmin) + new_min
else:
formatted_data = data.copy()
for i in range(data.shape[1]):
vmin, vmax = data[:, i].min(), data[:, i].max()
formatted_data[:, i] = new_range * (data[:, i] - vmin) / (vmax - vmin) + new_min
return formatted_data
@classmethod
def center_reduce(cls, data):
if len(data.shape) == 1:
mean, std = np.mean(data), np.std(data)
if std != 0:
formatted_data = (data - mean) / std
else:
formatted_data = (data - mean)
else:
formatted_data = np.zeros(data.shape)
for i in range(data.shape[1]):
mean, std = np.mean(data[:, i]), np.std(data[:, i])
if std != 0:
formatted_data[:, i] = 2 * (data[:, i] - mean) / std
else:
formatted_data[:, i] = 2 * (data[:, i] - mean)
return formatted_data
class Cursor(object):
def __init__(self):
self.position = 0
self.folder = "tmp"
self.file_name = "{}/cursor_single_output.txt".format(self.folder)
def retrieve_position(self):
if path.exists(self.file_name):
f = open(self.file_name, 'r')
f_content = f.read()
f.close()
if f_content == '':
self.position = 0
else:
try:
self.position = int(f_content)
                except ValueError:
self.position = 0
else:
if not path.exists(self.folder):
mkdir(self.folder)
self.position = 0
def save_position(self):
f = open(self.file_name, "w")
f.write(str(self.position))
f.close()
def reset(self):
f = open(self.file_name, "w")
f.write(str(0))
f.close()
self.position = 0
class Supervisor:
def __init__(self, n_workers, output_file, back_up_frequency=10000):
self.n_network = 80
self.pool = Pool(processes=n_workers)
self.back_up = BackUp(database_name=output_file)
self.back_up_fq = back_up_frequency
self.kwargs_list = []
self.cursor = Cursor()
@staticmethod
def convert_seconds_to_h_m_s(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def prepare_kwargs_list(self):
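        # Per-output hyperparameters: hidden-layer size, learning rate and number of pattern presentations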
hidden_layer_out0 = 30
hidden_layer_out1 = 52
hidden_layer_out2 = 52
learning_rate_out0 = 0.07
learning_rate_out1 = 0.05
learning_rate_out2 = 0.05
presentation_number_out0 = 1000
presentation_number_out1 = 2000
presentation_number_out2 = 2000
data_manager = DataManager() # Import txt file
data_manager.format_data() # Center-reduce input variables and normalize output variables
n = data_manager.data.shape[0]
indexes_list = SamplesCreator.combinations_samples(n=n)
print(len(indexes_list))
np.random.shuffle(indexes_list)
id_network = 0
for selected_ind in indexes_list[0:self.n_network]:
samples_learning_out0 = data_manager.import_data(explanandum=[0],
individuals=selected_ind['learning'])
samples_testing_out0 = data_manager.import_data(explanandum=[0],
individuals=selected_ind['testing'])
samples_learning_out1 = data_manager.import_data(explanandum=[1],
individuals=selected_ind['learning'])
samples_testing_out1 = data_manager.import_data(explanandum=[1],
individuals=selected_ind['testing'])
samples_learning_out2 = data_manager.import_data(explanandum=[2],
individuals=selected_ind['learning'])
samples_testing_out2 = data_manager.import_data(explanandum=[2],
individuals=selected_ind['testing'])
kwargs = {"dataset_out0": samples_learning_out0,
"test_dataset_out0": samples_testing_out0,
"dataset_out1": samples_learning_out1,
"test_dataset_out1": samples_testing_out1,
"dataset_out2": samples_learning_out2,
"test_dataset_out2": samples_testing_out2,
"hidden_layer_out0": [hidden_layer_out0],
"hidden_layer_out1": [hidden_layer_out1],
"hidden_layer_out2": [hidden_layer_out2],
"presentation_number_out0": presentation_number_out0,
"presentation_number_out1": presentation_number_out1,
"presentation_number_out2": presentation_number_out2,
"learning_rate_out0": learning_rate_out0,
"learning_rate_out1": learning_rate_out1,
"learning_rate_out2": learning_rate_out2,
"momentum_out0": learning_rate_out0,
"momentum_out1": learning_rate_out1,
"momentum_out2": learning_rate_out2,
"ind_learning": selected_ind['learning'],
"ind_testing": selected_ind['testing'],
"id_network": id_network,
'validation_set': selected_ind['validation']
}
self.kwargs_list.append(kwargs)
id_network += 1
def launch_test(self):
"""
Require a list of arguments
:return: None
"""
if not self.kwargs_list:
raise Exception("Before beginning testing, arguments should be added to the 'kwargs' list by calling "
"method 'fill_kwargs_list'.")
beginning_time = time()
print("********************")
self.cursor.retrieve_position()
to_do = len(self.kwargs_list)
print("Begin testing.")
while self.cursor.position + self.back_up_fq < to_do:
time_spent = self.convert_seconds_to_h_m_s(time() - beginning_time)
print("Cursor position: {}/{} (time spent: {}).".format(self.cursor.position, to_do, time_spent))
print("********************")
results = self.pool.map(self.check_single_output,
self.kwargs_list[self.cursor.position:self.cursor.position + self.back_up_fq])
self.back_up.save(results)
self.cursor.position += self.back_up_fq
self.cursor.save_position()
if self.cursor.position + self.back_up_fq == (to_do - 1):
pass
else:
time_spent = self.convert_seconds_to_h_m_s(time() - beginning_time)
print("Cursor position: {}/{} (time spent: {}).".format(self.cursor.position, to_do, time_spent))
print("********************")
results = self.pool.map(self.check_single_output, self.kwargs_list[self.cursor.position:])
self.back_up.save(results)
time_spent = self.convert_seconds_to_h_m_s(time() - beginning_time)
print("Cursor position: {}/{} (time spent: {}).".format(to_do, to_do, time_spent))
print("********************")
self.cursor.reset()
print("End of testing program.")
@staticmethod
def check_single_output(kwargs):
network_trainer = NetworkTrainer()
output = OrderedDict()
ind0 = []
ind1 = []
ind2 = []
ind3 = []
for i in [0, 1, 2]:
network_trainer.create_network(dataset=kwargs['dataset_out{}'.format(i)], hidden_layer=kwargs['hidden_layer_out{}'.format(i)])
learning_dataset = kwargs['dataset_out{}'.format(i)]
test_dataset = kwargs['test_dataset_out{}'.format(i)]
pre_test_error, pre_test_output = network_trainer.test_the_network(learning_dataset)
pre_test2_error, pre_test2_output = network_trainer.test_the_network(test_dataset)
network_trainer.teach_the_network(presentation_number=kwargs['presentation_number_out{}'.format(i)],
dataset=kwargs['dataset_out{}'.format(i)],
learning_rate=kwargs['learning_rate_out{}'.format(i)],
momentum=kwargs['momentum_out{}'.format(i)])
test_error, test_output = network_trainer.test_the_network(learning_dataset)
test2_error, test_output2 = network_trainer.test_the_network(test_dataset)
weights = network_trainer.network.weights[0]
filename = 'weights_predictor/weights_test_{id}_out{output}.txt'.format(id=kwargs['id_network'], output=i)
np.savetxt(filename, weights)
weights2 = network_trainer.network.weights[1]
filename2 = 'weights_predictor/weights_test_2_{id}_out{output}.txt'.format(id=kwargs['id_network'], output=i)
np.savetxt(filename2, weights2)
output['pre_learning_out{}'.format(i)] = np.mean(pre_test_error ** 2)
output['post_learning_out{}'.format(i)] = np.mean(test_error ** 2)
output['pre_learning_test_out{}'.format(i)] = np.mean(pre_test2_error ** 2)
output['post_learning_test_out{}'.format(i)] = np.mean(test2_error ** 2)
ind0.append(test_output2[0])
ind1.append(test_output2[1])
ind2.append(test_output2[2])
ind3.append(test_output2[3])
# for j in range(len(kwargs["test_dataset_out{}".format(i)]['x'])):
# output['ind{j}'.format(j=j)] = test_output2[j]
output['presentation_number_out{}'.format(i)] = kwargs['presentation_number_out{}'.format(i)]
output['hidden_layer_out{}'.format(i)] = kwargs['hidden_layer_out{}'.format(i)]
output['learning_rate_out{}'.format(i)] = kwargs['learning_rate_out{}'.format(i)]
output['momentum_out{}'.format(i)] = kwargs['momentum_out{}'.format(i)]
learn_index = (output['pre_learning_out{}'.format(i)] - output['post_learning_out{}'.format(i)]) / \
output['post_learning_out{}'.format(i)]
test_index = (output['pre_learning_test_out{}'.format(i)] - output[
'post_learning_test_out{}'.format(i)]) / output['post_learning_test_out{}'.format(i)]
output['index_learn_out{}'.format(i)] = 100 * learn_index
output['index_test_out{}'.format(i)] = 100 * test_index
output['ind0'] = ind0
output['ind1'] = ind1
output['ind2'] = ind2
output['ind3'] = ind3
output['ind_learning'] = kwargs['ind_learning']
output['ind_testing'] = kwargs['ind_testing']
output['id_network'] = kwargs['id_network']
output['validation_set'] = kwargs['validation_set']
kwargs.pop('dataset_out2')
return output
def parameter_test():
supervisor = Supervisor(n_workers=6, output_file='results_combinator', back_up_frequency=100)
print("\n*************************")
print('Preparing kwarg list...')
print("**************************")
supervisor.prepare_kwargs_list()
print("**************************")
print('Kwarg list ready.')
print("\n*************************")
supervisor.launch_test()
class Selector:
def __init__(self, filename):
self.filename = filename
self.db = Database(self.filename)
def select_networks(self):
selection_param = OrderedDict()
selection_param["out0"] = {'index': 450.0, 'value': 0.03}
selection_param["out1"] = {'index': 2400.0, 'value': 0.008}
selection_param["out2"] = {'index': 950.0, 'value': 0.03}
selected_networks = OrderedDict()
for i, out in enumerate(selection_param):
test_index = self.db.read_column("index_test_out{}".format(i))
test_index = [float(i) for i in test_index]
test_value = self.db.read_column("post_learning_test_out{}".format(i))
test_value = [float(i) for i in test_value]
network_list = []
for j in range(len(test_index)):
if test_index[j] > selection_param[out]['index'] and test_value[j] < selection_param[out]['value']:
network_list.append(j)
selected_networks['out{}'.format(i)] = network_list
return selected_networks
def matrix_builder(self):
selected_networks = self.select_networks()
weight_matrices = dict()
for key in selected_networks:
matrix = []
layer1 = np.loadtxt('weights_predictor/weights_test_{id}_{output}.txt'.format(id=selected_networks[key][0], output=key))
matrix.append(layer1)
layer2 = np.loadtxt('weights_predictor/weights_test_2_{id}_{output}.txt'.format(id=selected_networks[key][0], output=key))
matrix.append(layer2)
weight_matrices['{}'.format(key)] = matrix
return selected_networks, weight_matrices
def convert_group(self, results):
conv_results = np.zeros((len(results), 3))
for i, result in enumerate(results):
remove_charac = str.maketrans("", "", "[]")
remove_coma = str.maketrans("", "", ",")
result = result.translate(remove_coma).translate(remove_charac).split()
result = [int(j) for j in result]
conv_results[i] = result
return conv_results
def test_new_network(self):
print('NETWORK COMBINATOR : VALIDATION STEP\n')
network_trainer = NetworkTrainer()
data_manager = DataManager()
data_manager.format_data()
validation_set = list(self.convert_group(self.db.read_column('validation_set'))[0])
validation_set = [int(i) for i in validation_set]
print('\n*************************')
print('Ind in the validation set : {}'. format(validation_set))
selected_networks, matrix = self.matrix_builder()
print('\n*************************')
print('Selected network for output 0 : {}'.format(selected_networks['out0']))
print('Selected network for output 1 : {}'.format(selected_networks['out1']))
print('Selected network for output 2 : {}'.format(selected_networks['out2']))
network_param = [30, 52, 52]
for i in [0, 1, 2]:
data = data_manager.import_data(explanandum=[i], individuals=validation_set)
network_trainer.create_network(dataset=data, hidden_layer=[network_param[i]])
network_trainer.network.weights = matrix['out{}'.format(i)]
test2_error, test_output2 = network_trainer.test_the_network(data)
print('\n*************************')
print('Results for output : {}'.format(i))
print('Mean square error on 3 validation ind : {}'.format(np.mean(test2_error)**2))
def test_var_strength(self):
print('NETWORK COMBINATOR : VALIDATION STEP\n')
network_trainer = NetworkTrainer()
data_manager = DataManager()
data_manager.format_data()
validation_set = list(self.convert_group(self.db.read_column('validation_set'))[0])
validation_set = [int(i) for i in validation_set]
print('\n*************************')
print('Ind in the validation set : {}'.format(validation_set))
selected_networks, matrix = self.matrix_builder()
print('\n*************************')
print('Selected network for output 0 : {}'.format(selected_networks['out0']))
print('Selected network for output 1 : {}'.format(selected_networks['out1']))
print('Selected network for output 2 : {}'.format(selected_networks['out2']))
network_param = [30, 52, 52]
for i in [0, 1, 2]:
data = data_manager.import_data(explanandum=[i], individuals=validation_set)
network_trainer.create_network(dataset=data, hidden_layer=[network_param[i]])
network_trainer.network.weights = matrix['out{}'.format(i)]
print(network_trainer.network.weights[0].shape)
output = np.zeros((53, 2))
test2_error, test_output2 = network_trainer.test_the_network(data)
output[0, 0] = np.mean(test2_error)**2
output[0, 1] = np.std(test2_error)**2
for j in range(52):
network_trainer.network.weights[0][j, :] = network_trainer.network.weights[0][j, :]*0
test2_error, test_output2 = network_trainer.test_the_network(data)
output[j+1, 0] = np.mean(test2_error) ** 2
output[j+1, 1] = np.std(test2_error) ** 2
filename = 'var_strength/perf_out{output}.txt'.format(output=i)
np.savetxt(filename, output)
if __name__ == '__main__':
# parameter_test()
# selector = Selector(filename='results_combinator')
# selector.test_new_network()
selector = Selector(filename='results_combinator')
selector.test_var_strength()
|
# -*- coding: utf-8 -*-
"""
Helper functions for the tasks app: calculate_reputation_gain and
give_reputation_reward.
"""
from .models import Profile, Task
from django.db.models import F
def calculate_reputation_gain(task):
"""
Calculate the reputation gained by completing a task. Currently based on
difficulty only.
"""
DIFF = Task.DIFFICULTIES
d = task.difficulty
if d == DIFF.trivial:
return 1
if d == DIFF.easy:
return 5
if d == DIFF.OK:
return 10
if d == DIFF.hard:
return 25
if d == DIFF.heroic:
return 100
if d == DIFF.nightmare:
return 500
def give_reputation_reward(task):
"""
Add the reputation reward to the profile of the user who completed the
task.
"""
reward = calculate_reputation_gain(task)
profile = Profile.objects.get(user=task.completed_by)
profile.reputation = F('reputation') + reward
profile.save()
|
from flask import Flask
from ecommerce_api.ext import configuration
def create_app(*args, **config):
app = Flask(__name__)
configuration.init_app(app, **config)
configuration.load_extensions(app)
configuration.load_blueprints(app)
configuration.load_middlewares(app)
return app
|
from pathlib import Path
import json
from .Service import *
from nonebot import export, get_driver, on_command
from nonebot.rule import to_me
from nonebot.log import logger
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import Bot, Event
# Initialization
driver = get_driver()
export().Service = Service
plugins = list()
cfg = Path(__file__).parent / '_services' / 'gcfg.json'
def _init():
if Path.is_file(cfg):
Path.unlink(cfg)
async def _get_plugins():
if not Path.is_file(cfg):
return
with open(cfg,'r',encoding='UTF-8') as f:
global plugins
try:
plugins = list(json.load(f).get('plugins'))
logger.info('成功添加%d个插件于分群管理'%len(plugins))
logger.info(str(plugins))
except Exception as e:
logger.error(e)
_init()
driver.on_startup(_get_plugins)
# Event Handler
lssv = on_command('lssv',rule=to_me(),priority=1)
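# "lssv" replies with every registered plugin and whether it is enabled for the current group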
@lssv.handle()
async def lssv_h(bot: Bot, event: Event, state: T_State):
plus = ''
for i in plugins:
state = check_plugin(event.dict().get('group_id'),i)
txt = '| {} | {}\n'.format('○'if state else '×',i)
plus += txt
await lssv.finish('群%d的插件有:\n===========\n%s\n===========\n通过 “启用/禁用 插件名「复数个用","隔开开关插件」”'%(event.dict().get('group_id'),plus))
enable_ = on_command('启用',rule=to_me(),priority=1,permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER)
disable_ = on_command('禁用',rule=to_me(),priority=1,permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER)
@enable_.handle()
async def enable_h(bot: Bot, event: Event, state: T_State):
if args := str(event.get_message()).strip():
state['p_name'] = args
@disable_.handle()
async def disable_h(bot: Bot, event: Event, state: T_State):
if args := str(event.get_message()).strip():
state['p_name'] = args
@enable_.got('p_name',prompt='你想启用哪些插件?')
async def enable_g(bot: Bot, event: Event, state: T_State):
p_name = state['p_name'].split(',')
done_plugins = list()
for i in p_name:
if i in plugins:
set_plugin(event.dict().get('group_id'),i)
done_plugins.append(i)
await enable_.finish('成功启用插件: {}'.format(' | '.join(p for p in done_plugins)))
@disable_.got('p_name',prompt='你想禁用哪些插件?')
async def disable_g(bot: Bot, event: Event, state: T_State):
p_name = state['p_name'].split(',')
done_plugins = list()
for i in p_name:
if i in plugins:
set_plugin(event.dict().get('group_id'),i,disable=True)
done_plugins.append(i)
await disable_.finish('成功禁用插件: {}'.format(' | '.join(p for p in done_plugins)))
|
from elasticsearch import Elasticsearch
def main():
index_name = 'squad2.0'
print("Creating index")
client = Elasticsearch()
client.indices.delete(index=index_name, ignore=[404])
with open("squad_questions_mapping.json") as index_file:
source = index_file.read().strip()
client.indices.create(index=index_name, body=source)
print("Created!")
if __name__ == '__main__':
main()
|
import matplotlib.pyplot as plt
from matplotlib import patches
def modeling(node_list: list, row_list: list, net_list: list):
figure = plt.figure()
figure.suptitle("modeling")
ax = figure.add_subplot()
row_number = 1
colors = ['#caa24e', '#caa24e', '#5a3e42', '#b35031', '#5a3e42', '#6e6e64', '#8f050e', '#e3ce82']
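    # palette cycled through when coloring the Non_Terminal cell rectangles below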
number_of_colors = 0
for row in row_list:
x = [row.lower_left_x_coordinate, row.lower_right_x_coordinate]
y = [row.lower_left_y_coordinate, row.lower_left_y_coordinate]
ax.plot(x, y, label="row {}".format(row_number))
row_number += 1
number_of_nodes = 1
polygons = {}
for node in node_list:
if node.node_type != "Non_Terminal":
x = [node.node_x + node.node_width]
y = [node.node_y + node.node_height]
ax.plot(x, y, "o", markersize=10)
if node.node_type == "Non_Terminal":
node_x = [node.node_x, node.node_x, node.node_x + node.node_width, node.node_x + node.node_width]
node_y = [node.node_y, node.node_y + node.node_height, node.node_y + node.node_height, node.node_y]
            # we don't have unique colors for each cell, so the palette is cycled
ax.add_patch(
patches.Polygon(xy=list(zip(node_x, node_y)), fill=True, color=colors[number_of_colors],
label=number_of_nodes))
if number_of_colors == 7:
number_of_colors = 0
number_of_colors += 1
number_of_nodes += 1
for net in net_list:
net_x = [net.x_min, net.x_min, net.x_max, net.x_max]
net_y = [net.y_min, net.y_max, net.y_max, net.y_min]
ax.add_patch(patches.Polygon(xy=list(zip(net_x, net_y)), fill=False, linestyle="dashed"))
# TODO add labels in cells
# ax = plt.subplots
# for poly in polygons:
# print(poly)
# # print(current_patch)
# # cx = rx + (node.node_x + node.node_width) / 2.0
# # cy = ry + (node.node_y + node.node_height) / 2.0
# # ax.annotate(patches, (cx, cy), weight='bold', fontsize=5, ha='center', va='center')
# display final result
plt.show()
|
from pathlib import Path
# Absolute path
# E:\04-PROGRAMMING\Python
# path = Path("ecommerce")
# path = Path("ecommerce1")
# path = Path("emails")
# # print(path.mkdir())
# print(path.rmdir())
# print(path.exists())
path = Path()
# for file in path.glob('*.py'): # Search a file using a pattern
for file in path.glob('*'): # Searching all the files and directories in the current path
print(file)
# Relative path
|
from ast import literal_eval
import warnings
class AttrDict(dict):
def __init__(self, **kwargs):
super(AttrDict, self).__init__(**kwargs)
self.update(kwargs)
@staticmethod
def from_dict(dict):
ad = AttrDict()
ad.update(dict)
return ad
def __setitem__(self, key: str, value):
super(AttrDict, self).__setitem__(key, value)
super(AttrDict, self).__setattr__(key, value)
def update(self, config: dict):
for k, v in config.items():
if k not in self:
self[k] = AttrDict()
if isinstance(v, dict):
self[k].update(v)
else:
self[k] = v
def update_from_list(self, str_list: list):
assert len(str_list) % 2 == 0
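        # str_list is a flat [key1, value1, key2, value2, ...] sequence; dotted keys address nested entries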
for key, value in zip(str_list[0::2], str_list[1::2]):
key_list = key.split('.')
item = None
last_key = key_list.pop()
for sub_key in key_list:
item = self[sub_key] if item is None else item[sub_key]
try:
item[last_key] = literal_eval(value)
except ValueError:
item[last_key] = value
warnings.warn('a string value is set to {}'.format(key))
|
import os
import re
from time import sleep
from bs4 import BeautifulSoup
import pandas as pd
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# Webdriver options; set to headless
options = webdriver.ChromeOptions()
options.add_argument("--headless")
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
# URL for minted addressbook
URL = "https://www.minted.com/addressbook/my-account/finalize/0?it=utility_nav"
states = {
"AK": "Alaska",
"AL": "Alabama",
"AR": "Arkansas",
"AS": "American Samoa",
"AZ": "Arizona",
"CA": "California",
"CO": "Colorado",
"CT": "Connecticut",
"DC": "District of Columbia",
"DE": "Delaware",
"FL": "Florida",
"GA": "Georgia",
"GU": "Guam",
"HI": "Hawaii",
"IA": "Iowa",
"ID": "Idaho",
"IL": "Illinois",
"IN": "Indiana",
"KS": "Kansas",
"KY": "Kentucky",
"LA": "Louisiana",
"MA": "Massachusetts",
"MD": "Maryland",
"ME": "Maine",
"MI": "Michigan",
"MN": "Minnesota",
"MO": "Missouri",
"MP": "Northern Mariana Islands",
"MS": "Mississippi",
"MT": "Montana",
"NA": "National",
"NC": "North Carolina",
"ND": "North Dakota",
"NE": "Nebraska",
"NH": "New Hampshire",
"NJ": "New Jersey",
"NM": "New Mexico",
"NV": "Nevada",
"NY": "New York",
"OH": "Ohio",
"OK": "Oklahoma",
"OR": "Oregon",
"PA": "Pennsylvania",
"PR": "Puerto Rico",
"RI": "Rhode Island",
"SC": "South Carolina",
"SD": "South Dakota",
"TN": "Tennessee",
"TX": "Texas",
"UT": "Utah",
"VA": "Virginia",
"VI": "Virgin Islands",
"VT": "Vermont",
"WA": "Washington",
"WI": "Wisconsin",
"WV": "West Virginia",
"WY": "Wyoming",
}
# Set your minted.com email and password as the env vars:
# minted_email and minted_password
try:
minted_email = os.environ["minted_email"]
except KeyError:
minted_email = input("Enter your minted.com email address:")
try:
minted_password = os.environ["minted_password"]
except KeyError:
minted_password = input("Enter your minted.com password:")
driver.get(URL)
# Login form
email_elem = driver.find_element_by_name("email")
email_elem.send_keys(minted_email)
password_elem = driver.find_element_by_name("password")
password_elem.send_keys(minted_password)
login_submit = driver.find_element_by_class_name("loginButton")
login_submit.click()
sleep(5) # to load JS and be nice
driver.get("https://addressbook.minted.com/api/contacts/contacts/print/?")
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
listings = soup.find("main")
address_book = pd.DataFrame()
addressees = listings.find_all("span", {"class": "contact-name"})
address_book["name"] = [name.text.strip() for name in addressees]
addresses = listings.find_all("span", {"class": "contact-address"})
street_details = [
"".join(street.text.strip().split("\n")[:-1]).strip() for street in addresses
]
address_book["address"] = [" ".join(street.split()) for street in street_details]
city_details = [street.text.strip().split("\n")[-1].strip() for street in addresses]
address_book["locality"] = list(city_details)
def pull_state(address):
"""Isolates and formats State portion of address, if available"""
try:
field = re.findall(r"([a-zA-Z]{2,}) (\d{5})", address)[0][0]
state = states.get(field.upper(), field)
    except IndexError:  # no state/zip pattern found in the address
        state = ""
return state
def pull_zip(address):
"""Isolates and formats zipcode of address, if available"""
try:
zipcode = re.findall(r"([a-zA-Z]{2,}) (\d{5})", address)[0][1]
    except IndexError:  # no state/zip pattern found in the address
        zipcode = ""
return zipcode
address_book["state"] = address_book["locality"].apply(pull_state)
address_book["zipcode"] = address_book["locality"].apply(pull_zip)
address_book["town"] = address_book["locality"].map(lambda x: x.split(",")[0])
column_titles = ["Name", "Address", "Town", "State", "Zipcode"]
address_book = address_book.reindex(columns=[col.lower() for col in column_titles])
address_book.to_excel("./data/minted-addresses.xlsx", header=column_titles)
address_book.to_csv("./data/minted-addresses.csv", index=False, header=column_titles)
driver.close()
|
from configs import EXP_CONFIGS
from xml.etree.ElementTree import dump
from lxml import etree as ET  # lxml etree is used to build and write the network XML files below
import os
E = ET.Element
def indent(elem, level=0):
i = "\n " + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + ""
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class Network():
def __init__(self, configs):
self.configs = configs
self.sim_start = self.configs['sim_start']
self.max_steps = self.configs['max_steps']
self.current_path = os.path.dirname(os.path.abspath(__file__))
gen_training_data_path = os.path.join(
self.current_path, 'training_data')
if os.path.exists(gen_training_data_path) == False:
os.mkdir(gen_training_data_path)
if self.configs['mode'] == 'train' or self.configs['mode'] == 'train_old':
self.file_name = self.configs['file_name']
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data']))
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data'], 'net_data'))
self.current_Env_path = os.path.join(
self.current_path, 'training_data', self.configs['time_data'], 'net_data')
elif self.configs['mode'] == 'test':
self.file_name = self.configs['file_name']
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data']))
os.mkdir(os.path.join(self.current_path, 'training_data',
self.configs['time_data'], 'net_data'))
self.current_Env_path = os.path.join(
self.current_path, 'training_data', self.configs['time_data'], 'net_data')
else: # simulate
self.file_name = self.configs['file_name']
self.current_Env_path = os.path.join(
self.current_path, 'Net_data')
            if not os.path.exists(self.current_Env_path):
os.mkdir(self.current_Env_path)
# data directory generate
gen_data_path = os.path.join(self.current_path, 'data')
        if not os.path.exists(gen_data_path):
os.mkdir(gen_data_path)
self.num_cars = str(self.configs['num_cars'])
self.num_lanes = str(self.configs['num_lanes'])
self.flow_start = str(self.configs['flow_start'])
self.flow_end = str(self.configs['flow_end'])
self.laneLength = self.configs['laneLength']
self.nodes = list()
self.flows = list()
self.vehicles = list()
self.edges = list()
self.connections = list()
self.outputData = list()
self.traffic_light = list()
if self.configs['mode'] == 'test':
self.generate_cfg(True, 'test')
if self.configs['mode'] == 'train':
self.generate_cfg(True, 'train')
    def specify_edge(self):
        '''
        Hook method; meant to be overridden by subclasses.
        '''
        edges = list()
        return edges
    def specify_node(self):
        '''
        Hook method; meant to be overridden by subclasses.
        '''
        nodes = list()
        return nodes
    def specify_flow(self):
        '''
        Hook method; meant to be overridden by subclasses.
        '''
        flows = list()
        return flows
    def specify_connection(self):
        '''
        Hook method; meant to be overridden by subclasses.
        '''
        connections = list()
        return connections
    def specify_outdata(self):
        '''
        Hook method; meant to be overridden by subclasses.
        '''
        outputData = list()
        return outputData
    def specify_traffic_light(self):
        '''
        Hook method; meant to be overridden by subclasses.
        '''
        traffic_light = list()
        return traffic_light
def _generate_nod_xml(self):
self.nodes = self.specify_node()
nod_xml = ET.Element('nodes')
for node_dict in self.nodes:
# node_dict['x']=format(node_dict['x'],'.1f')
nod_xml.append(E('node', attrib=node_dict))
indent(nod_xml, 1)
dump(nod_xml)
tree = ET.ElementTree(nod_xml)
# tree.write(self.file_name+'.xml',encoding='utf-8',xml_declaration=True)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.nod.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
def _generate_edg_xml(self):
self.edges = self.specify_edge()
edg_xml = ET.Element('edges')
for _, edge_dict in enumerate(self.edges):
edg_xml.append(E('edge', attrib=edge_dict))
indent(edg_xml, 1)
dump(edg_xml)
tree = ET.ElementTree(edg_xml)
# tree.write(self.xml_edg_name+'.xml',encoding='utf-8',xml_declaration=True)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.edg.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
def _generate_net_xml(self):
# file_name_str=os.path.join(self.current_Env_path,self.file_name)
file_name_str = os.path.join(self.current_Env_path, self.file_name)
if len(self.traffic_light) != 0:
os.system('netconvert -n {0}.nod.xml -e {0}.edg.xml -i {0}_tl.add.xml -o {0}.net.xml --no-turnarounds True'.format(
file_name_str))
elif len(self.connections) == 0:
os.system('netconvert -n {}.nod.xml -e {}.edg.xml -o {}.net.xml --no-turnarounds True'.format(
file_name_str, file_name_str, file_name_str))
        else:  # use -x when connection files exist
os.system('netconvert -n {}.nod.xml -e {}.edg.xml -x {}.con.xml -o {}.net.xml --no-turnarounds True'.format(
file_name_str, file_name_str, file_name_str, file_name_str))
def _generate_rou_xml(self):
self.flows = self.specify_flow()
route_xml = ET.Element('routes')
        if len(self.vehicles) != 0:  # only when vehicles are specified
for _, vehicle_dict in enumerate(self.vehicles):
route_xml.append(E('veh', attrib=vehicle_dict))
indent(route_xml, 1)
if len(self.flows) != 0:
for _, flow_dict in enumerate(self.flows):
route_xml.append(E('flow', attrib=flow_dict))
indent(route_xml, 1)
dump(route_xml)
tree = ET.ElementTree(route_xml)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.rou.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
    def _generate_con_xml(self):
        self.connections = self.specify_connection()
con_xml = ET.Element('connections')
        if len(self.connections) != 0:  # only when connections are specified
for _, connection_dict in enumerate(self.connections):
con_xml.append(E('connection', attrib=connection_dict))
indent(con_xml, 1)
dump(con_xml)
tree = ET.ElementTree(con_xml)
tree.write(os.path.join(self.current_Env_path, self.file_name+'.con.xml'), pretty_print=True,
encoding='UTF-8', xml_declaration=True)
def _generate_add_xml(self):
traffic_light_set = self.specify_traffic_light()
self.traffic_light = traffic_light_set
data_additional = ET.Element('additional')
        # edgeData and laneData output files are written to the data directory
data_additional.append(E('edgeData', attrib={'id': 'edgeData_00', 'file': '{}_edge.xml'.format(self.current_path+'\\data\\'+self.configs['mode']+'\\'+self.file_name), 'begin': '0', 'end': str(
self.configs['max_steps']), 'freq': '900'}))
indent(data_additional, 1)
data_additional.append(E('laneData', attrib={'id': 'laneData_00', 'file': '{}_lane.xml'.format(self.current_path+'\\data\\'+self.configs['mode']+'\\'+self.file_name), 'begin': '0', 'end': str(
self.configs['max_steps']), 'freq': '900'}))
indent(data_additional, 1)
dump(data_additional)
tree = ET.ElementTree(data_additional)
tree.write(os.path.join(self.current_Env_path, self.file_name+'_data.add.xml'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
tl_additional = ET.Element('additional')
if len(self.traffic_light) != 0 or self.configs['mode'] == 'simulate':
for _, tl in enumerate(traffic_light_set):
phase_set = tl.pop('phase')
tlLogic = ET.SubElement(tl_additional, 'tlLogic', attrib=tl)
indent(tl_additional, 1)
for _, phase in enumerate(phase_set):
tlLogic.append(E('phase', attrib=phase))
indent(tl_additional, 2)
dump(tl_additional)
tree = ET.ElementTree(tl_additional)
tree.write(os.path.join(self.current_Env_path, self.file_name+'_tl.add.xml'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
def generate_cfg(self, route_exist, mode='simulate'):
        '''
        Once all other file generation is done, subclasses should extend this method via `super()`.
        '''
sumocfg = ET.Element('configuration')
inputXML = ET.SubElement(sumocfg, 'input')
inputXML.append(
E('net-file', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'.net.xml')}))
indent(sumocfg)
        if route_exist:
            if self.configs['network'] == 'grid':  # routes are only generated for the grid network
self._generate_rou_xml()
if os.path.exists(os.path.join(self.current_Env_path, self.file_name+'.rou.xml')):
inputXML.append(
E('route-files', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'.rou.xml')}))
indent(sumocfg)
# if os.path.exists(os.path.join(self.current_Env_path, self.file_name+'_data.add.xml')):
# inputXML.append(
# E('additional-files', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'_data.add.xml')}))
# indent(sumocfg)
inputXML.append(E('additional-files', attrib={'value': os.path.join(self.current_Env_path, self.file_name+'_data.add.xml')}))
indent(sumocfg)
time = ET.SubElement(sumocfg, 'time')
time.append(E('begin', attrib={'value': str(self.sim_start)}))
indent(sumocfg)
time.append(E('end', attrib={'value': str(self.max_steps)}))
indent(sumocfg)
outputXML = ET.SubElement(sumocfg, 'output')
indent(sumocfg)
dump(sumocfg)
tree = ET.ElementTree(sumocfg)
if mode == 'simulate':
tree.write(os.path.join(self.current_Env_path, self.file_name+'_simulate.sumocfg'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
elif mode == 'test':
tree.write(os.path.join(self.current_Env_path, self.file_name+'_test.sumocfg'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
elif mode == 'train' or mode == 'train_old':
tree.write(os.path.join(self.current_Env_path, self.file_name+'_train.sumocfg'),
pretty_print=True, encoding='UTF-8', xml_declaration=True)
def test_net(self):
self.generate_cfg(False)
os.system('sumo-gui -c {}.sumocfg'.format(os.path.join(self.current_Env_path,
self.file_name+'_simulate')))
def sumo_gui(self):
self.generate_cfg(True)
os.system('sumo-gui -c {}.sumocfg'.format(
os.path.join(self.current_Env_path, self.file_name+'_simulate')))
def generate_all_xml(self):
self._generate_nod_xml()
self._generate_edg_xml()
self._generate_add_xml()
self._generate_net_xml()
self._generate_rou_xml()
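# A minimal sketch of how a concrete road network could extend this class
# (hypothetical class name and attribute values, not part of the original
# project): subclasses override the specify_* hooks and reuse
# generate_all_xml()/generate_cfg() from the base class. Attribute keys follow
# SUMO's plain-XML node/edge/route formats; all attribute values must be strings.
class TwoNodeNetwork(Network):
    def specify_node(self):
        # two nodes placed laneLength apart along the x axis
        return [
            {'id': 'n_0', 'x': '0', 'y': '0'},
            {'id': 'n_1', 'x': str(self.laneLength), 'y': '0'},
        ]
    def specify_edge(self):
        # a single edge connecting the two nodes
        return [
            {'id': 'e_0', 'from': 'n_0', 'to': 'n_1', 'numLanes': self.num_lanes},
        ]
    def specify_flow(self):
        # one flow of num_cars vehicles over the edge during the configured interval
        return [
            {'id': 'f_0', 'from': 'e_0', 'to': 'e_0', 'begin': self.flow_start,
             'end': self.flow_end, 'number': self.num_cars},
        ]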
if __name__ == '__main__':
network = Network(EXP_CONFIGS)
network.sumo_gui()
|
#!/usr/bin/python3
""" 12-main """
from models.rectangle import Rectangle
if __name__ == "__main__":
r1 = Rectangle(10, 2, 1, 9)
print(r1)
r1_dictionary = r1.to_dictionary()
print(r1_dictionary)
print(type(r1_dictionary))
r2 = Rectangle(1, 1)
print(r2)
r2.update(**r1_dictionary)
print(r2)
print(r1 == r2)
|
# -*- coding: utf-8 -*-
name = 'jemalloc'
version = '4.5.0'
def commands():
env.LD_LIBRARY_PATH.append('{root}/lib/')
appendenv('PATH', '{root}/bin/')
|
from subprocess import call
from config import Const
class Windows(object):
"""
This script builds a windows autoinstall image in the /kubam directory.
Build the autoinstall image and return error code with a message.
"""
@staticmethod
def build_boot_image(node, template, net_template):
new_image_name = Const.KUBAM_DIR + node["name"] + ".img"
new_image_dir = Const.KUBAM_DIR + node["name"]
# Copy the file to the directory.
o = call(["cp", "-f", Const.WIN_IMG, new_image_name])
if not o == 0:
return 1, "not able to copy {0} to {1}".format(Const.BASE_IMG, new_image_name)
# Create mount point
o = call(["mkdir", "-p", new_image_dir])
if not o == 0:
return 1, "not able to call 'mkdir -p {0}'".format(new_image_dir)
fw = new_image_dir + "/autounattend.xml"
try:
with open(fw, 'w') as f:
f.write(template)
        except IOError as err:
            print(err.strerror)
            return 1, "{0}".format(err.strerror)
# Move this file to the fat filesystem
o = call(["mcopy", "-o", "-i", new_image_name, fw, "::autounattend.xml"])
if not o == 0:
return 1, "unable to run: mcopy -o -i {0} {1} ::autounattend.xml".format(new_image_name, fw)
# Write the file over the existing file if it exists. Hack to over write file
fw = new_image_dir + "/network.txt"
try:
with open(fw, 'w') as f:
f.write(net_template)
        except IOError as err:
            print(err.strerror)
            return 1, "{0}".format(err.strerror)
# Move this file to ks.cfg
o = call(["mcopy", "-o", "-i", new_image_name, fw, "::network.txt"])
if not o == 0:
return 1, "unable to run: mcopy -o -i {0} {1} ::network.txt".format(new_image_name, fw)
# Remove stage directory
o = call(["rm", "-rf", new_image_dir])
if not o == 0:
return 1, "unable to rm -rf {0}".format(new_image_dir)
return 0, None
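# A minimal usage sketch (hypothetical node name and template contents; the
# node dict and the rendered autounattend/network templates would normally be
# supplied by the caller). Only Const.KUBAM_DIR and Const.WIN_IMG from the
# imported config are assumed to exist.
if __name__ == "__main__":
    sample_node = {"name": "win-node01"}
    unattend_xml = "<unattend><!-- rendered autounattend.xml template --></unattend>"
    network_txt = "ip=10.0.0.10 netmask=255.255.255.0 gateway=10.0.0.1"
    rc, msg = Windows.build_boot_image(sample_node, unattend_xml, network_txt)
    if rc != 0:
        print(msg)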
|
#!/usr/bin/env python
import json
# Import the necessary methods from "twitter" library
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
# Variables that contain the user credentials to access the Twitter API
ACCESS_TOKEN = '######################################'
ACCESS_SECRET = '##########################################'
CONSUMER_KEY = '#########################################'
CONSUMER_SECRET = '###########################################'
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
# Initiate the connection to Twitter Streaming API
twitStream = TwitterStream(auth=oauth)
# List of phrases to track
keywords = ["#resolution", "new year's resolution", "newyear"]
# Get a sample of the public data flowing through Twitter
#iterator = twitter_stream.statuses.sample()
# track expects a comma-separated string of phrases; note the Python constant True for stall_warnings
iterator = twitStream.statuses.filter(track=",".join(keywords), language="en", stall_warnings=True)
# Get data for my specific stream
#twitter_userstream = TwitterStream(auth=oauth, domain='userstream.twitter.com')
# Print each tweet in the stream to the screen
# Here we cap the run at tweet_count tweets (10,000,000 below, which is
# effectively open-ended). Lower the value to stop sooner, or keep the
# stream running to collect data for days or even longer.
tweet_count = 10000000
for tweet in iterator:
tweet_count -= 1
# Twitter Python Tool wraps the data returned by Twitter
# as a TwitterDictResponse object.
# We convert it back to the JSON format to print/score
    print(json.dumps(tweet))
    # The line below does pretty printing of the JSON data, try it out:
    # print(json.dumps(tweet, indent=4))
if tweet_count <= 0:
break
|