hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
de6490d1c1a96e9c5dde617c3def839494915247 | 1,270 | py | Python | pyfiles/30yankeedoodle.py | StevenPZChan/pythonchallenge | 84c0e7458189f6d74e2cfbd169d854dae11d07a9 | [
"MIT"
] | null | null | null | pyfiles/30yankeedoodle.py | StevenPZChan/pythonchallenge | 84c0e7458189f6d74e2cfbd169d854dae11d07a9 | [
"MIT"
] | null | null | null | pyfiles/30yankeedoodle.py | StevenPZChan/pythonchallenge | 84c0e7458189f6d74e2cfbd169d854dae11d07a9 | [
"MIT"
] | null | null | null | import requests
from PIL import Image
header = {'Authorization': 'Basic cmVwZWF0OnN3aXRjaA==', }
response = requests.get('http://www.pythonchallenge.com/pc/ring/yankeedoodle.csv', headers=header)
with open('yankeedoodle.csv', 'wb') as f:
f.write(response.content)
with open('yankeedoodle.csv', 'r') as f:
data = f.read().replace('\n', '').split(',')
num_data = len(data)
print('total:', num_data, '=', end=' ')
prime_factor(num_data) # 7367 = 53 * 139
img = Image.new('F', (139, 53))
for ind in range(num_data):
img.putpixel((ind // img.height, ind % img.height), 256 * float(data[ind]))
img.show() # n=str(x[i])[5]+str(x[i+1])[5]+str(x[i+2])[6]
x = [f'{float(s):.5f}' for s in data]
encoded_str = [str(x[i])[5] + str(x[i + 1])[5] + str(x[i + 2])[6] for i in range(0, num_data // 3 * 3, 3)]
print(encoded_str)
print(bytes(int(i) for i in encoded_str))
# 'So, you found the hidden message.\nThere is lots of room here for a long message, but we only need
# very little space to say "look at grandpa", so the rest is just garbage. \nVTZ.l
| 31.75 | 106 | 0.607874 | import requests
from PIL import Image
header = {'Authorization': 'Basic cmVwZWF0OnN3aXRjaA==', }
response = requests.get('http://www.pythonchallenge.com/pc/ring/yankeedoodle.csv', headers=header)
with open('yankeedoodle.csv', 'wb') as f:
f.write(response.content)
with open('yankeedoodle.csv', 'r') as f:
data = f.read().replace('\n', '').split(',')
num_data = len(data)
def prime_factor(n):
    """Print the prime factorisation of n as "p1 * p2 * ... * pk" on one line.

    Iterative equivalent of the original recursive version: repeatedly strip
    the smallest divisor, printing it followed by " * ", until only a prime
    (or n itself, when n has no divisor) remains; that last value is printed
    with a trailing newline. Always returns None.
    """
    remaining = int(n)
    while True:
        smallest = None
        # Trial division: the first divisor found is necessarily prime.
        for candidate in range(2, remaining // 2 + 1):
            if remaining % candidate == 0:
                smallest = candidate
                break
        if smallest is None:
            print(remaining)  # final (prime) factor, or n itself
            return None
        print(smallest, end=' ')
        print("*", end=' ')
        remaining //= smallest
# Factorise the CSV length to recover the hidden image dimensions.
print('total:', num_data, '=', end=' ')
prime_factor(num_data)  # 7367 = 53 * 139
# Render the float values as a 139x53 grayscale image (column-major fill).
img = Image.new('F', (139, 53))
for ind in range(num_data):
    img.putpixel((ind // img.height, ind % img.height), 256 * float(data[ind]))
img.show()  # hint from the image: n=str(x[i])[5]+str(x[i+1])[5]+str(x[i+2])[6]
# Decode the hidden message: selected decimal digits of every triple of
# values form the ASCII code of one character.
x = [f'{float(s):.5f}' for s in data]
encoded_str = [str(x[i])[5] + str(x[i + 1])[5] + str(x[i + 2])[6] for i in range(0, num_data // 3 * 3, 3)]
print(encoded_str)
print(bytes(int(i) for i in encoded_str))
# 'So, you found the hidden message.\nThere is lots of room here for a long message, but we only need
# very little space to say "look at grandpa", so the rest is just garbage. \nVTZ.l
| 187 | 0 | 23 |
bd035791d582d112d9724a0bb9dc759903dd6332 | 2,930 | py | Python | src/pm_proxy/dependents/npmjs_dependents.py | Yanivmd/maloss | af85ac202668da88d0b4a885386a1e56703e37c8 | [
"MIT"
] | 1 | 2022-01-29T16:13:06.000Z | 2022-01-29T16:13:06.000Z | src/pm_proxy/dependents/npmjs_dependents.py | Yanivmd/maloss | af85ac202668da88d0b4a885386a1e56703e37c8 | [
"MIT"
] | null | null | null | src/pm_proxy/dependents/npmjs_dependents.py | Yanivmd/maloss | af85ac202668da88d0b4a885386a1e56703e37c8 | [
"MIT"
] | 1 | 2022-01-29T16:13:07.000Z | 2022-01-29T16:13:07.000Z | import sys
import json
import urllib
import logging
import requests
import argparse
from urlparse import urljoin
from bs4 import BeautifulSoup
# parse arguments
parser = argparse.ArgumentParser(prog="npmjs_dependents", description="Parse arguments")
parser.add_argument("name", help="Name of the package to query dependents")
parser.add_argument("-o", "--outfile", help="Path to the output file for storing dependents info")
args = parser.parse_args(sys.argv[1:])
name = args.name
outfile = args.outfile
# deprecated
# breath-first search for dependents
dependents = set()
queue = [name]
while queue:
vertex = queue.pop(0)
if vertex not in dependents:
dependents.add(vertex)
queue.extend(set(get_dependents_html(vertex)) - dependents)
dependents -= {name}
# post-processing
print("there are %d dependents for package name %s" % (len(dependents), name))
if outfile:
json.dump(list(dependents), open(outfile, 'w'), indent=2)
| 38.552632 | 199 | 0.715358 | import sys
import json
import urllib
import logging
import requests
import argparse
from urlparse import urljoin
from bs4 import BeautifulSoup
# parse arguments
parser = argparse.ArgumentParser(prog="npmjs_dependents", description="Parse arguments")
parser.add_argument("name", help="Name of the package to query dependents")
parser.add_argument("-o", "--outfile", help="Path to the output file for storing dependents info")
args = parser.parse_args(sys.argv[1:])
name = args.name
outfile = args.outfile
# deprecated
def get_dependents(pkgName):
    """Query the skimdb CouchDB view for packages depending on *pkgName*.

    Uses the same view as npm-dependents:
    https://github.com/davidmarkclements/npm-dependents/blob/master/index.js#L17-L19
    Returns a list of dependent package names.
    """
    # URL-encoded JSON key range: ["<pkg>"] .. ["<pkg>", {}]
    quoted_pkg = "%5B%22" + pkgName + "%22"
    metadata_url = ("https://skimdb.npmjs.com/registry/_design/app/_view/dependedUpon"
                    "?group_level=2&startkey=" + quoted_pkg + "%5D"
                    "&endkey=" + quoted_pkg + "%2C%7B%7D%5D&skip=0&limit=10000")
    response = requests.request('GET', metadata_url)
    rows = json.loads(response.text)['rows']
    # Each row key is [depended-upon-package, dependent]; keep the dependent.
    dependents = [row['key'][-1] for row in rows]
    logging.warning("%s has %d dependents (%s)", pkgName, len(dependents), dependents)
    return dependents
def get_dependents_link(link):
    """Fetch one npmjs browse page and scrape the package names on it.

    Returns (set of package names, parsed BeautifulSoup page) so callers can
    continue following pagination links on the same soup.
    """
    logging.warning("fetching link: %s", link)
    page = requests.request('GET', link)
    soup = BeautifulSoup(page.text, "lxml")
    dependents = set()
    # Package links on the browse page look like .../package/<name>.
    for anchor in soup.findAll('a', attrs={'target': '_self'}):
        href = anchor.get('href')
        if '/package/' in href:
            dependents.add(href.split('/package/')[-1])
    logging.warning("link %s has %d packages (%s)", link, len(dependents), dependents)
    return dependents, soup
def get_dependents_html(pkgName):
    """Collect all dependents of *pkgName* by scraping www.npmjs.com browse
    pages, following 'Next Page' links until pagination ends.

    Returns a set of dependent package names (empty if the first page has none).
    """
    # current page
    base_link = "https://www.npmjs.com/browse/depended/%s" % pkgName
    dependents, soup = get_dependents_link(base_link)
    if len(dependents) == 0:
        return dependents
    # process next pages
    pkg_dep_next_page_url = [link.get('href') for link in soup.findAll('a', text='Next Page')]
    while pkg_dep_next_page_url:
        # NOTE(review): hrefs are always resolved against base_link, not the
        # current page's URL -- assumes 'Next Page' hrefs are absolute paths;
        # confirm against the live site.
        pkg_dep_next_page_url = urljoin(base_link, pkg_dep_next_page_url[0])
        next_page_dependents, soup = get_dependents_link(pkg_dep_next_page_url)
        dependents |= next_page_dependents
        pkg_dep_next_page_url = [link.get('href') for link in soup.findAll('a', text='Next Page')]
        logging.warning("collected %d dependents for %s so far", len(dependents), pkgName)
    # the total number of dependents
    logging.warning("%s has %d dependents (%s)", pkgName, len(dependents), dependents)
    return dependents
# breadth-first search over the dependents graph, starting at the root package
dependents = set()
queue = [name]
while queue:
    vertex = queue.pop(0)  # FIFO pop (O(n) on a list; fine for small graphs)
    if vertex not in dependents:
        dependents.add(vertex)
        # enqueue this package's direct dependents that have not been seen yet
        queue.extend(set(get_dependents_html(vertex)) - dependents)
dependents -= {name}  # the root package is not its own dependent
# post-processing
print("there are %d dependents for package name %s" % (len(dependents), name))
if outfile:
    json.dump(list(dependents), open(outfile, 'w'), indent=2)
| 1,900 | 0 | 68 |
36888563b1a30f9ca9ffdc10fdf78ea9c9bce694 | 9,204 | py | Python | Ordenamento Radix Sort (TP4 A2)/A2 RadixSort LSD vRelatorio.py | DFTF-PConsole/AED-Labs-Arvores-LEI | 2149b4f6058fb581282c5c5d813ae99e233e453b | [
"MIT"
] | null | null | null | Ordenamento Radix Sort (TP4 A2)/A2 RadixSort LSD vRelatorio.py | DFTF-PConsole/AED-Labs-Arvores-LEI | 2149b4f6058fb581282c5c5d813ae99e233e453b | [
"MIT"
] | null | null | null | Ordenamento Radix Sort (TP4 A2)/A2 RadixSort LSD vRelatorio.py | DFTF-PConsole/AED-Labs-Arvores-LEI | 2149b4f6058fb581282c5c5d813ae99e233e453b | [
"MIT"
] | null | null | null | # v1: Com Radix Sort LSD (A2) (Lado Direito -> Lado Esquerdo) Atua Sobre Digitos e Nao Bits (Para inteiros) |
# Ex.: 1999 >>> 1 <- 9 <- 9 <- 9
# *** VERSAO RELATORIO *** | Tabela 2 e 3
# #### BIBLIOTECAS ####
import sys
import time
import msvcrt
from io import StringIO
# #### CONSTANTES ####
CMD_IN_GLOBAL = "PESQ_GLOBAL\n"
CMD_IN_UTILIZADORES = "PESQ_UTILIZADORES\n"
CMD_IN_TERMINADO = "TCHAU\n"
CMD_IN_TERMINADO2 = "TCHAU"
CMD_IN_PALAVRAS = "PALAVRAS\n"
CMD_IN_FIM = "FIM.\n"
CMD_OUT_GUARDADO = "GUARDADAS"
# #### FUNCOES ####
# v1: Com Radix Sort LSD (A2) (Lado Direito -> Lado Esquerdo) Atua Sobre Digitos e Nao Bits (Para inteiros) |
# Ex.: 1999 >>> 1 <- 9 <- 9 <- 9
# *** VERSAO RELATORIO *** | Tabela 2 e 3
if __name__ == '__main__':
# ### START ###
main()
| 42.414747 | 194 | 0.557366 | # v1: Com Radix Sort LSD (A2) (Lado Direito -> Lado Esquerdo) Atua Sobre Digitos e Nao Bits (Para inteiros) |
# Ex.: 1999 >>> 1 <- 9 <- 9 <- 9
# *** VERSAO RELATORIO *** | Tabela 2 e 3
# #### BIBLIOTECAS ####
import sys
import time
import msvcrt
from io import StringIO
# #### CONSTANTES ####
CMD_IN_GLOBAL = "PESQ_GLOBAL\n"
CMD_IN_UTILIZADORES = "PESQ_UTILIZADORES\n"
CMD_IN_TERMINADO = "TCHAU\n"
CMD_IN_TERMINADO2 = "TCHAU"
CMD_IN_PALAVRAS = "PALAVRAS\n"
CMD_IN_FIM = "FIM.\n"
CMD_OUT_GUARDADO = "GUARDADAS"
# #### FUNCOES ####
def main():
    """Main driver: for each report text, parse the word list once, then time
    20 sorting runs (global counts and per-user counts) and print the mean
    times in milliseconds. Windows-only (uses msvcrt to drain the console)."""
    array_palavras = []  # distinct words, e.g. [omg, xd, a, ahah] | input "word + ID"
    array_count_global = []  # [[count, index], ...] total occurrences per word
    array_count_utilizadores = []  # [[count, index], ...] distinct-user counts per word
    # array_utilizadores = [] would hold distinct user IDs per word, e.g. [[109, 114], ...]
    textos_relatorio = ["A", "B", "C", "D"]
    for n_texto in textos_relatorio:
        print("# # # # # # TEXTO " + n_texto + " # # # # # #")
        nome_fich = "./StdinsCalculaTempos/StdinTexto" + n_texto + "RelatorioF4.txt"
        array_palavras = []  # reset per-text state
        array_count_global = []
        array_count_utilizadores = []
        acumula_global = 0
        acumula_utilizadores = 0
        while msvcrt.kbhit():  # drain pending console input (Windows only)
            msvcrt.getch()
        my_file = open(nome_fich, "r")
        my_stdin = my_file.read()
        my_file.close()
        sys.stdin = StringIO(my_stdin)  # replay the file as if typed on stdin
        if sys.stdin.readline() == CMD_IN_PALAVRAS:
            array_palavras, array_count_global, array_count_utilizadores = input_palavras(array_palavras,
                                                                                          array_count_global,
                                                                                          array_count_utilizadores)
        else:
            sys.exit("Erro - Sem Comando Incial: " + CMD_IN_PALAVRAS)
        print("+++++++++")
        print(array_palavras)
        print("+++++++++")
        for i in range(20):  # 20 timed attempts to average out timing noise
            print("###### Tentativa " + str(i + 1) + " ######")
            temp_array_count_global = []
            temp_array_count_utilizadores = []
            # shallow working copies so every attempt sorts the same input
            for j in range(len(array_palavras)):
                temp_array_count_global.append(array_count_global[j])
                temp_array_count_utilizadores.append(array_count_utilizadores[j])
            tempo_global, tempo_utilizadores = input_cmd(array_palavras, temp_array_count_global,
                                                         temp_array_count_utilizadores)
            acumula_global = acumula_global + tempo_global
            acumula_utilizadores = acumula_utilizadores + tempo_utilizadores
            print("##############\n")
        print("#########################################")
        acumula_global = (acumula_global / 20.0) * 1000  # mean, in milliseconds
        acumula_utilizadores = (acumula_utilizadores / 20.0) * 1000
        print("***** Tempo MEDIO em MS - PESQUISA GLOBAL = " + str(acumula_global) + " *****")
        print("***** Tempo MEDIO em MS - PESQUISA UTILIZADORES = " + str(acumula_utilizadores) + " *****")
        print("#########################################\n\n")
    return 0
def input_palavras(array_palavras, array_count_global, array_count_utilizadores):
    """Read "word id" lines from stdin until CMD_IN_FIM, accumulating for each
    word its total occurrence count and its distinct-user count.

    The three arrays are parallel: index i of each refers to the same word.
    Prints CMD_OUT_GUARDADO when done and returns the three updated arrays.
    """
    array_ids_utilizadores = []  # per word: distinct user IDs seen so far, e.g. [[109, 114], ...]
    for linha in sys.stdin:
        if linha == "\n" or linha == "":
            sys.exit("Erro - Sem Texto para input")
        if linha == CMD_IN_FIM:
            break
        palavras = linha.split(" ")
        palavras[0] = palavras[0].upper()  # words are treated case-insensitively
        palavras[1] = palavras[1][:-1]  # strip the trailing newline from the ID
        if palavras[0] in array_palavras:
            indice = array_palavras.index(palavras[0])
            array_count_global[indice][0] += 1
            # each user counts at most once per word
            if not int(palavras[1]) in array_ids_utilizadores[indice]:
                array_ids_utilizadores[indice].append(int(palavras[1]))
                array_count_utilizadores[indice][0] += 1
        else:
            # first sighting of this word: register it in every parallel array
            array_palavras.append(palavras[0])
            indice = len(array_palavras)-1
            array_ids_utilizadores.append([int(palavras[1])])
            array_count_global.append([1, indice])
            array_count_utilizadores.append([1, indice])
    print(CMD_OUT_GUARDADO)
    return array_palavras, array_count_global, array_count_utilizadores
def input_cmd(array_palavras, array_count_global, array_count_utilizadores):
    """Run and time both queries: sort the global counts and the per-user
    counts, then print the words sharing the maximum count (alphabetically).

    Returns (tempo_global, tempo_utilizadores) -- the two sort times in seconds.
    NOTE(review): the two halves below are near-duplicates; kept as-is because
    the prints and timings are order-sensitive.
    """
    tempo_global = tempo_utilizadores = 0
    start_cmd = time.time()
    array_count_global = ordenacao(array_count_global)
    end_cmd = time.time()
    tempo_global = end_cmd - start_cmd
    print("*Tempo em MS - CMD-PesquisaGlobal = " + str(tempo_global * 1000) + " ||| Start Vs End: " + str(
        start_cmd) + "|" + str(end_cmd) + " *")
    string = ""
    # walk backwards from the end of the (ascending) sorted array to find the
    # first index still holding the maximum count
    valor = array_count_global[-1][0]
    start = len(array_palavras) - 1
    for i in range(len(array_palavras) - 1, -1, -1):
        if valor == array_count_global[i][0]:
            start = i
        else:
            break
    alvo = []
    for i in range(start, len(array_palavras)):
        indice = array_count_global[i][1]
        alvo.append(array_palavras[indice])
    alvo.sort()  # ties are reported alphabetically
    for i in range(len(alvo)):
        string = string + str(alvo[i]) + " "
    print(string[:-1])
    start_cmd = time.time()
    array_count_utilizadores = ordenacao(array_count_utilizadores)
    end_cmd = time.time()
    tempo_utilizadores = end_cmd - start_cmd
    print("*Tempo em MS - CMD-PesquisaUtilizadores = " + str(tempo_utilizadores * 1000) + " ||| Start Vs End: " + str(
        start_cmd) + "|" + str(end_cmd) + " *")
    string = ""
    # same max-count scan, now over the distinct-user counts
    valor = array_count_utilizadores[-1][0]
    start = len(array_palavras) - 1
    for i in range(len(array_palavras) - 1, -1, -1):
        if valor == array_count_utilizadores[i][0]:
            start = i
        else:
            break
    alvo = []
    for i in range(start, len(array_palavras)):
        indice = array_count_utilizadores[i][1]
        alvo.append(array_palavras[indice])
    alvo.sort()
    for i in range(len(alvo)):
        string = string + str(alvo[i]) + " "
    print(string[:-1])
    return tempo_global, tempo_utilizadores
# v1: Com Radix Sort LSD (A2) (Lado Direito -> Lado Esquerdo) Atua Sobre Digitos e Nao Bits (Para inteiros) |
# Ex.: 1999 >>> 1 <- 9 <- 9 <- 9
# *** VERSAO RELATORIO *** | Tabela 2 e 3
def radix_sort(array, tamanho):
    """LSD (least-significant-digit) radix sort for [key, payload] pairs.

    Sorts ``array`` in place, ascending by the non-negative integer key stored
    at index 0 of every pair, using one stable counting-sort pass per decimal
    digit.

    Bug fixed: the original computed the pass count as
    ``len(str(max(array[:][0])))`` -- but ``array[:][0]`` is just the FIRST
    pair, so whenever a later pair held the largest key the sort ran too few
    passes and returned a wrongly ordered list. Also guards the empty case
    (``max`` on an empty sequence raised) and uses exact integer ``//``
    instead of float division for digit extraction.

    Args:
        array: list of ``[key, payload]`` pairs with non-negative int keys.
        tamanho: number of elements of ``array`` to sort.

    Returns:
        The same list object, sorted.
    """
    if tamanho == 0:  # nothing to sort; avoids max() on an empty sequence
        return array
    # Digits of the largest key == number of counting-sort passes needed.
    max_digitos = len(str(max(par[0] for par in array[:tamanho])))
    divisor = 1  # 10**pass: selects the digit examined in the current pass
    for _ in range(max_digitos):
        contador = [0] * 10  # histogram of the current digit
        semiordenado = [None] * tamanho  # output buffer for this pass
        for i in range(tamanho):
            contador[(array[i][0] // divisor) % 10] += 1
        # Exclusive prefix sum: contador[d] -> first output slot for digit d.
        acumulado = 0
        for d in range(10):
            contador[d], acumulado = acumulado, acumulado + contador[d]
        # Stable placement: pairs with equal digits keep their relative order,
        # which is what makes multi-pass LSD radix sort correct.
        for i in range(tamanho):
            digito = (array[i][0] // divisor) % 10
            semiordenado[contador[digito]] = array[i]
            contador[digito] += 1
        for i in range(tamanho):  # copy back for the next pass
            array[i] = semiordenado[i]
        divisor = divisor * 10
    return array
def ordenacao(array):
    """Sorting facade: keeps callers independent of the concrete algorithm."""
    # Delegate to the radix-sort implementation defined above.
    return radix_sort(array, len(array))
if __name__ == '__main__':
# ### START ###
main()
| 8,303 | 0 | 114 |
bf1f03d1d8eb0e447032ef76e65ee79f0f89065a | 929 | py | Python | solutions/142. Linked List Cycle II.py | JacopoPan/leetcode-top100-liked-questions | 03dc05f087d05805d54b7585ce740338f3128833 | [
"MIT"
] | null | null | null | solutions/142. Linked List Cycle II.py | JacopoPan/leetcode-top100-liked-questions | 03dc05f087d05805d54b7585ce740338f3128833 | [
"MIT"
] | null | null | null | solutions/142. Linked List Cycle II.py | JacopoPan/leetcode-top100-liked-questions | 03dc05f087d05805d54b7585ce740338f3128833 | [
"MIT"
] | null | null | null | """
Runtime: 1626 ms, faster than 5.01% of Python3 online submissions for Linked List Cycle II.
Memory Usage: 17.3 MB, less than 73.61% of Python3 online submissions for Linked List Cycle II.
"""
from typing import List
from typing import Optional
if __name__ == "__main__":
main()
| 23.820513 | 95 | 0.597417 | """
Runtime: 1626 ms, faster than 5.01% of Python3 online submissions for Linked List Cycle II.
Memory Usage: 17.3 MB, less than 73.61% of Python3 online submissions for Linked List Cycle II.
"""
from typing import List
from typing import Optional
class ListNode:
    """Singly linked list node: holds a value and a pointer to the next node."""

    def __init__(self, x):
        # Store the payload; a freshly created node is unlinked.
        self.val = x
        self.next = None
class Solution:
    """LeetCode 142: find the node where a singly linked list's cycle begins."""

    def detectCycle(self, head: "Optional[ListNode]") -> "Optional[ListNode]":
        """Return the first node visited twice (the cycle entry), or None
        when the list terminates without a cycle.

        Improvement: visited nodes are tracked in a set (O(1) membership,
        identity-based just like ``in`` on a list of plain nodes) instead of
        a list, turning the original O(n^2) scan into O(n).
        """
        seen = set()  # nodes visited so far; default hash/eq is identity
        while head is not None:
            seen.add(head)
            head = head.next
            if head in seen:  # first revisit == entry point of the cycle
                return head
        return None
def main():
    """Build a 4-node list whose tail loops back to the second node, then
    report where detectCycle() says the cycle starts (expected value: 2)."""
    nodes = [ListNode(value) for value in (1, 2, 3, 4)]
    for current, following in zip(nodes, nodes[1:]):
        current.next = following
    nodes[3].next = nodes[1]  # 4 -> 2 closes the cycle
    ans = Solution().detectCycle(nodes[0])
    print('Output:', ans.val)
    print('Expected:', 2)
if __name__ == "__main__":
main()
| 533 | -12 | 121 |
943b3f02b5809012fae0f1ddb09bba60bd0f3709 | 932 | py | Python | blipp/dummy.py | periscope-ps/blipp | 002d08e911fb94c34d7f05e34883efa8f6138a4f | [
"BSD-3-Clause"
] | null | null | null | blipp/dummy.py | periscope-ps/blipp | 002d08e911fb94c34d7f05e34883efa8f6138a4f | [
"BSD-3-Clause"
] | null | null | null | blipp/dummy.py | periscope-ps/blipp | 002d08e911fb94c34d7f05e34883efa8f6138a4f | [
"BSD-3-Clause"
] | 1 | 2015-12-14T01:14:39.000Z | 2015-12-14T01:14:39.000Z | # =============================================================================
# periscope-ps (blipp)
#
# Copyright (c) 2013-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
import time
EVENT_TYPES={
"dummy":"ps:testing:dummy"
}
class Probe:
"""
Dummy probe that just sleeps and returns 1
"""
| 29.125 | 81 | 0.565451 | # =============================================================================
# periscope-ps (blipp)
#
# Copyright (c) 2013-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
import time
EVENT_TYPES={
"dummy":"ps:testing:dummy"
}
class Probe:
    """
    Dummy probe that just sleeps and returns 1
    """

    def __init__(self, service, measurement):
        # Only the measurement's configuration matters here; the service
        # handle is accepted for interface compatibility with real probes.
        self.config = measurement["configuration"]
        schedule = self.config.get("schedule_params", {})
        # Seconds that get_data() should block for (defaults to 0).
        self.duration = schedule.get("duration", 0)

    def get_data(self):
        # Pretend a measurement takes time, then report a constant reading
        # under the dummy event type.
        time.sleep(self.duration)
        reading = {EVENT_TYPES["dummy"]: 1}
        return reading
| 226 | 0 | 54 |
86c861ac13441abc037261171e3480debd2223eb | 4,080 | py | Python | pytests/tests/stepsize/vis_stepsize.py | shamanDevel/DiffDVR | 99fbe9f114d0097daf402bde2ae35f18dade335d | [
"BSD-3-Clause"
] | 12 | 2021-08-02T04:51:48.000Z | 2022-01-14T18:02:27.000Z | pytests/tests/stepsize/vis_stepsize.py | shamanDevel/DiffDVR | 99fbe9f114d0097daf402bde2ae35f18dade335d | [
"BSD-3-Clause"
] | 2 | 2021-11-04T14:23:30.000Z | 2022-02-28T10:30:13.000Z | pytests/tests/stepsize/vis_stepsize.py | shamanDevel/DiffDVR | 99fbe9f114d0097daf402bde2ae35f18dade335d | [
"BSD-3-Clause"
] | 4 | 2021-07-16T10:23:45.000Z | 2022-01-04T02:51:43.000Z | import os
import sys
sys.path.append(os.getcwd())
import h5py
import tests.vis_gui
import torch
import numpy as np
import skimage.transform
import matplotlib.colors
import matplotlib.pyplot
import pyrenderer
if __name__ == "__main__":
#ui = UIStepsize(os.path.join(os.getcwd(), "..\\..\\results\\stepsize\\skull4gauss"))
ui = UIStepsize(os.path.join(os.getcwd(), "..\\..\\results\\stepsize\\thorax2gauss"))
ui.show()
| 36.756757 | 99 | 0.628431 | import os
import sys
sys.path.append(os.getcwd())
import h5py
import tests.vis_gui
import torch
import numpy as np
import skimage.transform
import matplotlib.colors
import matplotlib.pyplot
import pyrenderer
class UIStepsize(tests.vis_gui.UI):
    """Result browser for the step-size optimization experiment: loads HDF5
    result files and visualizes per-epoch step sizes and gradient norms."""

    def __init__(self, folder):
        """Configure the base UI with this experiment's keys, losses and the
        extra per-run values to extract from each result file."""
        keys = [
            "filename", "mean", "lr"
        ]
        losses = ["min_stepsize", "max_stepsize"]
        extra_values = [
            "filename", "reference_tf", "stepsizes", "gradient_norms",
            "total_min_stepsize", "total_max_stepsize"]
        super().__init__(
            folder,
            keys,
            losses,
            512, 256,
            extra_values,
            delayed_loading=False,
            has_volume_slices=True)
        self.folder = folder

    def _createKey(self, hdf5_file: h5py.File):
        """Build the table key (file name, mean step size, learning rate)."""
        return self.Key(
            filename=os.path.splitext(os.path.split(hdf5_file.filename)[1])[0],
            mean="%.3f" % hdf5_file.attrs['meanStepsize'],
            lr="%.3f" % hdf5_file.attrs['lr'],
        )

    def _createValue(self, hdf5_file: h5py.File, filename: str):
        """Load per-epoch step sizes, gradient norms and the reference TF."""
        stepsizes = hdf5_file['stepsizes'][...]
        return self.Value(
            min_stepsize=hdf5_file['min_stepsizes'][...],
            max_stepsize=hdf5_file['max_stepsizes'][...],
            stepsizes=stepsizes,
            gradient_norms=hdf5_file['gradient_norm'][...],
            reference_tf=hdf5_file['reference_tf'][...],
            filename=os.path.splitext(filename)[0],
            total_min_stepsize=np.min(stepsizes),
            total_max_stepsize=np.max(stepsizes),
        )

    def get_num_epochs(self, current_value):
        """Number of recorded optimization epochs for the selected run."""
        return current_value.stepsizes.shape[0]

    def get_transfer_function(self, current_value, current_epoch):
        """Always render with the reference transfer function."""
        return self.tf_reference

    def render_current_value(self, current_value, current_epoch):
        """Render one frame with the step-size field of the selected epoch;
        returns an H x W x 4 numpy color image."""
        from diffdvr import renderer_dtype_torch
        volume = self.volume_data
        stepsize = current_value.stepsizes[current_epoch]
        stepsize = torch.from_numpy(stepsize).to(device=self.device, dtype=renderer_dtype_torch)
        tf = torch.from_numpy(self.tf_reference).to(device=self.device, dtype=renderer_dtype_torch)
        # Clone the shared renderer settings so this view does not mutate them.
        inputs = self.renderer.settings.clone()
        inputs.camera_mode = pyrenderer.CameraMode.ReferenceFrame
        inputs.camera = pyrenderer.CameraReferenceFrame(self.cameras, self.camera_fov_radians)
        inputs.tf = tf
        inputs.volume = volume
        inputs.step_size = stepsize
        B = 1
        W = inputs.screen_size.x
        H = inputs.screen_size.y
        output_color = torch.empty(
            B, H, W, 4, dtype=volume.dtype, device=volume.device)
        output_termination_index = torch.empty(
            B, H, W, dtype=torch.int32, device=volume.device)
        outputs = pyrenderer.RendererOutputs(output_color, output_termination_index)
        pyrenderer.Renderer.render_forward(inputs, outputs)
        return output_color.detach().cpu().numpy()[0]

    def get_slice(self, is_reference: bool, current_value, current_epoch,
                  slice: float, axis : str):
        """Visualize either the step-size field (slice < 0.5, reversed viridis)
        or the gradient-norm field (inferno), both log-normalized; returns an
        RGB image. The `slice` value doubles as the field selector here."""
        if slice < 0.5:
            stepsize = current_value.stepsizes[current_epoch]
            # Normalize over the run's full range so colors are comparable
            # across epochs.
            norm = matplotlib.colors.LogNorm(
                vmin=current_value.total_min_stepsize,
                vmax=current_value.total_max_stepsize)
            cm = matplotlib.pyplot.get_cmap("viridis").reversed()
            colors = cm(norm(stepsize[0]))
        else:
            gradient_norm = current_value.gradient_norms[current_epoch]
            # Clamp the lower bound so LogNorm never sees zero.
            norm = matplotlib.colors.LogNorm(
                vmin=max(1e-4, np.min(gradient_norm)),
                vmax=max(1e-4, np.max(gradient_norm)))
            cm = matplotlib.pyplot.get_cmap("inferno")
            colors = cm(norm(gradient_norm[0]))
        return colors[:,:,:3]
if __name__ == "__main__":
#ui = UIStepsize(os.path.join(os.getcwd(), "..\\..\\results\\stepsize\\skull4gauss"))
ui = UIStepsize(os.path.join(os.getcwd(), "..\\..\\results\\stepsize\\thorax2gauss"))
ui.show()
| 3,421 | 14 | 212 |
5dfa2cf07cecc859c0f04b49c985a9ecba85a964 | 12,200 | py | Python | mysite/polls/tests.py | js1294/ECM-2434-Group-Software-Engineering-Project | c7edb7d9006920712341d780e3941a99f729630b | [
"Apache-2.0"
] | 2 | 2022-02-17T13:01:36.000Z | 2022-02-28T11:50:23.000Z | mysite/polls/tests.py | js1294/ECM-2434-Group-Software-Engineering-Project | c7edb7d9006920712341d780e3941a99f729630b | [
"Apache-2.0"
] | 13 | 2022-03-02T20:25:47.000Z | 2022-03-23T10:47:16.000Z | mysite/polls/tests.py | js1294/ECM-2434-Group-Software-Engineering-Project | c7edb7d9006920712341d780e3941a99f729630b | [
"Apache-2.0"
] | 4 | 2022-02-27T13:19:23.000Z | 2022-03-21T15:19:11.000Z | """Django tests to ensure that the app is working correctly are written and run here."""
import tempfile
import datetime
from django.db.models.fields.files import ImageFieldFile
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.models import User
from django.test.client import Client
from .models import Profile, Image, Challenge
from . import validate, image_metadata, ml_ai_image_classification
class TestAdminPanel(TestCase):
    """Tests for admin functionality."""
    def create_user(self):
        """Create (or fetch) an active superuser with a random password and
        remember the credentials on the test instance."""
        self.username = "test_admin"
        self.password = User.objects.make_random_password()
        user, created = User.objects.get_or_create(username=self.username)
        user.set_password(self.password)
        user.is_staff = True
        user.is_superuser = True
        user.is_active = True
        user.save()
        self.user = user
    def test_spider_admin(self):
        """An admin can log in and every admin URL serves a full HTML page."""
        self.create_user()
        client = Client()
        client.login(username=self.username, password=self.password)
        admin_pages = [
            "/admin/",
        ]
        for page in admin_pages:
            resp = client.get(page)
            self.assertEqual(resp.status_code, 200)
            # a real rendered page, not an error stub or redirect body
            assert b"<!DOCTYPE html" in resp.content
        self.user.delete()
class TestUserPanel(TestCase):
    """Tests for regular (non-staff) user functionality."""
    def create_user(self):
        """Create (or fetch) an active non-staff user with fixed credentials."""
        self.username = "testuser"
        self.password = "Cheesytoenails@123"
        user, created = User.objects.get_or_create(username=self.username)
        user.set_password(self.password)
        user.is_staff = False
        user.is_superuser = False
        user.is_active = True
        user.save()
        self.user = user
    def test_user_credentials(self):
        """The stored username/password match the values used to create the user."""
        self.create_user()
        client = Client()
        client.login(username=self.username, password=self.password)
        self.assertEqual("testuser",self.username)
        self.assertEqual("Cheesytoenails@123",self.password)
        self.user.delete()
    def test_user_profile(self):
        """Exactly one Profile is created automatically alongside the user."""
        self.create_user()
        if len(Profile.objects.filter(user=self.user)) !=1:
            self.fail("Profile not created correctly")
        self.user.delete()
    def test_spider_user(self):
        """A logged-in user can reach every user-facing page, but is redirected
        away from the admin panel."""
        self.create_user()
        client = Client()
        client.login(username=self.username, password=self.password)
        user_pages = [
            "/polls/",
            "/polls/feed",
            "/polls/uploadimage",
            "/polls/leaderboards",
            "/polls/profile",
            "/polls/viewprofile/"+str(self.user.username)
        ]
        for page in user_pages:
            resp = client.get(page)
            self.assertEqual(resp.status_code, 200)
            assert b"<!DOCTYPE html" in resp.content
        # should be redirected away from admin page
        admin_page = "/admin/"
        resp = client.get(admin_page)
        self.assertEqual(resp.status_code, 302)
        self.user.delete()
class TestValidate(TestCase):
    """Tests for the helpers in validate.py (passwords, usernames, images)."""
    def create_user(self,username,password):
        """Create (or fetch) an active non-staff user with the given credentials."""
        self.username = username
        self.password = password
        user, created = User.objects.get_or_create(username=self.username)
        user.set_password(self.password)
        user.is_staff = False
        user.is_superuser = False
        user.is_active = True
        user.save()
        self.user = user
    def setUp(self):
        """Create a test user and record paths to the fixture images."""
        self.create_user("testuser","Cheesytoenails@123")
        self.large_image_path = './media/feed/picture/6mb_image.jpg'
        self.good_image_path = './media/feed/picture/Brennan_On_the_Side_of_the_Angels_2.jpg'
        self.bad_image_path = './media/feed/picture/university-of-exeter-forum.jpg'
    def tearDown(self):
        """Delete the test user created in setUp."""
        User.objects.get(username="testuser").delete()
    def test_check_user_unique(self):
        """check_user_unique raises for an existing username, passes otherwise."""
        self.assertRaises(ValidationError, validate.check_user_unique, "testuser")
        try:
            validate.check_user_unique("unique_user")
        except ValidationError:
            self.fail("validate.check_user_unique() raised ValidationError unexpectedly!")
    def test_validate_number(self):
        """Passwords without a digit are rejected; with a digit they pass."""
        # test with input expected to raise error
        self.assertRaises(ValidationError,validate.validate_number,"hello")
        # test with more extreme input
        self.assertRaises(ValidationError,validate.validate_number,"onzgoiaegnimnMAMS")
        # test with valid input
        try:
            validate.validate_number("hdbg1247562mdm")
        except ValidationError:
            self.fail("validate.validate_number() raised ValidationError unexpectedly!")
    def test_validate_special(self):
        """Passwords without a special character are rejected.

        NOTE(review): every assertion below calls validate.validate_number,
        not a special-character validator -- this looks like a copy-paste
        slip; confirm the intended target function in validate.py. The
        failure message also names check_image_type incorrectly.
        """
        # test with input expected to raise error
        self.assertRaises(ValidationError,validate.validate_number,"hello")
        # test with more extreme input
        self.assertRaises(ValidationError,validate.validate_number,"onzgoiaegnimnMAMS")
        # test with valid input
        try:
            validate.validate_number("@124hsvjv£%*(*&^%£")
        except ValidationError:
            self.fail("validate.check_image_type() raised ValidationError unexpectedly!")
    def test_validate_upper_lower(self):
        """Passwords must mix upper and lower case letters."""
        # test with input that should raise error
        self.assertRaises(ValidationError, validate.validate_upper_lower, "hello")
        # more extreme input
        self.assertRaises(ValidationError, validate.validate_upper_lower, "kjaelnal31259$$*&")
        # input that should be valid
        try:
            validate.validate_upper_lower("HEllO")
        except ValidationError:
            self.fail("validate.validate_upper_lower() raised ValidationError unexpectedly!")
    def test_validate_check_image_type(self):
        """Only .jpg uploads are accepted; other extensions raise."""
        # any image that is not .jpg should raise an error
        image = tempfile.NamedTemporaryFile(suffix=".png")
        self.assertRaises(ValidationError, validate.check_image_type, image)
        # test that it works with jpg
        image = tempfile.NamedTemporaryFile(suffix=".jpg")
        try:
            validate.check_image_type(image)
        except ValidationError:
            self.fail("validate.check_image_type() raised ValidationError unexpectedly!")
    def test_validate_image_size(self):
        """Images within the size limit are reported as 'valid'."""
        image = tempfile.NamedTemporaryFile(suffix='.jpg')
        # a small tempfile should be valid
        self.assertEqual('valid',validate.validate_image_size(image.name))
        # large image under 20mb should be valid
        self.assertEqual('valid',validate.validate_image_size(self.large_image_path))
    def test_validate_metadata(self):
        """Images with metadata validate; images stripped of metadata do not."""
        # image with correct metadata should be valid
        self.assertEqual("valid",validate.validate_metadata(self.good_image_path))
        # image without metadata should be invalid
        self.assertEqual("missing metadata",validate.validate_metadata(self.bad_image_path))
class TestImage(TestCase):
    """test the Image model"""
    def create_user(self):
        """create a test user"""
        self.username = "test_user"
        self.password = User.objects.make_random_password()
        user, created = User.objects.get_or_create(username=self.username)
        user.set_password(self.password)
        user.is_staff = False
        user.is_superuser = False
        user.is_active = True
        user.save()
        self.user = user
    def setUp(self):
        """create a test user and a temporary test image to be stored in an Image model object"""
        self.create_user()
        # BUG FIX: SimpleUploadedFile content must be bytes under Python 3;
        # passing a str ('') can fail when Django writes the file to storage.
        self.img_obj = Image.objects.create(
            user=self.user,
            description="desc",
            img=SimpleUploadedFile(name='test_image.jpg',
                                   content=b'',
                                   content_type='image/jpeg'),
            gps_coordinates=(50.7366, -3.5350),
            taken_date=datetime.datetime.now(),
            score=0)
    def tearDown(self):
        """delete the user and their test image objects"""
        images = Image.objects.filter(user=self.user)
        for image in images:
            # remove the stored file before deleting the database row
            image.img.delete()
            image.delete()
        self.user.delete()
    def test_image(self):
        """test that Image objects are created and stored correctly"""
        self.assertEqual("desc", self.img_obj.description)
        # the img field should be wrapped in Django's ImageFieldFile
        if not isinstance(self.img_obj.img, ImageFieldFile):
            self.fail("img in Image model not stored correctly")
        self.assertEqual((50.7366, -3.5350), self.img_obj.gps_coordinates)
        self.assertEqual(0, self.img_obj.score)
class TestChallenge(TestCase):
    """test the Challenge model"""
    def setUp(self):
        """set up a test challenge object"""
        self.challenge_obj = Challenge.objects.create(
            name='test_challenge',
            description='desc',
            location=(50.7366, -3.5350),
            locationRadius=1,
            subject='building',
            startDate=datetime.datetime.now(),
            endDate=datetime.datetime.now())
        self.challenge_obj.save()
    def tearDown(self):
        """delete the challenge object"""
        self.challenge_obj.delete()
    def test_challenge(self):
        """test that challenge objects are being saved and stored correctly"""
        # each stored field should round-trip unchanged
        self.assertEqual('test_challenge', self.challenge_obj.name)
        self.assertEqual('desc', self.challenge_obj.description)
        self.assertEqual((50.7366, -3.5350), self.challenge_obj.location)
        self.assertEqual('building', self.challenge_obj.subject)
class TestImageMetadata(TestCase):
    """test methods from image_metadata"""
    def create_user(self):
        """create a test user"""
        # NOTE(review): this helper is never called from setUp or any test in
        # this class; kept unchanged so any external callers keep working.
        self.username = "test_user"
        self.password = User.objects.make_random_password()
        user, created = User.objects.get_or_create(username=self.username)
        user.set_password(self.password)
        user.is_staff = False
        user.is_superuser = False
        user.is_active = True
        user.save()
        self.user = user
    def setUp(self):
        """set up a good image with metadata, bad image without"""
        self.good_image_path = './media/feed/picture/Brennan_On_the_Side_of_the_Angels_2.jpg'
        self.bad_image_path = './media/feed/picture/university-of-exeter-forum.jpg'
    def tearDown(self):
        """nothing to tear down"""
        pass
    def test_get_gps(self):
        """test that gps data is gathered correctly"""
        # image without metadata should throw exception
        with self.assertRaises(Exception) as context:
            image_metadata.get_gps(self.bad_image_path)
        self.assertTrue('exif not found' in str(context.exception))
        # image with metadata should return a tuple with location data
        try:
            ret_val = image_metadata.get_gps(self.good_image_path)
            if not isinstance(ret_val, tuple):
                self.fail("image_metadata.get_gps() does not return a tuple")
        except Exception:
            self.fail("image_metadata.get_gps() threw an unexpected exception")
    def test_get_lat(self):
        """test that latitudes are either positive or negative depending on north vs south"""
        assert image_metadata.get_lat("N", [1, 0, 2]) > 0
        assert image_metadata.get_lat("S", [1, 0, 2]) < 0
        # unexpected direction strings are treated as south (negative)
        assert image_metadata.get_lat("asfadfac", [1, 0, 2]) < 0
    def test_get_long(self):
        """test that longitudes are either positive or negative depending on east vs west"""
        # BUG FIX: the "W" and unexpected-input cases previously called
        # get_lat, so get_long was only ever tested with "E".
        assert image_metadata.get_long("E", [1, 0, 2]) > 0
        assert image_metadata.get_long("W", [1, 0, 2]) < 0
        # unexpected direction strings are treated as west (negative)
        assert image_metadata.get_long("asfadfac", [1, 0, 2]) < 0
    def test_get_distance(self):
        """test that distance between two points is correct"""
        # the distance from a point to itself must be zero
        self.assertEqual(0, image_metadata.get_distance((50.7366, -3.5350), (50.7366, -3.5350)))
    def test_get_time(self):
        """test that time data is gathered from an image"""
        # BUG FIX: the original nested bare excepts meant the inner
        # self.fail's AssertionError was swallowed by the outer handler, so
        # every failure reported "fails to find a time". The two failure
        # modes are now handled separately with narrow exception types.
        try:
            ret_val = image_metadata.get_time(self.good_image_path)
        except Exception:
            self.fail("image_metadata.get_time() fails to find a time")
        try:
            datetime.datetime.strptime(ret_val, '%Y:%m:%d %H:%M:%S')
        except (TypeError, ValueError):
            self.fail("image_metadata.get_time() does not return a datetime")
    def test_get_time_dif(self):
        """test the image_metadata get_time_dif by recreating the logic"""
        time = datetime.datetime.now()
        difference = time - time
        # (removed a stray no-op `datetime.timedelta(0, 8, 562000)` statement)
        seconds_in_day = 24 * 60 * 60
        ret_val = (difference.days * seconds_in_day + difference.seconds) / 60
        # difference between equal dates should be 0 to prove that the sum is calculated correctly
        self.assertEqual(0, ret_val)
import tempfile
import datetime
from django.db.models.fields.files import ImageFieldFile
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.models import User
from django.test.client import Client
from .models import Profile, Image, Challenge
from . import validate, image_metadata, ml_ai_image_classification
class TestAdminPanel(TestCase):
"""test admin functionality"""
def create_user(self):
"""create a test admin"""
self.username = "test_admin"
self.password = User.objects.make_random_password()
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = True
user.is_superuser = True
user.is_active = True
user.save()
self.user = user
def test_spider_admin(self):
"""test that admin can login and access admin pages"""
self.create_user()
client = Client()
client.login(username=self.username, password=self.password)
admin_pages = [
"/admin/",
]
for page in admin_pages:
resp = client.get(page)
self.assertEqual(resp.status_code, 200)
assert b"<!DOCTYPE html" in resp.content
self.user.delete()
class TestUserPanel(TestCase):
"""test user functionality"""
def create_user(self):
"""create a test user"""
self.username = "testuser"
self.password = "Cheesytoenails@123"
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def test_user_credentials(self):
"""test that the user has the correct username, password"""
self.create_user()
client = Client()
client.login(username=self.username, password=self.password)
self.assertEqual("testuser",self.username)
self.assertEqual("Cheesytoenails@123",self.password)
self.user.delete()
def test_user_profile(self):
"""test that user profile is created when user is created"""
self.create_user()
if len(Profile.objects.filter(user=self.user)) !=1:
self.fail("Profile not created correctly")
self.user.delete()
def test_spider_user(self):
"""test that user can login and access user pages"""
self.create_user()
client = Client()
client.login(username=self.username, password=self.password)
user_pages = [
"/polls/",
"/polls/feed",
"/polls/uploadimage",
"/polls/leaderboards",
"/polls/profile",
"/polls/viewprofile/"+str(self.user.username)
]
for page in user_pages:
resp = client.get(page)
self.assertEqual(resp.status_code, 200)
assert b"<!DOCTYPE html" in resp.content
# should be redirected away from admin page
admin_page = "/admin/"
resp = client.get(admin_page)
self.assertEqual(resp.status_code, 302)
self.user.delete()
class TestValidate(TestCase):
"""tests for validate.py"""
def create_user(self,username,password):
"""create a test admin"""
self.username = username
self.password = password
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def setUp(self):
"""create a user and paths to test images"""
self.create_user("testuser","Cheesytoenails@123")
self.large_image_path = './media/feed/picture/6mb_image.jpg'
self.good_image_path = './media/feed/picture/Brennan_On_the_Side_of_the_Angels_2.jpg'
self.bad_image_path = './media/feed/picture/university-of-exeter-forum.jpg'
def tearDown(self):
"""delete the test user"""
User.objects.get(username="testuser").delete()
def test_check_user_unique(self):
"""should raise error if username already exists"""
self.assertRaises(ValidationError, validate.check_user_unique, "testuser")
try:
validate.check_user_unique("unique_user")
except ValidationError:
self.fail("validate.check_user_unique() raised ValidationError unexpectedly!")
def test_validate_number(self):
"""test that password number validation works correctly"""
# test with input expected to raise error
self.assertRaises(ValidationError,validate.validate_number,"hello")
# test with more extreme input
self.assertRaises(ValidationError,validate.validate_number,"onzgoiaegnimnMAMS")
# test with valid input
try:
validate.validate_number("hdbg1247562mdm")
except ValidationError:
self.fail("validate.validate_number() raised ValidationError unexpectedly!")
def test_validate_special(self):
"""test that password special character validation works correctly"""
# test with input expected to raise error
self.assertRaises(ValidationError,validate.validate_number,"hello")
# test with more extreme input
self.assertRaises(ValidationError,validate.validate_number,"onzgoiaegnimnMAMS")
# test with valid input
try:
validate.validate_number("@124hsvjv£%*(*&^%£")
except ValidationError:
self.fail("validate.check_image_type() raised ValidationError unexpectedly!")
def test_validate_upper_lower(self):
"""test that upper and lower case validation for passwords works"""
# test with input that should raise error
self.assertRaises(ValidationError, validate.validate_upper_lower, "hello")
# more extreme input
self.assertRaises(ValidationError, validate.validate_upper_lower, "kjaelnal31259$$*&")
# input that should be valid
try:
validate.validate_upper_lower("HEllO")
except ValidationError:
self.fail("validate.validate_upper_lower() raised ValidationError unexpectedly!")
def test_validate_check_image_type(self):
"""test that image type is validated correctly"""
# any image that is not .jpg should raise an error
image = tempfile.NamedTemporaryFile(suffix=".png")
self.assertRaises(ValidationError, validate.check_image_type, image)
# test that it works with jpg
image = tempfile.NamedTemporaryFile(suffix=".jpg")
try:
validate.check_image_type(image)
except ValidationError:
self.fail("validate.check_image_type() raised ValidationError unexpectedly!")
def test_validate_image_size(self):
"""test that image size is validated correctly"""
image = tempfile.NamedTemporaryFile(suffix='.jpg')
#a small tempfile should be valid
self.assertEqual('valid',validate.validate_image_size(image.name))
#large image under 20mb should be valid
self.assertEqual('valid',validate.validate_image_size(self.large_image_path))
def test_validate_metadata(self):
"""test that metadata is validated"""
# image with correct metadata should be valid
self.assertEqual("valid",validate.validate_metadata(self.good_image_path))
# image without metadata should be invalid
self.assertEqual("missing metadata",validate.validate_metadata(self.bad_image_path))
class TestImage(TestCase):
"""test the Image model"""
def create_user(self):
"""create a test user"""
self.username = "test_user"
self.password = User.objects.make_random_password()
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def setUp(self):
"""create a test user and a temporary test image to be stored in an Image model object"""
self.create_user()
self.img_obj = Image.objects.create(user=self.user
,
description="desc",
img=SimpleUploadedFile(name='test_image.jpg',
content='',
content_type='image/jpeg'),
gps_coordinates=(50.7366, -3.5350),
taken_date=datetime.datetime.now(),score=0)
def tearDown(self):
"""delete the user and their test image objects"""
images = Image.objects.filter(user=self.user)
for image in images:
image.img.delete()
image.delete()
self.user.delete()
def test_image(self):
"""test that Image objects are created and stored correctly"""
self.assertEqual("desc",self.img_obj.description)
if isinstance(self.img_obj.img,ImageFieldFile) == False:
self.fail("img in Image model not stored correctly")
self.assertEqual((50.7366,-3.5350),self.img_obj.gps_coordinates)
self.assertEqual(0,self.img_obj.score)
class TestChallenge(TestCase):
"""test the Challenge model"""
def setUp(self):
"""set up a test challenge object"""
self.challenge_obj = Challenge.objects.create(name='test_challenge',
description='desc',
location=(50.7366,-3.5350),
locationRadius=1,
subject='building',
startDate=datetime.datetime.now(),
endDate=datetime.datetime.now()
)
self.challenge_obj.save()
def tearDown(self):
"""delete the challenge object"""
self.challenge_obj.delete()
def test_challenge(self):
"""test that challnge objects are being saved and stored correctly"""
self.assertEqual(self.challenge_obj.name,'test_challenge')
self.assertEqual(self.challenge_obj.description,'desc')
self.assertEqual(self.challenge_obj.location,(50.7366,-3.5350))
self.assertEqual(self.challenge_obj.subject,'building')
class TestImageMetadata(TestCase):
"""test methods from image_metadata"""
def create_user(self):
"""create a test user"""
self.username = "test_user"
self.password = User.objects.make_random_password()
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def setUp(self):
"""set up a good image with metadata, bad image without"""
self.good_image_path = './media/feed/picture/Brennan_On_the_Side_of_the_Angels_2.jpg'
self.bad_image_path = './media/feed/picture/university-of-exeter-forum.jpg'
def tearDown(self):
"""nothing to tear down"""
pass
def test_get_gps(self):
"""test that gps data is gathered correctly"""
# image without metadata should throw exception
with self.assertRaises(Exception) as context:
image_metadata.get_gps(self.bad_image_path)
self.assertTrue('exif not found' in str(context.exception))
# image with metadata should return a tuple with location data
try:
ret_val = image_metadata.get_gps(self.good_image_path)
if isinstance(ret_val,tuple) == False:
self.fail("image_metadata.get_gps() does not return a tuple")
else:
pass
except Exception:
self.fail("image_metadata.get_gps() threw an unexpected exception")
def test_get_lat(self):
"""test that latitudes are either positive or negative depending on north vs south"""
assert image_metadata.get_lat("N",[1,0,2]) >0
assert image_metadata.get_lat("S",[1,0,2]) <0
# ignore unexpected data
assert image_metadata.get_lat("asfadfac",[1,0,2]) <0
def test_get_long(self):
"""test that longitudes are either positive or negative depending on east vs west"""
assert image_metadata.get_long("E",[1,0,2]) >0
assert image_metadata.get_lat("W",[1,0,2]) <0
# unexpected data should be ignored
assert image_metadata.get_lat("asfadfac",[1,0,2]) <0
def test_get_distance(self):
"""test that distance between two points is correct"""
# this sum shows whether the distance calculation is working
self.assertEqual(0,image_metadata.get_distance((50.7366, -3.5350),(50.7366, -3.5350)))
def test_get_time(self):
"""test that time data is gathered from an image"""
try:
ret_val = image_metadata.get_time(self.good_image_path)
try:
time = datetime.datetime.strptime(ret_val, '%Y:%m:%d %H:%M:%S')
except:
self.fail("image_metadata.get_time() does not return a datetime")
except:
self.fail("image_metadata.get_time() fails to find a time")
def test_get_time_dif(self):
"""test the image_metadata get_time_dif by recreating the logic"""
time = datetime.datetime.now()
difference = time - time
datetime.timedelta(0, 8, 562000)
seconds_in_day = 24 * 60 * 60
ret_val = (difference.days * seconds_in_day + difference.seconds) / 60
#difference between equal dates should be 0 to prove that the sum is calculated correctly
self.assertEqual(0,ret_val) | 0 | 0 | 0 |
00360430e0d7600fd4981ee2a9f61d6755f8e92b | 17,465 | py | Python | scripts/loading/phenotype/load_phenotype.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 5 | 2015-11-24T23:09:46.000Z | 2019-11-06T17:48:13.000Z | scripts/loading/phenotype/load_phenotype.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 188 | 2017-08-28T22:39:03.000Z | 2022-03-02T14:53:46.000Z | scripts/loading/phenotype/load_phenotype.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 7 | 2018-05-13T01:58:07.000Z | 2021-06-25T19:08:33.000Z | import sys
import importlib
from src.models import Apo, Locusdbentity, Referencedbentity, Phenotypeannotation, \
Source, PhenotypeannotationCond, Taxonomy, Chebi, Phenotype, Allele, Reporter, Chebi
from scripts.loading.database_session import get_session
from scripts.loading.util import get_strain_taxid_mapping
__author__ = 'sweng66'
cond_start_index = 12
cond_stop_index = 33
column_size = 36
cond_class = ['treatment', 'media', 'phase', 'temperature',
'chemical', 'assay', 'radiation']
# degree_file = "scripts/loading/phenotype/data/sample_line_with_degree.txt"
if __name__ == '__main__':
infile = None
if len(sys.argv) >= 2:
infile = sys.argv[1]
else:
print("Usage: python load_phenotype.py datafile")
print("Usage example: python load_phenotype.py scripts/loading/phenotype/data/phenotype_dataCuration091717.tsv")
exit()
logfile = "scripts/loading/phenotype/logs/" + infile.split('/')[4].replace(".txt", ".log")
load_phenotypes(infile, logfile)
| 37.398287 | 262 | 0.536444 | import sys
import importlib
from src.models import Apo, Locusdbentity, Referencedbentity, Phenotypeannotation, \
Source, PhenotypeannotationCond, Taxonomy, Chebi, Phenotype, Allele, Reporter, Chebi
from scripts.loading.database_session import get_session
from scripts.loading.util import get_strain_taxid_mapping
__author__ = 'sweng66'
cond_start_index = 12
cond_stop_index = 33
column_size = 36
cond_class = ['treatment', 'media', 'phase', 'temperature',
'chemical', 'assay', 'radiation']
# degree_file = "scripts/loading/phenotype/data/sample_line_with_degree.txt"
def load_phenotypes(infile, logfile):
nex_session = get_session()
name_to_locus_id = {}
for x in nex_session.query(Locusdbentity).all():
name_to_locus_id[x.systematic_name] = x.dbentity_id
if x.gene_name:
name_to_locus_id[x.gene_name] = x.dbentity_id
sgd = nex_session.query(Source).filter_by(format_name='SGD').one_or_none()
source_id = sgd.source_id
pmid_to_reference_id = dict([(x.pmid, x.dbentity_id) for x in nex_session.query(Referencedbentity).all()])
experiment_to_id = {}
mutant_to_id = {}
for x in nex_session.query(Apo).all():
if x.apo_namespace == 'experiment_type':
experiment_to_id[x.display_name] = x.apo_id
if x.apo_namespace == 'mutant_type':
mutant_to_id[x.display_name] = x.apo_id
annotation_id_to_last_group_id = {}
for x in nex_session.query(PhenotypeannotationCond).all():
last_group_id = 1
if x.annotation_id in annotation_id_to_last_group_id:
last_group_id = annotation_id_to_last_group_id[x.annotation_id]
if x.group_id > last_group_id:
last_group_id = x.group_id
annotation_id_to_last_group_id[x.annotation_id] = last_group_id
phenotype_to_id = dict([(x.display_name, x.phenotype_id) for x in nex_session.query(Phenotype).all()])
taxid_to_taxonomy_id = dict([(x.taxid, x.taxonomy_id) for x in nex_session.query(Taxonomy).all()])
allele_to_id = dict([(x.display_name, x.allele_id) for x in nex_session.query(Allele).all()])
reporter_to_id = dict([(x.display_name, x.reporter_id) for x in nex_session.query(Reporter).all()])
chebiid_to_name = dict([(x.chebiid, x.display_name) for x in nex_session.query(Chebi).all()])
fw = open(logfile, "w")
key_to_annotation_id = dict([((x.dbentity_id, x.taxonomy_id, x.reference_id, x.phenotype_id, x.experiment_id, x.mutant_id, x.allele_id, x.reporter_id, x.strain_name, x.details), x.annotation_id) for x in nex_session.query(Phenotypeannotation).all()])
strain_taxid_mapping = get_strain_taxid_mapping()
# f0 = open(degree_file)
# degree = None
# for line in f0:
# field = line.split("\t")
# degree = field[26]
# f0.close()
f = open(infile)
header = []
i = 0
superheader = []
header = []
cond_header = []
for line in f:
i = i + 1
pieces = line.strip().split("\t")
if i == 1:
superheader = pieces
continue
if i == 2:
j = 0
for x in pieces:
if x in ['required', 'Required'] or x == '':
x = superheader[j]
if x == "ChEBI ID":
x = "chemical_name"
header.append(x)
j = j + 1
header.append("details")
cond_header = header[cond_start_index:cond_stop_index]
continue
# if len(header) < column_size:
# for r in range(len(header), column_size-1):
# header.append("")
if len(pieces) < column_size:
for r in range(len(pieces), column_size-1):
pieces.append("")
conds = {}
created_by = None
dbentity_id = None
reference_id = None
taxonomy_id = None
experiment_id = None
mutant_id = None
allele_id = None
allele_comment = ""
reporter_id = None
reporter_comment = ""
details = ""
observable = ""
qualifier = ""
phenotype_id = None
strain_name = ""
bad_row = 0
conds = pieces[cond_start_index:cond_stop_index]
### testing
# print ("length of header=", len(header))
# print ("pieces33: ", header[33], pieces[33])
# print ("pieces34: ", header[34], pieces[34])
# print ("pieces35: ", header[35], pieces[35])
# continue
### end of testing
k = 0
for x in pieces:
if k < len(header):
field_name = header[k].strip()
else:
continue
if k < cond_stop_index and k >= cond_start_index:
k = k + 1
continue
k = k + 1
if x is "":
continue
## the rest is for phenotypeannotation table
if field_name.startswith('curator'):
created_by = x.strip()
if field_name == 'feature_name':
dbentity_id = name_to_locus_id.get(x.strip())
if dbentity_id is None:
print("The feature_name:", x, " is not in the database.")
bad_row = 1
break
if field_name == 'PMID':
reference_id = pmid_to_reference_id.get(int(x.strip()))
if reference_id is None:
print("The PMID: ", x, " is not in the database.")
bad_row = 1
break
if field_name == "experiment_type":
experiment_id = experiment_to_id.get(x.strip().replace('"', ''))
if experiment_id is None:
print("The experiment_type:", x, " is not in the APO table.")
bad_row = 1
break
if field_name == "mutant_type":
mutant_id = mutant_to_id.get(x.strip())
if mutant_id is None:
print("The mutant_type:", x, " is not in the APO table.")
bad_row = 1
continue
if field_name == "observable":
observable = x.strip()
if field_name == "qualifier":
qualifier = x.strip()
if field_name == "strain_background":
taxid = strain_taxid_mapping.get(x.strip())
if taxid is None:
print("The strain_background:", x, " is not in the mapping.")
bad_row = 1
continue
taxonomy_id = taxid_to_taxonomy_id.get(taxid)
if taxonomy_id is None:
print("The TAXON ID: ", taxid, " is not in the database.")
bad_row = 1
continue
if field_name == "strain_name":
strain_name = x.strip()
if field_name == "allele_name":
allele_id = allele_to_id.get(x.strip())
if allele_id is None:
allele_id = insert_allele(nex_session, fw, source_id,
created_by, x.strip())
allele_to_id[x.strip()] = allele_id
if field_name == "allele_description":
allele_comment = x
if field_name == "reporter_name":
reporter_id = reporter_to_id.get(x.strip())
if reporter_id is None:
reporter_id = insert_reporter(nex_session, fw, source_id,
created_by, x.strip())
reporter_to_id[x.strip()] = reporter_id
if field_name == "reporter_description":
reporter_comment = x
if field_name == "details":
details = x
if bad_row == 1:
continue
if created_by is None and observable == "":
continue
if observable != "":
phenotype = observable
if qualifier != "":
phenotype = observable + ": " + qualifier
phenotype_id = phenotype_to_id.get(phenotype)
if phenotype_id is None:
print("The phenotype:", phenotype, " is not in the database.")
continue
else:
print("No observable is provided for line:", line)
continue
if dbentity_id is None:
print("No feature_name is provided for line:", line)
continue
if taxonomy_id is None:
print("No strain_background is provided for line:", line)
continue
if reference_id is None:
print("No PMID is provided for line:",line)
continue
if created_by is None:
print("No curator ID is provided for line:", line)
continue
# print "dbentity_id=", dbentity_id, ", source_id=", source_id, ", taxonomy_id=", taxonomy_id, ", reference_id=", reference_id, ", phenotype_id=", phenotype_id, ", allele_id=", allele_id, ", allele_comment=", allele_comment, ", reporter_id=", reporter_id
key = (dbentity_id, taxonomy_id, reference_id, phenotype_id, experiment_id, mutant_id, allele_id, reporter_id, strain_name, details)
annotation_id = key_to_annotation_id.get(key)
group_id = 1
if annotation_id is None:
annotation_id = insert_phenotypeannotation(nex_session, fw,
source_id, created_by,
dbentity_id, taxonomy_id,
reference_id, phenotype_id,
experiment_id, mutant_id,
allele_id, allele_comment,
reporter_id, reporter_comment,
strain_name, details)
key_to_annotation_id[key] = annotation_id
else:
group_id = annotation_id_to_last_group_id.get(annotation_id)
if group_id is None:
group_id = 1
else:
group_id = group_id + 1
## insert conditions here
m = 0
for r in range(0, int(len(cond_header)/3)):
cond_name = conds[m]
cond_value = conds[m+1]
cond_unit = conds[m+2]
cond_class = cond_header[m].split("_")[0]
m = m + 3
if cond_name == "":
continue
if cond_class == "chemical":
chemical_names = cond_name.split(',')
chemical_values = cond_value.split(',')
chemical_units = cond_unit.split(',')
print("chemical_names=", chemical_names)
print("chemical_values=", chemical_values)
print("chemical_units=", chemical_units)
n = 0
for chemical_name in chemical_names:
chebiid = None
if chemical_name.startswith("CHEBI:"):
chebiid = chemical_name
else:
chebiid = "CHEBI:" + chemical_name
chebiid = chebiid.replace(" ", "")
cond_name = chebiid_to_name.get(chebiid)
cond_value = chemical_values[n]
cond_unit = chemical_units[n]
print("cond_name=", cond_name)
print("cond_value=", cond_value)
print("cond_unit=", cond_unit)
n = n + 1
if cond_name is None:
print("The ChEBI ID", chebiid, " is not in the database.")
continue
insert_phenotypeannotation_cond(nex_session, fw, created_by,
annotation_id, group_id,
cond_class, cond_name,
cond_value, cond_unit)
else:
# if cond_class in ['temperature', 'treatment'] and cond_unit.endswith('C'):
# cond_unit = degree
insert_phenotypeannotation_cond(nex_session, fw, created_by,
annotation_id, group_id,
cond_class, cond_name,
cond_value, cond_unit)
annotation_id_to_last_group_id[annotation_id] = group_id
##########
# nex_session.rollback()
nex_session.commit()
fw.close()
f.close()
def insert_phenotypeannotation_cond(nex_session, fw, created_by, annotation_id, group_id, cond_class, cond_name, cond_value, cond_unit):
print("New phenotypeannotation_cond:", created_by, annotation_id, group_id, cond_class, cond_name,cond_value, cond_unit)
x = PhenotypeannotationCond(annotation_id = annotation_id,
group_id = group_id,
condition_class = cond_class,
condition_name = cond_name,
condition_value = cond_value,
condition_unit = cond_unit,
created_by = created_by)
nex_session.add(x)
nex_session.flush()
nex_session.refresh(x)
def insert_reporter(nex_session, fw, source_id, created_by, reporter_name):
reporter_name= reporter_name.replace('"', '')
print("NEW Reporter:", created_by, reporter_name)
format_name = reporter_name.replace(" ", "_").replace("/", "-")
obj_url = "/reporter/" + format_name
x = Reporter(format_name = format_name,
display_name = reporter_name,
obj_url = obj_url,
source_id = source_id,
created_by = created_by)
nex_session.add(x)
nex_session.flush()
nex_session.refresh(x)
fw.write("Insert a new reporter: display_name=" + reporter_name + "\n")
return x.reporter_id
def insert_allele(nex_session, fw, source_id, created_by, allele_name):
allele_name = allele_name.replace('"', '')
print("NEW Allele:", created_by, allele_name)
format_name = allele_name.replace(" ", "_").replace("/", "-")
obj_url = "/allele/" + format_name
x = Allele(format_name = format_name,
display_name = allele_name,
obj_url = obj_url,
source_id = source_id,
created_by = created_by)
nex_session.add(x)
nex_session.flush()
nex_session.refresh(x)
fw.write("Insert a new allele: display_name=" + allele_name + "\n")
return x.allele_id
def insert_phenotypeannotation(nex_session, fw, source_id, created_by, dbentity_id, taxonomy_id, reference_id, phenotype_id, experiment_id, mutant_id, allele_id, allele_comment, reporter_id, reporter_comment, strain_name, details):
print("NEW phenotypeannotation: ", created_by, dbentity_id, taxonomy_id, reference_id, phenotype_id, experiment_id, mutant_id, allele_id, allele_comment, reporter_id, reporter_comment, strain_name, details)
allele_comment = allele_comment.replace('"', '')
reporter_comment = reporter_comment.replace('"', '')
# details = details.replace('"', '').decode('utf8')
details = details.replace('"', '')
x = Phenotypeannotation(source_id = source_id,
dbentity_id = dbentity_id,
taxonomy_id = taxonomy_id,
reference_id = reference_id,
phenotype_id = phenotype_id,
experiment_id = experiment_id,
mutant_id = mutant_id,
allele_id = allele_id,
allele_comment = allele_comment,
reporter_id = reporter_id,
reporter_comment = reporter_comment,
strain_name = strain_name,
details = details,
created_by = created_by)
nex_session.add(x)
nex_session.flush()
nex_session.refresh(x)
fw.write("Insert a new phenotypeannotation: dbentity_id=" + str(dbentity_id) + " reference_id=" + str(reference_id) + " phenotype_id=" + str(phenotype_id) + " experiment_id=" + str(experiment_id) + " mutant_id=" + str(mutant_id) + "\n")
return x.annotation_id
if __name__ == '__main__':
infile = None
if len(sys.argv) >= 2:
infile = sys.argv[1]
else:
print("Usage: python load_phenotype.py datafile")
print("Usage example: python load_phenotype.py scripts/loading/phenotype/data/phenotype_dataCuration091717.tsv")
exit()
logfile = "scripts/loading/phenotype/logs/" + infile.split('/')[4].replace(".txt", ".log")
load_phenotypes(infile, logfile)
| 16,262 | 0 | 143 |
56eba2bd40e6acba1eb4a8799f336598eab7c462 | 1,413 | py | Python | config.py | GT-AcerZhang/PaddlePaddle-OCR | b37211cae0ce3182ca7be05ba4a67153282fd7b5 | [
"Apache-2.0"
] | null | null | null | config.py | GT-AcerZhang/PaddlePaddle-OCR | b37211cae0ce3182ca7be05ba4a67153282fd7b5 | [
"Apache-2.0"
] | null | null | null | config.py | GT-AcerZhang/PaddlePaddle-OCR | b37211cae0ce3182ca7be05ba4a67153282fd7b5 | [
"Apache-2.0"
] | null | null | null | # data dict
dict_path = "dataset/dict.txt"
# Data shape
data_shape = [1, 60, -1]
# Minibatch size.
batch_size = 128
# Learning rate.
lr = 1e-3
# Learning rate decay strategy. 'piecewise_decay' or None is valid.
lr_decay_strategy = None
# L2 decay rate.
l2decay = 4e-4
# Momentum rate.
momentum = 0.9
# The threshold of gradient clipping.
gradient_clip = 10.0
# The number of iterations.
total_step = 720000
# Log period.
log_period = 100
# character class num + 1 .
num_classes = 62
# Save model period. '-1' means never saving the model.
save_model_period = 5000
# Evaluate period. '-1' means never evaluating the model.
eval_period = 5000
# The list file of images to be used for training.
train_list = 'dataset/train.txt'
# The list file of images to be used for training.
test_list = 'dataset/test.txt'
train_prefix = 'dataset/train'
test_prefix = 'dataset/test'
# Which type of network to be used. 'crnn_ctc' or 'attention'
use_model = 'crnn_ctc'
# Save model path
model_path = 'models/%s/train/' % use_model
infer_model_path = 'models/%s/infer/' % use_model
# The init model file of directory.
init_model = None
# Whether use GPU to train.
use_gpu = True
# Min average window.
min_average_window = 10000
# Max average window. It is proposed to be set as the number of minibatch in a pass.
max_average_window = 12500
# Average window.
average_window = 0.15
# Whether use parallel training.
parallel = False
| 27.705882 | 84 | 0.742392 | # data dict
dict_path = "dataset/dict.txt"
# Data shape
data_shape = [1, 60, -1]
# Minibatch size.
batch_size = 128
# Learning rate.
lr = 1e-3
# Learning rate decay strategy. 'piecewise_decay' or None is valid.
lr_decay_strategy = None
# L2 decay rate.
l2decay = 4e-4
# Momentum rate.
momentum = 0.9
# The threshold of gradient clipping.
gradient_clip = 10.0
# The number of iterations.
total_step = 720000
# Log period.
log_period = 100
# character class num + 1 .
num_classes = 62
# Save model period. '-1' means never saving the model.
save_model_period = 5000
# Evaluate period. '-1' means never evaluating the model.
eval_period = 5000
# The list file of images to be used for training.
train_list = 'dataset/train.txt'
# The list file of images to be used for training.
test_list = 'dataset/test.txt'
train_prefix = 'dataset/train'
test_prefix = 'dataset/test'
# Which type of network to be used. 'crnn_ctc' or 'attention'
use_model = 'crnn_ctc'
# Save model path
model_path = 'models/%s/train/' % use_model
infer_model_path = 'models/%s/infer/' % use_model
# The init model file of directory.
init_model = None
# Whether use GPU to train.
use_gpu = True
# Min average window.
min_average_window = 10000
# Max average window. It is proposed to be set as the number of minibatch in a pass.
max_average_window = 12500
# Average window.
average_window = 0.15
# Whether use parallel training.
parallel = False
| 0 | 0 | 0 |
c6f416a63af3b1efcde2f7ff2466a868d562aa14 | 426 | py | Python | src/boot.py | begeistert/thingsboard | a12fe9e8688df27f6f03798ac96511d4a5475421 | [
"MIT"
] | null | null | null | src/boot.py | begeistert/thingsboard | a12fe9e8688df27f6f03798ac96511d4a5475421 | [
"MIT"
] | null | null | null | src/boot.py | begeistert/thingsboard | a12fe9e8688df27f6f03798ac96511d4a5475421 | [
"MIT"
] | 1 | 2021-08-31T09:04:00.000Z | 2021-08-31T09:04:00.000Z | # This file is executed on every boot (including wake-boot from deepsleep)
import uos
import gc
import network
import sys
# import webrepl
# import esp
from wifi import *
sys.path.reverse()
# uos.dupterm(None, 1) # disable REPL on UART(0)
# esp.osdebug(None)
# webrepl.start()
gc.collect()
# Se inicia la conexión WiFi
connection = network.WLAN(network.STA_IF)
connection.active(True)
connection.connect(ssid, password)
| 17.75 | 74 | 0.753521 | # This file is executed on every boot (including wake-boot from deepsleep)
import uos
import gc
import network
import sys
# import webrepl
# import esp
from wifi import *
sys.path.reverse()
# uos.dupterm(None, 1) # disable REPL on UART(0)
# esp.osdebug(None)
# webrepl.start()
gc.collect()
# Se inicia la conexión WiFi
connection = network.WLAN(network.STA_IF)
connection.active(True)
connection.connect(ssid, password)
| 0 | 0 | 0 |
37bc23220677061ba989d369dbd704972ca5dcca | 3,781 | py | Python | utility.py | parice02/anagram | cbee7b2acd73beafa02ab60497f194bc679bc15b | [
"MIT"
] | null | null | null | utility.py | parice02/anagram | cbee7b2acd73beafa02ab60497f194bc679bc15b | [
"MIT"
] | null | null | null | utility.py | parice02/anagram | cbee7b2acd73beafa02ab60497f194bc679bc15b | [
"MIT"
] | null | null | null | # -*- conding: utf8 -*-
"""
@author: Muhammed Zeba (parice02)
"""
import time
import sqlite3
from re import compile, I
from typing import List, Dict
from pathlib import Path
import json
def regexp(motif: str, item: str) -> bool:
"""retourne True si le motif regex a été satisfait dans l'item
False sinon
"""
pattern = compile(motif, I)
return pattern.search(item) is not None
def listfetchall(cursor: sqlite3.Cursor) -> List:
"Return all rows from a cursor as a list"
return [row[0] for row in cursor.fetchall()]
class Timer(object):
""" """
class LoggerTimer(Timer):
"""
Source: https://saladtomatonion.com/blog/2014/12/16/mesurer-le-temps-dexecution-de-code-en-python/
"""
@staticmethod
class DBSQLite3(object):
""" """
def __init__(self, sqlite3_db: str = "db.db"):
""" """
self._connection = sqlite3.connect(sqlite3_db)
self._connection.create_function("regexp", 2, regexp)
self._cursor = self._connection.cursor()
def close_connection(self):
""" """
self._connection.close()
def close_cursor(self):
""" """
self._cursor.close()
@LoggerTimer("DBSQLite.execute_query() process time")
def execute_query(self, params) -> List:
""" """
query = "SELECT DISTINCT mot FROM mots WHERE LENGTH(mot) = :len AND regexp(:expr, mot)"
try:
self._cursor.execute(query, params)
results = listfetchall(self._cursor)
return (
results
if len(results) != 0
else [
0,
_("Aucune correspondance trouvée"),
]
)
except Exception as e:
return [
0,
e.__str__(),
]
| 26.815603 | 102 | 0.590849 | # -*- conding: utf8 -*-
"""
@author: Muhammed Zeba (parice02)
"""
import time
import sqlite3
from re import compile, I
from typing import List, Dict
from pathlib import Path
import json
def N_(s):
return s
def regexp(motif: str, item: str) -> bool:
"""retourne True si le motif regex a été satisfait dans l'item
False sinon
"""
pattern = compile(motif, I)
return pattern.search(item) is not None
def listfetchall(cursor: sqlite3.Cursor) -> List:
"Return all rows from a cursor as a list"
return [row[0] for row in cursor.fetchall()]
def load_config():
CONFIG_FILE = "config/config.json"
config_file = Path(CONFIG_FILE)
if config_file.exists() and config_file.is_file():
with open(file=CONFIG_FILE, mode="r", encoding="utf8") as file:
return json.load(file)
else:
raise FileNotFoundError
def load_license():
LICENCE_FILE = "LICENSE"
licence_path = Path(LICENCE_FILE)
if licence_path.exists() and licence_path.is_file():
with open(file=licence_path, mode="r", encoding="utf8") as file:
return file.read()
else:
raise FileNotFoundError
class Timer(object):
""" """
def __enter__(self):
self.start()
# __enter__ must return an instance bound with the "as" keyword
return self
def __exit__(self, *args, **kwargs):
# There are other arguments to __exit__ but we don't care here
self.stop()
def start(self):
if hasattr(self, "interval"):
del self.interval
self.start_time = time.time()
def stop(self):
if hasattr(self, "start_time"):
self.interval = time.time() - self.start_time
del self.start_time # Force timer re-init
class LoggerTimer(Timer):
"""
Source: https://saladtomatonion.com/blog/2014/12/16/mesurer-le-temps-dexecution-de-code-en-python/
"""
@staticmethod
def default_logger(msg):
print(msg)
def __init__(self, prefix="", func=None):
# Use func if not None else the default one
self.f = func or LoggerTimer.default_logger
# Format the prefix if not None or empty, else use empty string
self.prefix = f"{prefix}" if prefix else ""
def __call__(self, func):
# Use self as context manager in a decorated function
def decorated_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return decorated_func
def stop(self):
# Call the parent method
super(LoggerTimer, self).stop()
# Call the logging function with the message
self.f(f"{self.prefix}: {self.interval}")
class DBSQLite3(object):
""" """
def __init__(self, sqlite3_db: str = "db.db"):
""" """
self._connection = sqlite3.connect(sqlite3_db)
self._connection.create_function("regexp", 2, regexp)
self._cursor = self._connection.cursor()
def close_connection(self):
""" """
self._connection.close()
def close_cursor(self):
""" """
self._cursor.close()
@LoggerTimer("DBSQLite.execute_query() process time")
def execute_query(self, params) -> List:
""" """
query = "SELECT DISTINCT mot FROM mots WHERE LENGTH(mot) = :len AND regexp(:expr, mot)"
try:
self._cursor.execute(query, params)
results = listfetchall(self._cursor)
return (
results
if len(results) != 0
else [
0,
_("Aucune correspondance trouvée"),
]
)
except Exception as e:
return [
0,
e.__str__(),
]
| 1,663 | 0 | 284 |
12eafafe3a8f9e6a932a137acceeca7bda2d2a78 | 4,508 | py | Python | Cogs/general.py | Nemu627/EarthMC-StatsBOT-Discord | f34680f2ae23650c106c8536b46a7dde331adbad | [
"MIT"
] | null | null | null | Cogs/general.py | Nemu627/EarthMC-StatsBOT-Discord | f34680f2ae23650c106c8536b46a7dde331adbad | [
"MIT"
] | null | null | null | Cogs/general.py | Nemu627/EarthMC-StatsBOT-Discord | f34680f2ae23650c106c8536b46a7dde331adbad | [
"MIT"
] | null | null | null | import discord
import emc
from emc.async_ import get_data
from discord.ext import commands
| 51.816092 | 180 | 0.61402 | import discord
import emc
from emc.async_ import get_data
from discord.ext import commands
def _long_fields(embed, title, list_):
all_comma_sep = ", ".join(list_)
if len(all_comma_sep) > 1024-6:
list_a = all_comma_sep[:1024-6].split(", ")[:-1]
embed.add_field(name=title, value=f"```{', '.join(list_a)}```", inline=False)
_long_fields(embed, "\N{zero width space}", list_[len(list_a):])
else:
embed.add_field(name=title, value=f"```{all_comma_sep}```", inline=False)
class AppCmdGeneral(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["t"])
async def town(self, ctx, town_to_find):
try:
async with ctx.typing():
town = emc.Town(town_to_find, data=await get_data())
except emc.exceptions.TownNotFoundException:
embed = discord.Embed(title=f"The town {town_to_find} was not found", colour=0xb00e0e)
else:
embed = discord.Embed(title=town.name, colour=int(town.colour[1:], 16))
embed.add_field(name="Mayor", value=f"```{town.mayor}```")
embed.add_field(name="nation", value=f"```{town.nation}```")
embed.add_field(name="Flags", value=f"""```diff
{'+' if town.flags['capital'] else '-'} Capital
{'+' if town.flags['fire'] else '-'} Fire
{'+' if town.flags['explosions'] else '-'} Explosions
{'+' if town.flags['mobs'] else '-'} Mobs
{'+' if town.flags['pvp'] else '-'} PVP
```""")
_long_fields(embed, f"Residents [{len(town.residents)}]", [res.name for res in town.residents])
online = [res.name for res in town.residents if res.online]
if len(online) > 0:
embed.add_field(name=f"Online residents [{len(online)}]", value=f"```{', '.join(online)}```", inline=False)
else:
embed.add_field(name="Online residents [0]", value=f"```No online residents in {town}```", inline=False)
embed.set_author(name=ctx.author.nick, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.command(aliases=["n"])
async def nation(self, ctx, nation_to_find):
try:
async with ctx.typing():
nation = emc.Nation(nation_to_find, data=await get_data())
except emc.exceptions.NationNotFoundException:
embed = discord.Embed(title=f"The nation {nation_to_find} was not found", colour=0xb00e0e)
else:
embed = discord.Embed(title=nation.name, colour=int(nation.colour[1:], 16))
embed.add_field(name="Leader", value=f"```{nation.leader}```")
embed.add_field(name="Capital", value=f"```{nation.capital}```")
embed.add_field(name="Population", value=f"```{len(nation.citizens)}```")
_long_fields(embed, f"Towns [{len(nation.towns)}]", [town.name for town in nation.towns])
online = [res.name for res in nation.citizens if res.online]
if len(online) > 0:
embed.add_field(name=f"Online [{len(online)}]", value=f"```{', '.join(online)}```", inline=False)
else:
embed.add_field(name="Online [0]", value=f"```0 citizens online in {nation}```", inline=False)
embed.set_author(name=ctx.author.nick, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.command(aliases=["res", "player", "pl"])
async def resident(self, ctx, resident_to_find):
async with ctx.typing():
resident = emc.Resident(resident_to_find, data=await get_data())
embed = discord.Embed(title=resident.name, colour=0x0a8cf0)
embed.set_thumbnail(url=f"https://minotar.net/armor/bust/{resident}")
embed.add_field(name="Town", value=f"```{resident.town}```")
embed.add_field(name="Nation", value=f"```{resident.nation}```")
if resident.online:
if resident.hidden:
embed.add_field(name="Position", value=f"```{resident} is currently not visable on the map```")
else:
embed.add_field(name="Position", value=f"```{resident.position[0]}/{resident.position[1]}/{resident.position[2]}```([map]({emc.util.map_link(resident.position)}))")
else:
embed.add_field(name="Position", value=f"```{resident} is currently offline```")
embed.set_author(name=ctx.author.nick, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(AppCmdGeneral(bot))
| 4,099 | 249 | 69 |
7e150203f27443c4c56c3e12dd1125d7de69e5e9 | 1,027 | py | Python | gnes/encoder/audio/mfcc.py | hyzcn/gnes | a556ad287f30e271676b156dc7a47dd7b86956e9 | [
"Apache-2.0"
] | 2 | 2020-07-05T03:51:44.000Z | 2022-02-18T05:56:37.000Z | gnes/encoder/audio/mfcc.py | hyzcn/gnes | a556ad287f30e271676b156dc7a47dd7b86956e9 | [
"Apache-2.0"
] | null | null | null | gnes/encoder/audio/mfcc.py | hyzcn/gnes | a556ad287f30e271676b156dc7a47dd7b86956e9 | [
"Apache-2.0"
] | null | null | null |
from typing import List
import numpy as np
from ..base import BaseAudioEncoder
from ...helper import batching
| 31.121212 | 119 | 0.635833 |
from typing import List
import numpy as np
from ..base import BaseAudioEncoder
from ...helper import batching
class MfccEncoder(BaseAudioEncoder):
batch_size = 64
def __init__(self, n_mfcc: int = 13, sample_rate: int = 16000, max_length: int = 100, *args, **kwargs):
super().__init__(*args, **kwargs)
self.n_mfcc = n_mfcc
self.sample_rate = sample_rate
self.max_length = max_length
@batching
def encode(self, data: List['np.array'], *args, **kwargs) -> np.ndarray:
import librosa
mfccs = [np.array(librosa.feature.mfcc(y=audio, sr=self.sample_rate, n_mfcc=self.n_mfcc).T)
for audio in data]
mfccs = [np.concatenate((mf, np.zeros((self.max_length - mf.shape[0], self.n_mfcc), dtype=np.float32)), axis=0)
if mf.shape[0] < self.max_length else mf[:self.max_length] for mf in mfccs]
mfccs = [mfcc.reshape((1, -1)) for mfcc in mfccs]
mfccs = np.squeeze(np.array(mfccs), axis=1)
return mfccs
| 785 | 103 | 23 |
737d8b9d6816301bff076d72cea1c7b08b66645d | 3,017 | py | Python | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/a/access_to_protected_members.py | OrangeGzY/vimrc | ddcaedce2effbbd1014eddbceebeb8c621cd9f95 | [
"MIT"
] | 1 | 2021-05-08T07:32:20.000Z | 2021-05-08T07:32:20.000Z | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/a/access_to_protected_members.py | OrangeGzY/vimrc | ddcaedce2effbbd1014eddbceebeb8c621cd9f95 | [
"MIT"
] | null | null | null | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/a/access_to_protected_members.py | OrangeGzY/vimrc | ddcaedce2effbbd1014eddbceebeb8c621cd9f95 | [
"MIT"
] | null | null | null | # pylint: disable=too-few-public-methods, W0231, print-statement, useless-object-inheritance
# pylint: disable=no-classmethod-decorator
"""Test external access to protected class members."""
from __future__ import print_function
class MyClass(object):
"""Class with protected members."""
_cls_protected = 5
def test(self):
"""Docstring."""
self._protected += self._cls_protected
print(self.public._haha) # [protected-access]
def clsmeth(cls):
"""Docstring."""
cls._cls_protected += 1
print(cls._cls_protected)
clsmeth = classmethod(clsmeth)
def _private_method(self):
"""Doing nothing."""
class Subclass(MyClass):
"""Subclass with protected members."""
INST = Subclass()
INST.attr = 1
print(INST.attr)
INST._protected = 2 # [protected-access]
print(INST._protected) # [protected-access]
INST._cls_protected = 3 # [protected-access]
print(INST._cls_protected) # [protected-access]
class Issue1031(object):
"""Test for GitHub issue 1031"""
_attr = 1
def correct_access(self):
"""Demonstrates correct access"""
return type(self)._attr
def incorrect_access(self):
"""Demonstrates incorrect access"""
if self._attr == 1:
return type(INST)._protected # [protected-access]
return None
class Issue1802(object):
"""Test for GitHub issue 1802"""
def __eq__(self, other):
"""Test a correct access as the access to protected member is in a special method"""
if isinstance(other, self.__class__):
answer = self._foo == other._foo
return answer and self.__private == other.__private # [protected-access]
return False
def not_in_special(self, other):
"""
Test an incorrect access as the access to protected member is not inside a special method
"""
if isinstance(other, self.__class__):
return self._foo == other._foo # [protected-access]
return False
def __le__(self, other):
"""
Test a correct access as the access to protected member
is inside a special method even if it is deeply nested
"""
if 2 > 1:
if isinstance(other, self.__class__):
if "answer" == "42":
return self._foo == other._foo
return False
def __fake_special__(self, other):
"""
Test an incorrect access as the access
to protected member is not inside a licit special method
"""
if isinstance(other, self.__class__):
return self._foo == other._foo # [protected-access]
return False
| 29.578431 | 97 | 0.621478 | # pylint: disable=too-few-public-methods, W0231, print-statement, useless-object-inheritance
# pylint: disable=no-classmethod-decorator
"""Test external access to protected class members."""
from __future__ import print_function
class MyClass(object):
"""Class with protected members."""
_cls_protected = 5
def __init__(self, other):
MyClass._cls_protected = 6
self._protected = 1
self.public = other
self.attr = 0
def test(self):
"""Docstring."""
self._protected += self._cls_protected
print(self.public._haha) # [protected-access]
def clsmeth(cls):
"""Docstring."""
cls._cls_protected += 1
print(cls._cls_protected)
clsmeth = classmethod(clsmeth)
def _private_method(self):
"""Doing nothing."""
class Subclass(MyClass):
"""Subclass with protected members."""
def __init__(self):
MyClass._protected = 5
super()._private_method()
INST = Subclass()
INST.attr = 1
print(INST.attr)
INST._protected = 2 # [protected-access]
print(INST._protected) # [protected-access]
INST._cls_protected = 3 # [protected-access]
print(INST._cls_protected) # [protected-access]
class Issue1031(object):
"""Test for GitHub issue 1031"""
_attr = 1
def correct_access(self):
"""Demonstrates correct access"""
return type(self)._attr
def incorrect_access(self):
"""Demonstrates incorrect access"""
if self._attr == 1:
return type(INST)._protected # [protected-access]
return None
class Issue1802(object):
"""Test for GitHub issue 1802"""
def __init__(self, value):
self._foo = value
self.__private = 2 * value
def __eq__(self, other):
"""Test a correct access as the access to protected member is in a special method"""
if isinstance(other, self.__class__):
answer = self._foo == other._foo
return answer and self.__private == other.__private # [protected-access]
return False
def not_in_special(self, other):
"""
Test an incorrect access as the access to protected member is not inside a special method
"""
if isinstance(other, self.__class__):
return self._foo == other._foo # [protected-access]
return False
def __le__(self, other):
"""
Test a correct access as the access to protected member
is inside a special method even if it is deeply nested
"""
if 2 > 1:
if isinstance(other, self.__class__):
if "answer" == "42":
return self._foo == other._foo
return False
def __fake_special__(self, other):
"""
Test an incorrect access as the access
to protected member is not inside a licit special method
"""
if isinstance(other, self.__class__):
return self._foo == other._foo # [protected-access]
return False
| 247 | 0 | 80 |
fa4088f97a151acd7e55796d129b4ed7a8cd864b | 1,081 | py | Python | image2html/image2html.py | ZQPei/img2html | 06c1d1734593ec753964b92f3331a9959b14638d | [
"MIT"
] | 2 | 2019-12-17T08:51:25.000Z | 2021-05-26T16:25:55.000Z | image2html/image2html.py | ZQPei/img2html | 06c1d1734593ec753964b92f3331a9959b14638d | [
"MIT"
] | 1 | 2020-03-04T05:24:57.000Z | 2020-07-20T09:51:45.000Z | image2html/image2html.py | ZQPei/img2html | 06c1d1734593ec753964b92f3331a9959b14638d | [
"MIT"
] | 1 | 2020-12-09T04:39:11.000Z | 2020-12-09T04:39:11.000Z | from .html_generator import build_html_generator
if __name__ == "__main__":
image2html()
| 41.576923 | 153 | 0.693802 | from .html_generator import build_html_generator
def parse_args():
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument("--mode", type=str, choices=['single', 'multiple'], help='To display a single image folder or Compare multiple image folders.')
parser.add_argument("--dirs", type=str, nargs='+', help="Input can be a image folder or a flist text file.")
parser.add_argument("--output", type=str, help="Output html file")
parser.add_argument("--keyword", type=str, nargs='+', default='', help="Keyword of image name")
parser.add_argument("--ext", type=tuple, nargs='+', default=('.png', '.jpg'), help="Extension of image name")
parser.add_argument("--width", type=int, default=256, help="Display width on html page")
parser.add_argument("--recursive", action='store_true', help="Recursive to its sub dir")
args = parser.parse_args()
return args
def image2html():
args = parse_args()
html_generator = build_html_generator(args)
html_generator.generate()
if __name__ == "__main__":
image2html()
| 938 | 0 | 46 |
25351227459bae83e453349353bdc294c0b47886 | 122 | py | Python | Week_1_Python_Basics/Problem Set 1/ps1_problem2.py | marwan1023/MITX.6.00.1x | f054fe1a64c0868668998d1cd44d6bb3c5e431b3 | [
"CNRI-Python"
] | null | null | null | Week_1_Python_Basics/Problem Set 1/ps1_problem2.py | marwan1023/MITX.6.00.1x | f054fe1a64c0868668998d1cd44d6bb3c5e431b3 | [
"CNRI-Python"
] | null | null | null | Week_1_Python_Basics/Problem Set 1/ps1_problem2.py | marwan1023/MITX.6.00.1x | f054fe1a64c0868668998d1cd44d6bb3c5e431b3 | [
"CNRI-Python"
] | null | null | null | bob = 0
for i in range(len(s)-2):
if s[i:i+3] == 'bob':
bob += 1
print('Number of times bob occurs is:', bob)
| 20.333333 | 44 | 0.532787 | bob = 0
for i in range(len(s)-2):
if s[i:i+3] == 'bob':
bob += 1
print('Number of times bob occurs is:', bob)
| 0 | 0 | 0 |
f46d256053c1ccb9894f109ff08cd3706ce3ca7a | 1,021 | py | Python | app.py | rafaelsouzak2b/lista_favoritos | 38cdda263a7a8d157cbfd9abdc4b6e4af3b9f4b8 | [
"MIT"
] | null | null | null | app.py | rafaelsouzak2b/lista_favoritos | 38cdda263a7a8d157cbfd9abdc4b6e4af3b9f4b8 | [
"MIT"
] | null | null | null | app.py | rafaelsouzak2b/lista_favoritos | 38cdda263a7a8d157cbfd9abdc4b6e4af3b9f4b8 | [
"MIT"
] | null | null | null | from marshmallow import ValidationError
from src.app.ma import ma
from src.app.db import db
from src.app.controllers.cliente import Cliente, ClienteList
from src.app.controllers.favoritos import FavoritoList, Favorito
from src.app.controllers.usuario import UsuarioAuth, Usuario
from src.app.server.instance import server
import logging
api = server.api
app = server.app
log = logging.getLogger(__name__)
@app.before_first_request
server.cliente_ns.add_resource(ClienteList, '/clientes')
server.cliente_ns.add_resource(Cliente, '/clientes/<string:email>')
server.favoritos_ns.add_resource(FavoritoList, '/clientes/<string:email>/favoritos')
server.favoritos_ns.add_resource(Favorito, '/clientes/<string:email>/favoritos/<string:id_produto>')
server.usuario_ns.add_resource(UsuarioAuth, '/usuario/auth')
server.usuario_ns.add_resource(Usuario, '/usuario')
if __name__ == '__main__':
log.info('API inicializada')
db.init_app(app)
ma.init_app(app)
server.run() | 31.90625 | 100 | 0.790402 | from marshmallow import ValidationError
from src.app.ma import ma
from src.app.db import db
from src.app.controllers.cliente import Cliente, ClienteList
from src.app.controllers.favoritos import FavoritoList, Favorito
from src.app.controllers.usuario import UsuarioAuth, Usuario
from src.app.server.instance import server
import logging
api = server.api
app = server.app
log = logging.getLogger(__name__)
@app.before_first_request
def create_tables():
db.create_all()
server.cliente_ns.add_resource(ClienteList, '/clientes')
server.cliente_ns.add_resource(Cliente, '/clientes/<string:email>')
server.favoritos_ns.add_resource(FavoritoList, '/clientes/<string:email>/favoritos')
server.favoritos_ns.add_resource(Favorito, '/clientes/<string:email>/favoritos/<string:id_produto>')
server.usuario_ns.add_resource(UsuarioAuth, '/usuario/auth')
server.usuario_ns.add_resource(Usuario, '/usuario')
if __name__ == '__main__':
log.info('API inicializada')
db.init_app(app)
ma.init_app(app)
server.run() | 19 | 0 | 22 |
36e22b9a97d22dae4a3bf254b51c8c019598d7d4 | 24,151 | py | Python | sdk/python/pulumi_okta/behaviour.py | pulumi/pulumi-okta | 83f7617a85b3d05213901773fa4e6a151ab6076b | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2019-10-29T21:59:22.000Z | 2021-11-08T12:00:24.000Z | sdk/python/pulumi_okta/behaviour.py | pulumi/pulumi-okta | 83f7617a85b3d05213901773fa4e6a151ab6076b | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2020-01-06T10:28:09.000Z | 2022-03-25T19:52:40.000Z | sdk/python/pulumi_okta/behaviour.py | pulumi/pulumi-okta | 83f7617a85b3d05213901773fa4e6a151ab6076b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-11T16:31:04.000Z | 2020-11-24T12:23:17.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['BehaviourArgs', 'Behaviour']
@pulumi.input_type
@pulumi.input_type
| 44.313761 | 137 | 0.639601 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['BehaviourArgs', 'Behaviour']
@pulumi.input_type
class BehaviourArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
location_granularity_type: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
number_of_authentications: Optional[pulumi.Input[int]] = None,
radius_from_location: Optional[pulumi.Input[int]] = None,
status: Optional[pulumi.Input[str]] = None,
velocity: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a Behaviour resource.
:param pulumi.Input[str] type: Type of the behavior. Can be set to `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"`, `"ANOMALOUS_IP"`
or `"VELOCITY"`. Resource will be recreated when the type changes.
:param pulumi.Input[str] location_granularity_type: Determines the method and level of detail used to evaluate the behavior.
Required for `"ANOMALOUS_LOCATION"` behavior type. Can be set to `"LAT_LONG"`, `"CITY"`, `"COUNTRY"`
or `"SUBDIVISION"`.
:param pulumi.Input[str] name: Name of the behavior.
:param pulumi.Input[int] number_of_authentications: The number of recent authentications used to evaluate the behavior. Required
for `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"` and `"ANOMALOUS_IP"` behavior types.
:param pulumi.Input[int] radius_from_location: Radius from location (in kilometers). Should be at least 5. Required
when `location_granularity_type` is set to `"LAT_LONG"`.
:param pulumi.Input[str] status: The status of the behavior. By default, it is`"ACTIVE"`.
:param pulumi.Input[int] velocity: Velocity (in kilometers per hour). Should be at least 1. Required for `"VELOCITY"` behavior
type.
"""
pulumi.set(__self__, "type", type)
if location_granularity_type is not None:
pulumi.set(__self__, "location_granularity_type", location_granularity_type)
if name is not None:
pulumi.set(__self__, "name", name)
if number_of_authentications is not None:
pulumi.set(__self__, "number_of_authentications", number_of_authentications)
if radius_from_location is not None:
pulumi.set(__self__, "radius_from_location", radius_from_location)
if status is not None:
pulumi.set(__self__, "status", status)
if velocity is not None:
pulumi.set(__self__, "velocity", velocity)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of the behavior. Can be set to `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"`, `"ANOMALOUS_IP"`
or `"VELOCITY"`. Resource will be recreated when the type changes.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="locationGranularityType")
def location_granularity_type(self) -> Optional[pulumi.Input[str]]:
"""
Determines the method and level of detail used to evaluate the behavior.
Required for `"ANOMALOUS_LOCATION"` behavior type. Can be set to `"LAT_LONG"`, `"CITY"`, `"COUNTRY"`
or `"SUBDIVISION"`.
"""
return pulumi.get(self, "location_granularity_type")
@location_granularity_type.setter
def location_granularity_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location_granularity_type", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the behavior.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="numberOfAuthentications")
def number_of_authentications(self) -> Optional[pulumi.Input[int]]:
"""
The number of recent authentications used to evaluate the behavior. Required
for `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"` and `"ANOMALOUS_IP"` behavior types.
"""
return pulumi.get(self, "number_of_authentications")
@number_of_authentications.setter
def number_of_authentications(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "number_of_authentications", value)
@property
@pulumi.getter(name="radiusFromLocation")
def radius_from_location(self) -> Optional[pulumi.Input[int]]:
"""
Radius from location (in kilometers). Should be at least 5. Required
when `location_granularity_type` is set to `"LAT_LONG"`.
"""
return pulumi.get(self, "radius_from_location")
@radius_from_location.setter
def radius_from_location(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "radius_from_location", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of the behavior. By default, it is`"ACTIVE"`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def velocity(self) -> Optional[pulumi.Input[int]]:
"""
Velocity (in kilometers per hour). Should be at least 1. Required for `"VELOCITY"` behavior
type.
"""
return pulumi.get(self, "velocity")
@velocity.setter
def velocity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "velocity", value)
@pulumi.input_type
class _BehaviourState:
    def __init__(__self__, *,
                 location_granularity_type: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 number_of_authentications: Optional[pulumi.Input[int]] = None,
                 radius_from_location: Optional[pulumi.Input[int]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 velocity: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering Behaviour resources.

        :param pulumi.Input[str] location_granularity_type: Method and level of detail used to
               evaluate the behavior; required for `"ANOMALOUS_LOCATION"`.  One of `"LAT_LONG"`,
               `"CITY"`, `"COUNTRY"` or `"SUBDIVISION"`.
        :param pulumi.Input[str] name: Name of the behavior.
        :param pulumi.Input[int] number_of_authentications: Number of recent authentications used
               to evaluate the behavior; required for `"ANOMALOUS_LOCATION"`,
               `"ANOMALOUS_DEVICE"` and `"ANOMALOUS_IP"`.
        :param pulumi.Input[int] radius_from_location: Radius from location in kilometers
               (minimum 5); required when `location_granularity_type` is `"LAT_LONG"`.
        :param pulumi.Input[str] status: Status of the behavior, `"ACTIVE"` by default.
        :param pulumi.Input[str] type: Behavior type: `"ANOMALOUS_LOCATION"`,
               `"ANOMALOUS_DEVICE"`, `"ANOMALOUS_IP"` or `"VELOCITY"`.  Changing it
               recreates the resource.
        :param pulumi.Input[int] velocity: Velocity in kilometers per hour (minimum 1);
               required for the `"VELOCITY"` type.
        """
        # Forward only the values the caller actually supplied; unset fields
        # stay absent from the state bag.
        for key, supplied in (
                ("location_granularity_type", location_granularity_type),
                ("name", name),
                ("number_of_authentications", number_of_authentications),
                ("radius_from_location", radius_from_location),
                ("status", status),
                ("type", type),
                ("velocity", velocity)):
            if supplied is not None:
                pulumi.set(__self__, key, supplied)
    @property
    @pulumi.getter(name="locationGranularityType")
    def location_granularity_type(self) -> Optional[pulumi.Input[str]]:
        """Granularity used for location-based evaluation (see ``__init__``)."""
        return pulumi.get(self, "location_granularity_type")
    @location_granularity_type.setter
    def location_granularity_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location_granularity_type", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the behavior."""
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="numberOfAuthentications")
    def number_of_authentications(self) -> Optional[pulumi.Input[int]]:
        """Number of recent authentications considered during evaluation."""
        return pulumi.get(self, "number_of_authentications")
    @number_of_authentications.setter
    def number_of_authentications(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "number_of_authentications", value)
    @property
    @pulumi.getter(name="radiusFromLocation")
    def radius_from_location(self) -> Optional[pulumi.Input[int]]:
        """Radius from location in kilometers (minimum 5)."""
        return pulumi.get(self, "radius_from_location")
    @radius_from_location.setter
    def radius_from_location(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "radius_from_location", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """Status of the behavior; `"ACTIVE"` by default."""
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """Behavior type; changing it recreates the resource."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def velocity(self) -> Optional[pulumi.Input[int]]:
        """Velocity in kilometers per hour (minimum 1)."""
        return pulumi.get(self, "velocity")
    @velocity.setter
    def velocity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "velocity", value)
class Behaviour(pulumi.CustomResource):
    """
    An Okta behavior-detection rule (anomalous location/device/IP or velocity).

    Two ``__init__`` overloads are declared below: one taking plain keyword
    arguments, one taking a pre-built ``BehaviourArgs``.  The concrete
    ``__init__`` dispatches between them and forwards to ``_internal_init``.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location_granularity_type: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 number_of_authentications: Optional[pulumi.Input[int]] = None,
                 radius_from_location: Optional[pulumi.Input[int]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 velocity: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Creates different types of behavior.
        This resource allows you to create and configure a behavior.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_okta as okta
        my_location = okta.Behaviour("myLocation",
            location_granularity_type="LAT_LONG",
            number_of_authentications=50,
            radius_from_location=20,
            type="ANOMALOUS_LOCATION")
        my_city = okta.Behaviour("myCity",
            location_granularity_type="CITY",
            number_of_authentications=50,
            type="ANOMALOUS_LOCATION")
        my_device = okta.Behaviour("myDevice",
            number_of_authentications=50,
            type="ANOMALOUS_DEVICE")
        my_ip = okta.Behaviour("myIp",
            number_of_authentications=50,
            type="ANOMALOUS_IP")
        my_velocity = okta.Behaviour("myVelocity",
            type="VELOCITY",
            velocity=25)
        ```
        ## Import
        Behavior can be imported via the Okta ID.
        ```sh
        $ pulumi import okta:index/behaviour:Behaviour example <behavior id>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location_granularity_type: Determines the method and level of detail used to evaluate the behavior.
               Required for `"ANOMALOUS_LOCATION"` behavior type. Can be set to `"LAT_LONG"`, `"CITY"`, `"COUNTRY"`
               or `"SUBDIVISION"`.
        :param pulumi.Input[str] name: Name of the behavior.
        :param pulumi.Input[int] number_of_authentications: The number of recent authentications used to evaluate the behavior. Required
               for `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"` and `"ANOMALOUS_IP"` behavior types.
        :param pulumi.Input[int] radius_from_location: Radius from location (in kilometers). Should be at least 5. Required
               when `location_granularity_type` is set to `"LAT_LONG"`.
        :param pulumi.Input[str] status: The status of the behavior. By default, it is `"ACTIVE"`.
        :param pulumi.Input[str] type: Type of the behavior. Can be set to `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"`, `"ANOMALOUS_IP"`
               or `"VELOCITY"`. Resource will be recreated when the type changes.
        :param pulumi.Input[int] velocity: Velocity (in kilometers per hour). Should be at least 1. Required for `"VELOCITY"` behavior
               type.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BehaviourArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates different types of behavior.
        This resource allows you to create and configure a behavior.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_okta as okta
        my_location = okta.Behaviour("myLocation",
            location_granularity_type="LAT_LONG",
            number_of_authentications=50,
            radius_from_location=20,
            type="ANOMALOUS_LOCATION")
        my_city = okta.Behaviour("myCity",
            location_granularity_type="CITY",
            number_of_authentications=50,
            type="ANOMALOUS_LOCATION")
        my_device = okta.Behaviour("myDevice",
            number_of_authentications=50,
            type="ANOMALOUS_DEVICE")
        my_ip = okta.Behaviour("myIp",
            number_of_authentications=50,
            type="ANOMALOUS_IP")
        my_velocity = okta.Behaviour("myVelocity",
            type="VELOCITY",
            velocity=25)
        ```
        ## Import
        Behavior can be imported via the Okta ID.
        ```sh
        $ pulumi import okta:index/behaviour:Behaviour example <behavior id>
        ```
        :param str resource_name: The name of the resource.
        :param BehaviourArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used (a single BehaviourArgs object
        # vs. plain keyword arguments) and forward to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(BehaviourArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location_granularity_type: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 number_of_authentications: Optional[pulumi.Input[int]] = None,
                 radius_from_location: Optional[pulumi.Input[int]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 velocity: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        # Common registration path for both overloads.  Normalizes the
        # resource options, builds the BehaviourArgs property bag and hands it
        # to the pulumi engine via CustomResource.__init__.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ may only be supplied when
            # rehydrating an existing resource by id (see Behaviour.get).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BehaviourArgs.__new__(BehaviourArgs)
            __props__.__dict__["location_granularity_type"] = location_granularity_type
            __props__.__dict__["name"] = name
            __props__.__dict__["number_of_authentications"] = number_of_authentications
            __props__.__dict__["radius_from_location"] = radius_from_location
            __props__.__dict__["status"] = status
            # 'type' is the only required input when creating a new resource.
            if type is None and not opts.urn:
                raise TypeError("Missing required property 'type'")
            __props__.__dict__["type"] = type
            __props__.__dict__["velocity"] = velocity
        super(Behaviour, __self__).__init__(
            'okta:index/behaviour:Behaviour',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            location_granularity_type: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            number_of_authentications: Optional[pulumi.Input[int]] = None,
            radius_from_location: Optional[pulumi.Input[int]] = None,
            status: Optional[pulumi.Input[str]] = None,
            type: Optional[pulumi.Input[str]] = None,
            velocity: Optional[pulumi.Input[int]] = None) -> 'Behaviour':
        """
        Get an existing Behaviour resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location_granularity_type: Determines the method and level of detail used to evaluate the behavior.
               Required for `"ANOMALOUS_LOCATION"` behavior type. Can be set to `"LAT_LONG"`, `"CITY"`, `"COUNTRY"`
               or `"SUBDIVISION"`.
        :param pulumi.Input[str] name: Name of the behavior.
        :param pulumi.Input[int] number_of_authentications: The number of recent authentications used to evaluate the behavior. Required
               for `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"` and `"ANOMALOUS_IP"` behavior types.
        :param pulumi.Input[int] radius_from_location: Radius from location (in kilometers). Should be at least 5. Required
               when `location_granularity_type` is set to `"LAT_LONG"`.
        :param pulumi.Input[str] status: The status of the behavior. By default, it is `"ACTIVE"`.
        :param pulumi.Input[str] type: Type of the behavior. Can be set to `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"`, `"ANOMALOUS_IP"`
               or `"VELOCITY"`. Resource will be recreated when the type changes.
        :param pulumi.Input[int] velocity: Velocity (in kilometers per hour). Should be at least 1. Required for `"VELOCITY"` behavior
               type.
        """
        # Rehydrate by id: build a state bag from the provided values and let
        # _internal_init take the opts.id branch.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _BehaviourState.__new__(_BehaviourState)
        __props__.__dict__["location_granularity_type"] = location_granularity_type
        __props__.__dict__["name"] = name
        __props__.__dict__["number_of_authentications"] = number_of_authentications
        __props__.__dict__["radius_from_location"] = radius_from_location
        __props__.__dict__["status"] = status
        __props__.__dict__["type"] = type
        __props__.__dict__["velocity"] = velocity
        return Behaviour(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="locationGranularityType")
    def location_granularity_type(self) -> pulumi.Output[Optional[str]]:
        """
        Determines the method and level of detail used to evaluate the behavior.
        Required for `"ANOMALOUS_LOCATION"` behavior type. Can be set to `"LAT_LONG"`, `"CITY"`, `"COUNTRY"`
        or `"SUBDIVISION"`.
        """
        return pulumi.get(self, "location_granularity_type")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the behavior.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="numberOfAuthentications")
    def number_of_authentications(self) -> pulumi.Output[Optional[int]]:
        """
        The number of recent authentications used to evaluate the behavior. Required
        for `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"` and `"ANOMALOUS_IP"` behavior types.
        """
        return pulumi.get(self, "number_of_authentications")
    @property
    @pulumi.getter(name="radiusFromLocation")
    def radius_from_location(self) -> pulumi.Output[Optional[int]]:
        """
        Radius from location (in kilometers). Should be at least 5. Required
        when `location_granularity_type` is set to `"LAT_LONG"`.
        """
        return pulumi.get(self, "radius_from_location")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[Optional[str]]:
        """
        The status of the behavior. By default, it is `"ACTIVE"`.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the behavior. Can be set to `"ANOMALOUS_LOCATION"`, `"ANOMALOUS_DEVICE"`, `"ANOMALOUS_IP"`
        or `"VELOCITY"`. Resource will be recreated when the type changes.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def velocity(self) -> pulumi.Output[Optional[int]]:
        """
        Velocity (in kilometers per hour). Should be at least 1. Required for `"VELOCITY"` behavior
        type.
        """
        return pulumi.get(self, "velocity")
| 3,494 | 20,174 | 67 |
453a00b5f5eff817a5db5d9cffe28cc248ea085c | 228 | py | Python | ranker/__init__.py | rebryk/SPbAU-IR | 15ef67b49617cc6b67d69ecba5e2585ea56a52b7 | [
"MIT"
] | null | null | null | ranker/__init__.py | rebryk/SPbAU-IR | 15ef67b49617cc6b67d69ecba5e2585ea56a52b7 | [
"MIT"
] | null | null | null | ranker/__init__.py | rebryk/SPbAU-IR | 15ef67b49617cc6b67d69ecba5e2585ea56a52b7 | [
"MIT"
] | null | null | null | import logging
from .ranker import AbstractAndArticle
from .tfidf import TfIdf
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
__all__ = ("AbstractAndArticle", "TfIdf")
| 25.333333 | 103 | 0.741228 | import logging
from .ranker import AbstractAndArticle
from .tfidf import TfIdf
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
__all__ = ("AbstractAndArticle", "TfIdf")
| 0 | 0 | 0 |
566b6caef237f48bef5eff181a6d2a1a17ed799d | 8,460 | py | Python | banner.py | mh-Earth/instaploit | fb871b3c0d7d74df828826314da3893945ee71ad | [
"MIT"
] | 7 | 2021-02-25T17:37:32.000Z | 2022-01-13T15:49:45.000Z | banner.py | mh-Earth/instaploit | fb871b3c0d7d74df828826314da3893945ee71ad | [
"MIT"
] | 1 | 2021-02-25T20:13:18.000Z | 2021-02-27T06:22:12.000Z | banner.py | mh-Earth/instaploit | fb871b3c0d7d74df828826314da3893945ee71ad | [
"MIT"
] | null | null | null | from random import choice
banner1=('''
██
██████ ████ ██
██████ ██ ████ ██ ██
██ ██ ██ ██
██ ██░████ ▒█████░███████ ▒████▓ ██░███▒ ██ ░████░ ████ ███████
██ ███████▓███████████████ ██████▓███████▒██ ░██████░ ████ ███████
██ ███ ▒████▒ ░▒█ ██ █▒ ▒█████ █████ ███ ███ ██ ██
██ ██ ███████▓░ ██ ▒███████░ ░████ ██░ ░██ ██ ██
██ ██ ██░██████▒ ██ ░█████████ ████ ██ ██ ██ ██
██ ██ ██ ░▒▓██ ██ ██▓░ ████░ ░████ ██░ ░██ ██ ██
██ ██ ███▒░ ▒██ ██░ ██▒ ██████ █████▒ ███ ███ ██ ██░
████████ ██████████ ████████████████████▒█████░██████░█████████████
████████ ██░▓████▓ ░████ ▓███░████░███▒ ░████ ░████░ ████████░████
██
██
██
''')
banner2=("""
░▀█▀░█▀█░█▀▀░▀█▀░█▀█░█▀█░█░░░█▀█░▀█▀░▀█▀
░░█░░█░█░▀▀█░░█░░█▀█░█▀▀░█░░░█░█░░█░░░█░
░▀▀▀░▀░▀░▀▀▀░░▀░░▀░▀░▀░░░▀▀▀░▀▀▀░▀▀▀░░▀░
""")
banner3=("""
___ _ _ _ _
|_ _| _ _ ___| |_ __ _ _ __ | | ___ (_)| |_
| | | ' \ (_-<| _|/ _` || '_ \| |/ _ \| || _|
|___||_||_|/__/ \__|\__,_|| .__/|_|\___/|_| \__|
|_|
""")
banner4=('''
mmmmmm mmmm ##
""##"" ## ""## "" ##
## ##m####mmm#####m####### m#####m##m###m ## m####m #### #######
## ##" ####mmmm " ## " mmm####" "#### ##" "## ## ##
## ## ## """"##m ## m##"""#### #### ## ## ## ##
mm##mm## ###mmmmm## ##mmm##mmm######mm##"##mmm"##mm##"mmm##mmm##mmm
"""""""" "" """""" """" """" ""## """ """" """" """""""" """"
##
''')
banner5=("""
##
###### #### ##
###### ## #### ## ##
## ## ## ##
## ##.#### :#####.####### :#### ##.###: ## .####. #### #######
## ####### ############### ###### #######:## .######. #### #######
## ### :####: .:# ## #: :##### ##### ### ### ## ##
## ## ####### . ## :#######. .#### ##. .## ## ##
## ## ##.######: ## .######### #### ## ## ## ##
## ## ## .: ## ## ## . ####. .#### ##. .## ## ##
## ## ###:. :## ##. ##: ###### #####: ### ### ## ##.
######## ########## ####################:#####.######.#############
######## ##. #### .#### ###.####.###: .#### .####. ########.####
##
##
##
""")
banner6=("""
____ __ __ _ __
/ _/____ _____ / /_ ____ _ ____ / /____ (_)/ /_
/ / / __ \ / ___// __// __ `// __ \ / // __ \ / // __/
_/ / / / / /(__ )/ /_ / /_/ // /_/ // // /_/ // // /_
/___//_/ /_//____/ \__/ \__,_// .___//_/ \____//_/ \__/
/_/
""")
banner7=('''
___ _ _ _ _
|_ _|_ __ ___| |_ __ _ _ __ | | ___ (_) |_
| || '_ \/ __| __/ _` | '_ \| |/ _ \| | __|
| || | | \__ \ || (_| | |_) | | (_) | | |_
|___|_| |_|___/\__\__,_| .__/|_|\___/|_|\__|
|_|
''')
banner8=("""
███ █
█████ █ █ █
█ █ █ █
█ █▒██▒▒███▒█████░███░█▓██ █ ███████████
█ █▓ ▒██▒ ░█ █ █▒ ▒██▓ ▓██ █▓ ▓█ █ █
█ █ ██▒░ █ ██ ██ █ █ █ █
█ █ █░███▒ █ ▒█████ ██ █ █ █ █
█ █ █ ▒█ █ █▒ ██ ██ █ █ █ █
█ █ ██░ ▒█ █░ █░ ▓██▓ ▓██░█▓ ▓█ █ █░
██████ █▒███▒ ▒██▒██▒██▓██ ▒██████████▒██
█
█
█
""")
banner9=("""
###
# # # #### ##### ## ##### # #### # #####
# ## # # # # # # # # # # # #
# # # # #### # # # # # # # # # #
# # # # # # ###### ##### # # # # #
# # ## # # # # # # # # # # #
### # # #### # # # # ###### #### # #
""")
banner10=("""
▄▄▄▄▄▄ ▄▄▄▄ ██
▀▀██▀▀ ██ ▀▀██ ▀▀ ██
██ ██▄████▄▄▄█████▄███████ ▄█████▄██▄███▄ ██ ▄████▄ ████ ███████
██ ██▀ ████▄▄▄▄ ▀ ██ ▀ ▄▄▄████▀ ▀████ ██▀ ▀██ ██ ██
██ ██ ██ ▀▀▀▀██▄ ██ ▄██▀▀▀████ ████ ██ ██ ██ ██
▄▄██▄▄██ ███▄▄▄▄▄██ ██▄▄▄██▄▄▄██████▄▄██▀██▄▄▄▀██▄▄██▀▄▄▄██▄▄▄██▄▄▄
▀▀▀▀▀▀▀▀ ▀▀ ▀▀▀▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀▀██ ▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀▀▀▀▀▀▀▀ ▀▀▀▀
██
""") | 60.863309 | 298 | 0.089362 | from random import choice
banner1=('''
██
██████ ████ ██
██████ ██ ████ ██ ██
██ ██ ██ ██
██ ██░████ ▒█████░███████ ▒████▓ ██░███▒ ██ ░████░ ████ ███████
██ ███████▓███████████████ ██████▓███████▒██ ░██████░ ████ ███████
██ ███ ▒████▒ ░▒█ ██ █▒ ▒█████ █████ ███ ███ ██ ██
██ ██ ███████▓░ ██ ▒███████░ ░████ ██░ ░██ ██ ██
██ ██ ██░██████▒ ██ ░█████████ ████ ██ ██ ██ ██
██ ██ ██ ░▒▓██ ██ ██▓░ ████░ ░████ ██░ ░██ ██ ██
██ ██ ███▒░ ▒██ ██░ ██▒ ██████ █████▒ ███ ███ ██ ██░
████████ ██████████ ████████████████████▒█████░██████░█████████████
████████ ██░▓████▓ ░████ ▓███░████░███▒ ░████ ░████░ ████████░████
██
██
██
''')
banner2=("""
░▀█▀░█▀█░█▀▀░▀█▀░█▀█░█▀█░█░░░█▀█░▀█▀░▀█▀
░░█░░█░█░▀▀█░░█░░█▀█░█▀▀░█░░░█░█░░█░░░█░
░▀▀▀░▀░▀░▀▀▀░░▀░░▀░▀░▀░░░▀▀▀░▀▀▀░▀▀▀░░▀░
""")
banner3=("""
___ _ _ _ _
|_ _| _ _ ___| |_ __ _ _ __ | | ___ (_)| |_
| | | ' \ (_-<| _|/ _` || '_ \| |/ _ \| || _|
|___||_||_|/__/ \__|\__,_|| .__/|_|\___/|_| \__|
|_|
""")
banner4=('''
mmmmmm mmmm ##
""##"" ## ""## "" ##
## ##m####mmm#####m####### m#####m##m###m ## m####m #### #######
## ##" ####mmmm " ## " mmm####" "#### ##" "## ## ##
## ## ## """"##m ## m##"""#### #### ## ## ## ##
mm##mm## ###mmmmm## ##mmm##mmm######mm##"##mmm"##mm##"mmm##mmm##mmm
"""""""" "" """""" """" """" ""## """ """" """" """""""" """"
##
''')
banner5=("""
##
###### #### ##
###### ## #### ## ##
## ## ## ##
## ##.#### :#####.####### :#### ##.###: ## .####. #### #######
## ####### ############### ###### #######:## .######. #### #######
## ### :####: .:# ## #: :##### ##### ### ### ## ##
## ## ####### . ## :#######. .#### ##. .## ## ##
## ## ##.######: ## .######### #### ## ## ## ##
## ## ## .: ## ## ## . ####. .#### ##. .## ## ##
## ## ###:. :## ##. ##: ###### #####: ### ### ## ##.
######## ########## ####################:#####.######.#############
######## ##. #### .#### ###.####.###: .#### .####. ########.####
##
##
##
""")
banner6=("""
____ __ __ _ __
/ _/____ _____ / /_ ____ _ ____ / /____ (_)/ /_
/ / / __ \ / ___// __// __ `// __ \ / // __ \ / // __/
_/ / / / / /(__ )/ /_ / /_/ // /_/ // // /_/ // // /_
/___//_/ /_//____/ \__/ \__,_// .___//_/ \____//_/ \__/
/_/
""")
banner7=('''
___ _ _ _ _
|_ _|_ __ ___| |_ __ _ _ __ | | ___ (_) |_
| || '_ \/ __| __/ _` | '_ \| |/ _ \| | __|
| || | | \__ \ || (_| | |_) | | (_) | | |_
|___|_| |_|___/\__\__,_| .__/|_|\___/|_|\__|
|_|
''')
banner8=("""
███ █
█████ █ █ █
█ █ █ █
█ █▒██▒▒███▒█████░███░█▓██ █ ███████████
█ █▓ ▒██▒ ░█ █ █▒ ▒██▓ ▓██ █▓ ▓█ █ █
█ █ ██▒░ █ ██ ██ █ █ █ █
█ █ █░███▒ █ ▒█████ ██ █ █ █ █
█ █ █ ▒█ █ █▒ ██ ██ █ █ █ █
█ █ ██░ ▒█ █░ █░ ▓██▓ ▓██░█▓ ▓█ █ █░
██████ █▒███▒ ▒██▒██▒██▓██ ▒██████████▒██
█
█
█
""")
banner9=("""
###
# # # #### ##### ## ##### # #### # #####
# ## # # # # # # # # # # # #
# # # # #### # # # # # # # # # #
# # # # # # ###### ##### # # # # #
# # ## # # # # # # # # # # #
### # # #### # # # # ###### #### # #
""")
banner10=("""
▄▄▄▄▄▄ ▄▄▄▄ ██
▀▀██▀▀ ██ ▀▀██ ▀▀ ██
██ ██▄████▄▄▄█████▄███████ ▄█████▄██▄███▄ ██ ▄████▄ ████ ███████
██ ██▀ ████▄▄▄▄ ▀ ██ ▀ ▄▄▄████▀ ▀████ ██▀ ▀██ ██ ██
██ ██ ██ ▀▀▀▀██▄ ██ ▄██▀▀▀████ ████ ██ ██ ██ ██
▄▄██▄▄██ ███▄▄▄▄▄██ ██▄▄▄██▄▄▄██████▄▄██▀██▄▄▄▀██▄▄██▀▄▄▄██▄▄▄██▄▄▄
▀▀▀▀▀▀▀▀ ▀▀ ▀▀▀▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀▀██ ▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀▀▀▀▀▀▀▀ ▀▀▀▀
██
""")
def randomBanners():
    """Return one of the ten module-level ASCII-art banners, chosen at random."""
    options = [banner1, banner2, banner3, banner4, banner5,
               banner6, banner7, banner8, banner9, banner10]
    return choice(options)
def ranbomTips():
    """Return a random usage tip to show on startup."""
    tips = (
        "Always feel free to enter back :)",
        "Login is the key",
        "You can always back to home",
        "Removeing login log can be usefull sometime",
        "Press ctrl+c to end a process on liunx",
        "Exit is always avalable",
        "Dont use your instagram account while using instaploit as you login as same the account",
    )
    return choice(tips)
08f6e34eb576d4fb663cb60cd23844b93b06d91d | 413 | py | Python | main/migrations/0003_pdfaudio_audio_file.py | VijyantVerma/pdf-to-audio | 2eb925be77c4ee079b27a180521fa7fc460b8e5f | [
"MIT"
] | 1 | 2020-10-14T22:50:01.000Z | 2020-10-14T22:50:01.000Z | main/migrations/0003_pdfaudio_audio_file.py | VijyantVerma/pdf-to-audio | 2eb925be77c4ee079b27a180521fa7fc460b8e5f | [
"MIT"
] | null | null | null | main/migrations/0003_pdfaudio_audio_file.py | VijyantVerma/pdf-to-audio | 2eb925be77c4ee079b27a180521fa7fc460b8e5f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-20 18:25
from django.db import migrations, models
| 21.736842 | 76 | 0.605327 | # Generated by Django 3.1.2 on 2020-10-20 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20201020_2339'),
]
operations = [
migrations.AddField(
model_name='pdfaudio',
name='audio_file',
field=models.FileField(blank=True, null=True, upload_to='recs'),
),
]
| 0 | 299 | 23 |
26d881e5af1dfe72f33db1b99628896f3fc92b9a | 6,732 | py | Python | RAISoft/instruments/Regulator.py | daveraees/EMA_Test_Lab | a3073c5ec205d6ee327a993b38e92698c12cb0a6 | [
"MIT"
] | null | null | null | RAISoft/instruments/Regulator.py | daveraees/EMA_Test_Lab | a3073c5ec205d6ee327a993b38e92698c12cb0a6 | [
"MIT"
] | null | null | null | RAISoft/instruments/Regulator.py | daveraees/EMA_Test_Lab | a3073c5ec205d6ee327a993b38e92698c12cb0a6 | [
"MIT"
] | null | null | null | # This should define the abstract regulator
import threading
from LocalTimer import LocalTimerClass
from DummyMeter import Bedna
from numpy import array
| 40.071429 | 145 | 0.577094 | # This should define the abstract regulator
import threading
from LocalTimer import LocalTimerClass
from DummyMeter import Bedna
from numpy import array
class Regulator(Bedna):
    """
    Abstract feedback regulator coupling an actuator (*valve*) with a sensor.

    A background thread (:meth:`_regLoop`) repeatedly measures the sensor,
    evaluates the current setpoint and applies a new control signal to the
    valve.  Subclasses must implement :meth:`Measure`, :meth:`_measure_sensor`
    and :meth:`_calculateNew` (the control law).
    """
    class Feedback:
        """Mutable state of the feedback loop."""
        prop = 0.1               # proportional gain
        loopTime = 1             # nominal regulation period [s]
        measLoopTime = loopTime  # measured duration of the last loop [s]
        memory = None            # previous control signal
        signal = None            # current control signal
    class setpointInfo:
        """Bookkeeping for the current setpoint and its evaluation."""
        # NOTE(review): these are class attributes, so all Regulator instances
        # share one Event / statistics record -- confirm that only a single
        # Regulator is instantiated per process before relying on this.
        reached = threading.Event()
        ready = False
        valid = False
        timestamp = 0.0
        evaluatePeriod = 0.0
        sigma = 0.0              # accumulated squared deviation
        loopCounter = 1
        deviation = 0.0
        value = 0.0              # value of the setpoint
        actualValue = 0.0        # reading of the sensor
        pastValue = 0.0          # previous reading of the sensor
        tolerance = 0.0
    fb = Feedback()
    setpoint = setpointInfo()
    state = True  # Do regulation looping?
    def __init__(self, Valve=None, Sensor=None, looptime=None,):
        """
        :param Valve: actuator object; its ``Set(signal)`` method is called by
            :meth:`_adjust`.
        :param Sensor: measurement device used by :meth:`_measure_sensor`.
        :param looptime: regulation period in seconds; when omitted the
            default ``Feedback.loopTime`` is kept.
        """
        self.valve = Valve
        self.sensor = Sensor
        self.timer = LocalTimerClass()
        self.regulation = threading.Thread(target=self._regLoop, name='RegulatorLoop')
        self.setpoint.reached.clear()
        if looptime is not None:
            self.fb.loopTime = looptime
        # initialize the output: time since the setpoint was reached and the
        # mean square deviation collected while holding it
        self.Readings = {'SPage(s)':array([]), 'sigma()':array([])}
        return
    def setNew_setpoint(self, value, tolerance, evalPeriod):
        """Activate a new setpoint and restart its evaluation.

        :param value: target value in sensor units.
        :param tolerance: absolute band around *value* that counts as reached.
        :param evalPeriod: seconds the reading must stay inside the band
            before the setpoint is declared reached.
        """
        self.cancelRamping()  # if a ramp is in progress, stop it first
        self.setpoint.reached.clear()
        self.setpoint.value = float(value)
        self.setpoint.tolerance = float(tolerance)
        self.setpoint.evaluatePeriod = float(evalPeriod)
        self.setpoint.timestamp = self.timer.getTotalTime()
        self.setpoint.valid = True
        print ('New setpoint activated: %f' % value)
        return
    def getSetpoint(self):
        """Return the currently active setpoint value."""
        return self.setpoint.value
    def waitForSetpoint(self, timeout=None):
        """Block until the setpoint is reached (or *timeout* seconds pass)."""
        self.setpoint.reached.wait(timeout)
        return
    def Measure(self):
        """Acquire a measurement record; must be provided by subclasses."""
        raise NotImplementedError
    def _evaluate(self):
        "See, if the setpoint value has been reached and wait an evaluation period before claiming the setpoint reached"
        self._measure_sensor()
        time = self.timer.makeTimestamp()
        self.setpoint.deviation = self.setpoint.actualValue - self.setpoint.value
        if (abs(self.setpoint.deviation) < abs(self.setpoint.tolerance)):
            if not self.setpoint.valid:
                # first sample inside the tolerance band: start the hold timer
                self.setpoint.valid = True
                self.setpoint.timestamp = self.timer.makeTimestamp()
            else:
                if not self.setpoint.ready:
                    if self.setpoint.evaluatePeriod < (time - self.setpoint.timestamp):
                        # held inside the band long enough: declare reached
                        self.setpoint.ready = True
                        print ('New setpoint evaluated: %f' % self.setpoint.value)
                        self.setpoint.reached.set()  # set the event setpoint reached !!!
                        self.setpoint.sigma = 0.0    # zero the statistics
                        self.setpoint.loopCounter = 1  # zero the counter
                    else:
                        # fix: original wrote to a misspelled attribute
                        # ("loopCount"), so the counter never advanced
                        self.setpoint.loopCounter = self.setpoint.loopCounter + 1
        else:
            # reading left the tolerance band: restart evaluation
            self.setpoint.valid = False
            self.setpoint.ready = False
            self.setpoint.sigma = 0.0   # zero the statistics
            self.setpoint.loopCounter = 1  # zero the counter
        return
    def _calculateNew(self):
        """Compute the next control signal into ``fb.signal``; subclasses
        must override (e.g. with a proportional law using ``fb.prop``)."""
        # fix: original raised the misspelled name "NotImplemetedError",
        # which would surface as a NameError at runtime
        raise NotImplementedError
    def _adjust(self):
        """Apply the current control signal to the valve."""
        self.valve.Set(self.fb.signal)
        return
    def _regLoop(self):
        """Endless regulation loop run by the background thread.

        NOTE(review): when ``state`` is False the loop busy-spins without
        sleeping; kept as in the original to preserve timing behavior.
        """
        while True:
            if self.state:
                self.fb.measLoopTime = self.timer.zeroTimer()
                self._evaluate()
                self._collect_statistics()
                self._calculateNew()
                self._adjust()
                self.timer.Wait(self.fb.loopTime)
            else:
                pass
        return
    def _collect_statistics(self):
        """Accumulate the squared deviation while the setpoint is held."""
        if self.setpoint.ready:
            dev = self.setpoint.deviation
            sqDev = dev * dev
            self.setpoint.sigma = self.setpoint.sigma + sqDev
        return
    def linRamp(self, difference=0.0, duration=0.0):
        """ spans a ramping thread in background which manipulates the setpoint by given parameters """
        class ramp(threading.Thread):
            """Background thread that linearly shifts the setpoint."""
            def __init__(self, difference, duration, regulator):
                threading.Thread.__init__(self)
                self.diff = difference    # ramp height in regulator units
                self.duration = duration  # ramp duration in seconds
                self.regulator = regulator
                self.clock = LocalTimerClass()
                self.clock.zeroTimer()
                # how many (approx) regulator steps to take before changing the setpoint
                self.roughness = 1
                self.whiteflag = False    # cooperative stop flag
                return
            def run(self):
                elapsed = 0.0
                originalValue = self.regulator.setpoint.value
                while elapsed < self.duration:
                    self.clock.Wait(self.roughness * self.regulator.fb.loopTime)
                    elapsed = self.clock.getSinceLastZeroed()
                    # manipulate the regulator setpoint:
                    if self.whiteflag:
                        break  # terminate the ramping loop
                    else:
                        value = originalValue + (self.diff / self.duration * elapsed)
                        self.regulator.setpoint.value = value
                return
            def stop(self):
                """ terminates the ramping Loop """
                self.whiteflag = True
                return
        self.ramp = ramp(difference, duration, self)
        self.ramp.start()
        return
    def rampingInProgress(self):
        """Return True while a background setpoint ramp is running."""
        try:
            # fix: Thread.isAlive() was removed in Python 3.9; the old call
            # raised AttributeError which the former bare "except:" silently
            # swallowed, making this method always return False there.
            alive = self.ramp.is_alive()
        except AttributeError:
            # no ramp has been started yet (self.ramp does not exist)
            alive = False
        return alive
    def cancelRamping(self):
        """Stop a running setpoint ramp, if any."""
        if self.rampingInProgress():
            self.ramp.stop()
    def startRegulation(self):
        """Launch the background regulation loop as a daemon thread."""
        self.regulation.daemon = True  # setDaemon() is deprecated
        self.regulation.start()
        return
    def regulate(self, state):
        "set the regulation active or inactive"
        self.state = bool(state)
        return
    def _measure_sensor(self):
        """Update ``setpoint.actualValue`` from the sensor; subclass hook."""
        raise NotImplementedError
| 3,554 | 2,996 | 23 |
ec42a3a648d541e2ff3fbadfec0b10d0704f5e07 | 9,521 | py | Python | src/api.py | ZingLix/alala | c305261bdf3c852509786596851f348b79ca3547 | [
"MIT"
] | null | null | null | src/api.py | ZingLix/alala | c305261bdf3c852509786596851f348b79ca3547 | [
"MIT"
] | null | null | null | src/api.py | ZingLix/alala | c305261bdf3c852509786596851f348b79ca3547 | [
"MIT"
] | null | null | null | import json
from os import abort
from flask import Flask, request
from flask_cors import CORS
from bson import ObjectId
import flask_login
from requests import api
import permission
import requests
from Util.db import rule_db, keywords_db, bili_mtr_db, user_db, permission_db, api_db
from rule import keywords, update_keywords_list, update_rules
from qqbot import send, get
from user import current_login_user, register_user_module
from flask_login import login_required
import secrets
from flask_login import current_user
app = Flask(__name__)
app.secret_key = secrets.token_urlsafe(16)
register_user_module(app)
CORS(app)
@app.route("/api/rules/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/rules/<rule_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/rules/<rule_id>", methods=["DELETE"], strict_slashes=False)
@login_required
@app.route("/api/rules/", methods=["GET"], strict_slashes=False)
@login_required
@app.route("/api/keywords/", methods=["GET"], strict_slashes=False)
@login_required
@app.route("/api/keywords/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/keywords/<keyword_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/keywords/<keyword_id>", methods=["DELETE"], strict_slashes=False)
@login_required
@app.route("/api/send_group/", methods=["POST"], strict_slashes=False)
@app.route("/api/send/", methods=["POST"], strict_slashes=False)
@app.route("/api/groups", methods=["GET"])
@login_required
@app.route("/api/friends", methods=["GET"])
@login_required
@app.route("/api/bili_monitor", methods=["GET"])
@login_required
@app.route("/api/bili_monitor/<rule_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/bili_monitor/<rule_id>", methods=["DELETE"], strict_slashes=False)
@login_required
@app.route("/api/bili_monitor/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/permission/", methods=["GET"])
@login_required
@app.route("/api/permission/<username>", methods=["POST"])
@login_required
@app.route("/api/self_permission/", methods=["GET"])
@login_required
@app.route("/api/key/<username>", methods=["POST"])
@login_required
@app.route("/api/api/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/api/", methods=["GET"], strict_slashes=False)
@login_required
@app.route("/api/api/<api_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/api/<api_id>", methods=["DELETE"], strict_slashes=False)
@login_required
| 30.41853 | 88 | 0.664426 | import json
from os import abort
from flask import Flask, request
from flask_cors import CORS
from bson import ObjectId
import flask_login
from requests import api
import permission
import requests
from Util.db import rule_db, keywords_db, bili_mtr_db, user_db, permission_db, api_db
from rule import keywords, update_keywords_list, update_rules
from qqbot import send, get
from user import current_login_user, register_user_module
from flask_login import login_required
import secrets
from flask_login import current_user
app = Flask(__name__)
app.secret_key = secrets.token_urlsafe(16)
register_user_module(app)
CORS(app)
@app.route("/api/rules/", methods=["POST"], strict_slashes=False)
@login_required
def add_postprocess_file():
r = request.json
r["creator"] = current_user.username
id = str(rule_db.insert_one(r).inserted_id)
update_rules()
return json.dumps({"_id": id})
@app.route("/api/rules/<rule_id>", methods=["POST"], strict_slashes=False)
@login_required
def update_postprocess_file(rule_id):
r = request.json
r.pop("creator", None)
rule_db.update_one({"_id": ObjectId(rule_id)}, {"$set": r})
update_rules()
return json.dumps({"status": "success"})
@app.route("/api/rules/<rule_id>", methods=["DELETE"], strict_slashes=False)
@login_required
def delete_postprocess_file(rule_id):
rule_db.delete_one({"_id": ObjectId(rule_id), "creator": current_user.username})
update_rules()
return json.dumps({"status": "success"})
@app.route("/api/rules/", methods=["GET"], strict_slashes=False)
@login_required
def get_postprocess_file():
res = []
if permission.get_current_permission()["role"] != 0:
rule_it = rule_db.find({"creator": current_user.username})
else:
rule_it = rule_db.find()
for item in rule_it:
item["_id"] = str(item["_id"])
res.append(item)
return json.dumps(res)
@app.route("/api/keywords/", methods=["GET"], strict_slashes=False)
@login_required
def get_keywords():
res = []
if permission.get_current_permission()["role"] != 0:
keyword_it = keywords_db.find({"creator": current_user.username})
else:
keyword_it = keywords_db.find()
for item in keyword_it:
item["_id"] = str(item["_id"])
res.append(item)
return json.dumps(res)
@app.route("/api/keywords/", methods=["POST"], strict_slashes=False)
@login_required
def add_keywords():
r = request.json
r["creator"] = current_user.username
id = str(keywords_db.insert_one(r).inserted_id)
update_keywords_list()
return json.dumps({"_id": id})
@app.route("/api/keywords/<keyword_id>", methods=["POST"], strict_slashes=False)
@login_required
def update_keywords(keyword_id):
r = request.json
r.pop("creator", None)
keywords_db.update_one({"_id": ObjectId(keyword_id)}, {"$set": r})
update_keywords_list()
return json.dumps({"status": "success"})
@app.route("/api/keywords/<keyword_id>", methods=["DELETE"], strict_slashes=False)
@login_required
def delete_keywords(keyword_id):
keywords_db.delete_one(
{"_id": ObjectId(keyword_id), "creator": current_user.username}
)
update_keywords_list()
return json.dumps({"status": "success"})
@app.route("/api/send_group/", methods=["POST"], strict_slashes=False)
def remote_send_group_msg():
recv_req = request.json
key = request.args.get("key")
if key is None:
perm = permission.get_current_permission()
else:
perm = permission_db.find_one({"key": str(key)})
if perm is None or not permission.check_per_group_permission(
perm, recv_req["target"]
):
return json.dumps({"status": "error", "error": "no permission"}), 403
send(
"sendGroupMessage",
{
"target": recv_req["target"],
"messageChain": [{"type": "Plain", "text": recv_req["message"]}],
},
)
return json.dumps({"status": "success"})
@app.route("/api/send/", methods=["POST"], strict_slashes=False)
def remote_send_personal_msg():
recv_req = request.json
key = request.args.get("key")
if key is None or key == "":
perm = permission.get_current_permission()
else:
perm = permission_db.find_one({"key": str(key)})
if perm is None or not permission.check_per_person_permission(
perm, recv_req["target"]
):
return json.dumps({"status": "error", "error": "no permission"}), 403
send(
"sendFriendMessage",
{
"target": recv_req["target"],
"messageChain": [{"type": "Plain", "text": recv_req["message"]}],
},
)
return json.dumps({"status": "success"})
@app.route("/api/groups", methods=["GET"])
@login_required
def group_list():
group = get("groupList")
group = group["data"]
perm = permission.get_current_permission()
perm_group = set(perm["group"])
if perm["role"] != 0:
group = [g for g in group if g["id"] in perm_group]
return json.dumps(group)
@app.route("/api/friends", methods=["GET"])
@login_required
def friend_list():
person = get("friendList")
person = person["data"]
perm = permission.get_current_permission()
perm_person = set(perm["person"])
if perm["role"] != 0:
person = [g for g in person if g["id"] in perm_person]
return json.dumps(person)
@app.route("/api/bili_monitor", methods=["GET"])
@login_required
def get_bili_mtr():
res = []
if permission.get_current_permission()["role"] != 0:
mtr_it = bili_mtr_db.find({"creator": current_user.username})
else:
mtr_it = bili_mtr_db.find()
for item in mtr_it:
item["_id"] = str(item["_id"])
res.append(item)
return json.dumps(res)
@app.route("/api/bili_monitor/<rule_id>", methods=["POST"], strict_slashes=False)
@login_required
def update_bili_mtr(rule_id):
r = request.json
r.pop("creator", None)
bili_mtr_db.update_one({"_id": ObjectId(rule_id)}, {"$set": r})
return json.dumps({"status": "success"})
@app.route("/api/bili_monitor/<rule_id>", methods=["DELETE"], strict_slashes=False)
@login_required
def delete_bili_mtr(rule_id):
bili_mtr_db.delete_one({"_id": ObjectId(rule_id), "creator": current_user.username})
return json.dumps({"status": "success"})
@app.route("/api/bili_monitor/", methods=["POST"], strict_slashes=False)
@login_required
def add_bili_mtr():
rule = request.json
rule["creator"] = current_user.username
bid = rule["uid"]
user_info = requests.get(
"https://api.bilibili.com/x/space/acc/info?mid={}&jsonp=jsonp".format(bid)
).json()["data"]["name"]
rule["name"] = user_info
id = str(bili_mtr_db.insert_one(rule).inserted_id)
return json.dumps({"_id": id})
@app.route("/api/permission/", methods=["GET"])
@login_required
def get_user_permission():
user = flask_login.current_user
perm = permission.get_permission(user.username)
perm_list = []
if perm["role"] == 0:
for user in user_db.find():
perm_list.append(permission.get_permission(user["username"]))
else:
perm_list.append(perm)
return json.dumps(perm_list)
@app.route("/api/permission/<username>", methods=["POST"])
@login_required
def update_user_permission(username):
perm = permission.get_current_permission()
if perm["role"] != 0:
return json.dumps({"status": "error", "error": "no permission"}), 403
r = request.json
permission.update_permission(username, {"person": r["person"], "group": r["group"]})
return json.dumps({"status": "success"})
@app.route("/api/self_permission/", methods=["GET"])
@login_required
def get_self_permission():
user = flask_login.current_user
perm = permission.get_permission(user.username)
return json.dumps(perm)
@app.route("/api/key/<username>", methods=["POST"])
@login_required
def update_key(username):
user = flask_login.current_user
if (
permission.get_current_permission()["role"] != 0
and current_user.username != username
):
return json.dumps({"status": "error", "error": "no permission"}), 403
permission_db.update_one(
{"username": username}, {"$set": {"key": permission.generate_key()}}
)
return json.dumps({"status": "success"})
@app.route("/api/api/", methods=["POST"], strict_slashes=False)
@login_required
def add_api():
rule = request.json
rule["creator"] = current_user.username
id = str(api_db.insert_one(rule).inserted_id)
return json.dumps({"_id": id})
@app.route("/api/api/", methods=["GET"], strict_slashes=False)
@login_required
def get_api():
user = flask_login.current_user
perm = permission.get_permission(user.username)
api_list = []
if perm["role"] == 0:
api_iter = api_db.find()
else:
api_iter = api_db.find({"creator": current_user.username})
for api in api_iter:
api["_id"] = str(api["_id"])
api_list.append(api)
return json.dumps(api_list)
@app.route("/api/api/<api_id>", methods=["POST"], strict_slashes=False)
@login_required
def update_api(api_id):
r = request.json
r.pop("creator", None)
api_db.update_one({"_id": ObjectId(api_id)}, {"$set": r})
return json.dumps({"status": "success"})
@app.route("/api/api/<api_id>", methods=["DELETE"], strict_slashes=False)
@login_required
def delete_api(api_id):
api_db.delete_one({"_id": ObjectId(api_id), "creator": current_user.username})
return json.dumps({"status": "success"})
| 6,388 | 0 | 528 |
11d2d9fdeeee1810d1147f8e6f96a95243432786 | 4,096 | py | Python | jacdac/jacscript_manager/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-15T21:30:36.000Z | 2022-02-15T21:30:36.000Z | jacdac/jacscript_manager/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | null | null | null | jacdac/jacscript_manager/client.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-08T19:32:45.000Z | 2022-02-08T19:32:45.000Z | # Autogenerated file. Do not edit.
from jacdac.bus import Bus, Client, EventHandlerFn, UnsubscribeFn
from .constants import *
from typing import Optional
class JacscriptManagerClient(Client):
"""
Allows for deployment and control over Jacscript virtual machine.
*
* Programs start automatically after device restart or uploading of new program.
* You can stop programs until next reset by setting the `running` register to `0`.
*
* TODO - debug interface:
* * read-globals command/register
* * globals-changed pipe
* * breakpoint command
* * some performance monitoring?
Implements a client for the `Jacscript Manager <https://microsoft.github.io/jacdac-docs/services/jacscriptmanager>`_ service.
"""
@property
def running(self) -> Optional[bool]:
"""
Indicates if the program is currently running.
To restart the program, stop it (write `0`), read back the register to make sure it's stopped,
start it, and read back.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_RUNNING).bool_value()
@running.setter
@property
def autostart(self) -> Optional[bool]:
"""
Indicates wheather the program should be re-started upon `reboot()` or `panic()`.
Defaults to `true`.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_AUTOSTART).bool_value()
@autostart.setter
@property
def logging(self) -> Optional[bool]:
"""
`log_message` reports are only sent when this is `true`.
It defaults to `false`.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_LOGGING).bool_value()
@logging.setter
@property
def program_size(self) -> Optional[int]:
"""
The size of current program.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_PROGRAM_SIZE).value()
@property
def program_hash(self) -> Optional[int]:
"""
Return FNV1A hash of the current bytecode.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_PROGRAM_HASH).value()
def on_program_panic(self, handler: EventHandlerFn) -> UnsubscribeFn:
"""
Emitted when the program calls `panic(panic_code)` or `reboot()` (`panic_code == 0` in that case).
The byte offset in byte code of the call is given in `program_counter`.
The program will restart immediately when `panic_code == 0` or in a few seconds otherwise.
"""
return self.on_event(JD_JACSCRIPT_MANAGER_EV_PROGRAM_PANIC, handler)
def on_program_change(self, handler: EventHandlerFn) -> UnsubscribeFn:
"""
Emitted after bytecode of the program has changed.
"""
return self.on_event(JD_JACSCRIPT_MANAGER_EV_PROGRAM_CHANGE, handler)
def deploy_bytecode(self, bytecode_size: int) -> None:
"""
Open pipe for streaming in the bytecode of the program. The size of the bytecode has to be declared upfront.
To clear the program, use `bytecode_size == 0`.
The bytecode is streamed over regular pipe data packets.
The bytecode shall be fully written into flash upon closing the pipe.
If `autostart` is true, the program will start after being deployed.
The data payloads, including the last one, should have a size that is a multiple of 32 bytes.
Thus, the initial bytecode_size also needs to be a multiple of 32.
"""
self.send_cmd_packed(JD_JACSCRIPT_MANAGER_CMD_DEPLOY_BYTECODE, bytecode_size)
| 37.925926 | 129 | 0.676514 | # Autogenerated file. Do not edit.
from jacdac.bus import Bus, Client, EventHandlerFn, UnsubscribeFn
from .constants import *
from typing import Optional
class JacscriptManagerClient(Client):
    """
    Allows for deployment and control over Jacscript virtual machine.
    *
    * Programs start automatically after device restart or uploading of new program.
    * You can stop programs until next reset by setting the `running` register to `0`.
    *
    * TODO - debug interface:
    * * read-globals command/register
    * * globals-changed pipe
    * * breakpoint command
    * * some performance monitoring?
    Implements a client for the `Jacscript Manager <https://microsoft.github.io/jacdac-docs/services/jacscriptmanager>`_ service.
    """
    def __init__(self, bus: Bus, role: str) -> None:
        super().__init__(bus, JD_SERVICE_CLASS_JACSCRIPT_MANAGER, JD_JACSCRIPT_MANAGER_PACK_FORMATS, role)
    @property
    def running(self) -> Optional[bool]:
        """
        Indicates if the program is currently running.
        To restart the program, stop it (write `0`), read back the register to make sure it's stopped,
        start it, and read back.
        """
        return self.register(JD_JACSCRIPT_MANAGER_REG_RUNNING).bool_value()
    @running.setter
    def running(self, value: bool) -> None:
        self.register(JD_JACSCRIPT_MANAGER_REG_RUNNING).set_values(value)
    @property
    def autostart(self) -> Optional[bool]:
        """
        Indicates whether the program should be re-started upon `reboot()` or `panic()`.
        Defaults to `true`.
        """
        return self.register(JD_JACSCRIPT_MANAGER_REG_AUTOSTART).bool_value()
    @autostart.setter
    def autostart(self, value: bool) -> None:
        self.register(JD_JACSCRIPT_MANAGER_REG_AUTOSTART).set_values(value)
    @property
    def logging(self) -> Optional[bool]:
        """
        `log_message` reports are only sent when this is `true`.
        It defaults to `false`.
        """
        return self.register(JD_JACSCRIPT_MANAGER_REG_LOGGING).bool_value()
    @logging.setter
    def logging(self, value: bool) -> None:
        self.register(JD_JACSCRIPT_MANAGER_REG_LOGGING).set_values(value)
    @property
    def program_size(self) -> Optional[int]:
        """
        The size of current program.
        """
        return self.register(JD_JACSCRIPT_MANAGER_REG_PROGRAM_SIZE).value()
    @property
    def program_hash(self) -> Optional[int]:
        """
        Return FNV1A hash of the current bytecode.
        """
        return self.register(JD_JACSCRIPT_MANAGER_REG_PROGRAM_HASH).value()
    def on_program_panic(self, handler: EventHandlerFn) -> UnsubscribeFn:
        """
        Emitted when the program calls `panic(panic_code)` or `reboot()` (`panic_code == 0` in that case).
        The byte offset in byte code of the call is given in `program_counter`.
        The program will restart immediately when `panic_code == 0` or in a few seconds otherwise.
        """
        return self.on_event(JD_JACSCRIPT_MANAGER_EV_PROGRAM_PANIC, handler)
    def on_program_change(self, handler: EventHandlerFn) -> UnsubscribeFn:
        """
        Emitted after bytecode of the program has changed.
        """
        return self.on_event(JD_JACSCRIPT_MANAGER_EV_PROGRAM_CHANGE, handler)
    def deploy_bytecode(self, bytecode_size: int) -> None:
        """
        Open pipe for streaming in the bytecode of the program. The size of the bytecode has to be declared upfront.
        To clear the program, use `bytecode_size == 0`.
        The bytecode is streamed over regular pipe data packets.
        The bytecode shall be fully written into flash upon closing the pipe.
        If `autostart` is true, the program will start after being deployed.
        The data payloads, including the last one, should have a size that is a multiple of 32 bytes.
        Thus, the initial bytecode_size also needs to be a multiple of 32.
        """
        self.send_cmd_packed(JD_JACSCRIPT_MANAGER_CMD_DEPLOY_BYTECODE, bytecode_size)
| 414 | 0 | 105 |
098e835eaf8c13ab80b472d86955cbff1de3448b | 475 | py | Python | _site/Class 7/binarysearch.py | noahklein/pythonClass | 8c33443cfc53f648440d40b6ed3e2ea952ce8876 | [
"MIT"
] | null | null | null | _site/Class 7/binarysearch.py | noahklein/pythonClass | 8c33443cfc53f648440d40b6ed3e2ea952ce8876 | [
"MIT"
] | null | null | null | _site/Class 7/binarysearch.py | noahklein/pythonClass | 8c33443cfc53f648440d40b6ed3e2ea952ce8876 | [
"MIT"
] | null | null | null | #example:
a = range(1,25)
def binarySearch(listOfNumbers,number,debug=False):
    """Recursive binary search.

    Return True when `number` occurs in the SORTED sequence `listOfNumbers`
    (any sliceable sequence, e.g. list or range), else False.  With
    debug=True each inspected sub-sequence is printed.
    """
    if debug:
        print(listOfNumbers)
    length = len(listOfNumbers)
    if length == 0:
        # Fix: the original raised IndexError on an empty sequence.
        return False
    if listOfNumbers[length//2] == number:
        return True
    if length == 1:
        return False
    elif listOfNumbers[length//2] > number:
        return binarySearch(listOfNumbers[:length//2], number,debug=debug)
    else: # if its less than number
        return binarySearch(listOfNumbers[length//2:], number,debug=debug)
#example:
a = range(1,25)
print(binarySearch(a,1,debug=True)) | 392 | 0 | 22 |
087959f59cb4f153652d2ea4b82d4f53ec62cdb3 | 2,435 | py | Python | foodx_devops_tools/pipeline_config/deployments.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 3 | 2021-06-23T20:53:43.000Z | 2022-01-26T14:19:43.000Z | foodx_devops_tools/pipeline_config/deployments.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 33 | 2021-08-09T15:44:51.000Z | 2022-03-03T18:28:02.000Z | foodx_devops_tools/pipeline_config/deployments.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 1 | 2021-06-23T20:53:52.000Z | 2021-06-23T20:53:52.000Z | # Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Subscriptions deployment configuration I/O."""
import pathlib
import typing
import pydantic
from ._exceptions import DeploymentsDefinitionError
from ._loader import load_configuration
ENTITY_NAME = "deployments"
class DeploymentLocations(pydantic.BaseModel):
    """Define primary and/or secondary locations for deployment."""
    # Name of the primary deployment location.
    primary: str
    # Optional secondary location; None for single-location deployments.
    secondary: typing.Optional[str]
class DeploymentSubscriptionReference(pydantic.BaseModel):
    """A subscription reference in a deployment definition."""
    # Optional git-reference patterns — presumably restrict when this
    # subscription is deployed; confirm against the release-flow caller.
    gitref_patterns: typing.Optional[typing.List[str]]
    # Locations (primary and optional secondary) this subscription deploys to.
    locations: typing.List[DeploymentLocations]
    # Root fully-qualified domain name for this subscription.
    root_fqdn: str
class SingularDeployment(pydantic.BaseModel):
    """Definition of a singular deployment."""
    # Mapping of subscription name to its subscription reference.
    subscriptions: typing.Dict[str, DeploymentSubscriptionReference]
ValueType = typing.Dict[str, SingularDeployment]
class DeploymentsEndpointsDefinitions(pydantic.BaseModel):
    """Definition of deployment tuples and URL endpoints."""
    # URL endpoints associated with the deployments.
    url_endpoints: typing.List[str]
    # Mapping of deployment-tuple name to its deployment definition.
    deployment_tuples: ValueType
T = typing.TypeVar("T", bound="DeploymentsDefinition")
class DeploymentsDefinition(pydantic.BaseModel):
    """Definition of deployments."""
    # Container holding URL endpoints and the named deployment tuples.
    deployments: DeploymentsEndpointsDefinitions
    @pydantic.validator(ENTITY_NAME)
    def check_deployments(
        cls: pydantic.BaseModel, value: DeploymentsEndpointsDefinitions
    ) -> DeploymentsEndpointsDefinitions:
        """Validate ``deployment_tuples`` field."""
        # NOTE(review): `not value` only fires when value is None — pydantic
        # model instances are always truthy; confirm the intended guard.
        if not value:
            raise ValueError("Empty deployment prohibited")
        if not value.deployment_tuples:
            raise ValueError("Empty deployment names prohibited")
        return value
def load_deployments(file_path: pathlib.Path) -> DeploymentsDefinition:
    """
    Load deployment definitions from file.
    Args:
        file_path: Path to the deployments definition file.
    Returns:
        Deployment definitions.
    Raises:
        DeploymentsDefinitionError: If an error occurs loading the file.
    """
    loaded = load_configuration(
        file_path, DeploymentsDefinition, DeploymentsDefinitionError, ENTITY_NAME
    )
    return typing.cast(DeploymentsDefinition, loaded)
| 26.182796 | 73 | 0.731828 | # Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Subscriptions deployment configuration I/O."""
import pathlib
import typing
import pydantic
from ._exceptions import DeploymentsDefinitionError
from ._loader import load_configuration
ENTITY_NAME = "deployments"
class DeploymentLocations(pydantic.BaseModel):
    """Define primary and/or secondary locations for deployment."""
    # Name of the primary deployment location.
    primary: str
    # Optional secondary location; None for single-location deployments.
    secondary: typing.Optional[str]
class DeploymentSubscriptionReference(pydantic.BaseModel):
    """A subscription reference in a deployment definition."""
    # Optional git-reference patterns — presumably restrict when this
    # subscription is deployed; confirm against the release-flow caller.
    gitref_patterns: typing.Optional[typing.List[str]]
    # Locations (primary and optional secondary) this subscription deploys to.
    locations: typing.List[DeploymentLocations]
    # Root fully-qualified domain name for this subscription.
    root_fqdn: str
class SingularDeployment(pydantic.BaseModel):
    """Definition of a singular deployment."""
    # Mapping of subscription name to its subscription reference.
    subscriptions: typing.Dict[str, DeploymentSubscriptionReference]
ValueType = typing.Dict[str, SingularDeployment]
class DeploymentsEndpointsDefinitions(pydantic.BaseModel):
    """Definition of deployment tuples and URL endpoints."""
    # URL endpoints associated with the deployments.
    url_endpoints: typing.List[str]
    # Mapping of deployment-tuple name to its deployment definition.
    deployment_tuples: ValueType
T = typing.TypeVar("T", bound="DeploymentsDefinition")
class DeploymentsDefinition(pydantic.BaseModel):
    """Definition of deployments."""
    # Container holding URL endpoints and the named deployment tuples.
    deployments: DeploymentsEndpointsDefinitions
    @pydantic.validator(ENTITY_NAME)
    def check_deployments(
        cls: pydantic.BaseModel, value: DeploymentsEndpointsDefinitions
    ) -> DeploymentsEndpointsDefinitions:
        """Validate ``deployment_tuples`` field."""
        # NOTE(review): `not value` only fires when value is None — pydantic
        # model instances are always truthy; confirm the intended guard.
        if not value:
            raise ValueError("Empty deployment prohibited")
        if not value.deployment_tuples:
            raise ValueError("Empty deployment names prohibited")
        return value
def load_deployments(file_path: pathlib.Path) -> DeploymentsDefinition:
    """
    Load deployment definitions from file.
    Args:
        file_path: Path to the deployments definition file.
    Returns:
        Deployment definitions.
    Raises:
        DeploymentsDefinitionError: If an error occurs loading the file.
    """
    loaded = load_configuration(
        file_path, DeploymentsDefinition, DeploymentsDefinitionError, ENTITY_NAME
    )
    return typing.cast(DeploymentsDefinition, loaded)
| 0 | 0 | 0 |
2c9422bc77aae39ebb1357b4e31d9b7bdbe7f017 | 36,548 | py | Python | codes/singint.py | Hadrien-Montanelli/singintpy | 1706afe42d0cc6e0f3c53759d489f7209e50ef29 | [
"MIT"
] | null | null | null | codes/singint.py | Hadrien-Montanelli/singintpy | 1706afe42d0cc6e0f3c53759d489f7209e50ef29 | [
"MIT"
] | null | null | null | codes/singint.py | Hadrien-Montanelli/singintpy | 1706afe42d0cc6e0f3c53759d489f7209e50ef29 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 15:39:49 2021
Copyright 2021 by Hadrien Montanelli.
"""
import numpy as np
from numpy.linalg import norm
from scipy.optimize import minimize
def singint(A, x0, n, p, trans=(True, True, True), optim='BFGS', quad='numerical'):
    """
    Compute a singular or near-singular integral over a quadratic triangle via
    the algorithm presented in [1]. Mathematically, it computes the integral
    I = int_T dS(x)/|x-x0|
    over a quadratic triangle T.
    Inputs
    ------
    A : numpy.ndarray
        The six points that define the quadratic triangle as a 6x3 matrix.
    x0 : numpy.ndarray
        The singularity as a 3x1 vector.
    n : int
        The quadrature size; 2D integrals will use n*n points, 1D integrals
        will use 10*n points.
    p : int
        The regularization order; -1, 0, or 1 for T_{-1}, T_0 or T_1
        regularization.
    trans : sequence of bool
        Whether to use transplanted quadrature for T_{-1}, T_0 and T_1
        regularization.  (Default is now an immutable tuple; the original
        used a mutable list default, a Python anti-pattern.)
    optim : str
        The method used for locating the singularity ('BFGS' or 'Newton').
    quad : str
        The method for computing the integral of T_{-1} ('numerical' or 'exact').
    Output
    ------
    I : float
        The value of the integral.
    Raises
    ------
    ValueError
        If p is not one of -1, 0, 1 (the original left I unbound and raised
        NameError in that case).
    References
    ----------
    [1] H. Montanelli, M. Aussal and H. Haddar, Computing weakly singular and
    near-singular integrals in high-order boundary elements, submitted.
    """
    if p not in (-1, 0, 1):
        raise ValueError("p must be -1, 0 or 1, got %r" % (p,))
    # Tolerance for optimization and near-singularities:
    tol = 1e-12
    # Step 1: Map back to the reference triangle.
    F = map_func(A)
    J = map_jac(A)
    H = map_hess(A)
    # Step 2: Locate the singularity or near-singularity.
    x0h = locate_sing(x0, F, J, H, optim, tol)
    eh = F(x0h) - x0
    h = norm(eh)
    if h > tol:
        eh = eh / h
    else:
        # The point lies (numerically) on the surface: no regular direction.
        h = 0
        eh = np.zeros(3)
    # Step 3: Compute the regularized part with n*n-point 2D quadrature.
    Ireg = compute_Ireg(x0, x0h, h, eh, F, J, H, n, p)
    # Step 4: Integrate the Taylor terms with 10*n-point 1D quadrature.
    scl = 10
    I = compute_In1(x0h, h, J, scl*n, quad, trans, tol) + Ireg
    if p > -1:
        I = I + compute_I0(x0h, h, eh, J, H, scl*n, trans, tol)
    if p == 1:
        I = I + compute_I1(x0h, h, eh, J, H, scl*n, trans, tol)
    return I
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 15:39:49 2021
Copyright 2021 by Hadrien Montanelli.
"""
import numpy as np
from numpy.linalg import norm
from scipy.optimize import minimize
def singint(A, x0, n, p, trans=(True, True, True), optim='BFGS', quad='numerical'):
    """
    Compute a singular or near-singular integral over a quadratic triangle via
    the algorithm presented in [1]. Mathematically, it computes the integral
    I = int_T dS(x)/|x-x0|
    over a quadratic triangle T.
    Inputs
    ------
    A : numpy.ndarray
        The six points that define the quadratic triangle as a 6x3 matrix.
    x0 : numpy.ndarray
        The singularity as a 3x1 vector.
    n : int
        The quadrature size; 2D integrals will use n*n points, 1D integrals
        will use 10*n points.
    p : int
        The regularization order; -1, 0, or 1 for T_{-1}, T_0 or T_1
        regularization.
    trans : sequence of bool
        Whether to use transplanted quadrature for T_{-1}, T_0 and T_1
        regularization.  (Default is now an immutable tuple; the original
        used a mutable list default, a Python anti-pattern.)
    optim : str
        The method used for locating the singularity ('BFGS' or 'Newton').
    quad : str
        The method for computing the integral of T_{-1} ('numerical' or 'exact').
    Output
    ------
    I : float
        The value of the integral.
    Raises
    ------
    ValueError
        If p is not one of -1, 0, 1 (the original left I unbound and raised
        NameError in that case).
    References
    ----------
    [1] H. Montanelli, M. Aussal and H. Haddar, Computing weakly singular and
    near-singular integrals in high-order boundary elements, submitted.
    """
    if p not in (-1, 0, 1):
        raise ValueError("p must be -1, 0 or 1, got %r" % (p,))
    # Tolerance for optimization and near-singularities:
    tol = 1e-12
    # Step 1: Map back to the reference triangle.
    F = map_func(A)
    J = map_jac(A)
    H = map_hess(A)
    # Step 2: Locate the singularity or near-singularity.
    x0h = locate_sing(x0, F, J, H, optim, tol)
    eh = F(x0h) - x0
    h = norm(eh)
    if h > tol:
        eh = eh / h
    else:
        # The point lies (numerically) on the surface: no regular direction.
        h = 0
        eh = np.zeros(3)
    # Step 3: Compute the regularized part with n*n-point 2D quadrature.
    Ireg = compute_Ireg(x0, x0h, h, eh, F, J, H, n, p)
    # Step 4: Integrate the Taylor terms with 10*n-point 1D quadrature.
    scl = 10
    I = compute_In1(x0h, h, J, scl*n, quad, trans, tol) + Ireg
    if p > -1:
        I = I + compute_I0(x0h, h, eh, J, H, scl*n, trans, tol)
    if p == 1:
        I = I + compute_I1(x0h, h, eh, J, H, scl*n, trans, tol)
    return I
def locate_sing(x0, F, J, H, method, tol):
    """Find the point x0h on the reference triangle whose image F(x0h) is
    closest to x0, by minimizing |F(x) - x0|^2 with BFGS or a damped Newton
    method.  Returns a length-2 array (reference coordinates); returns None
    implicitly if `method` is neither 'BFGS' nor 'Newton'.
    """
    # Cost function and gradient:
    e = lambda x: F(x) - x0
    cost_func = lambda x: norm(e(x))**2
    cost_grad = lambda x: 2*np.array([e(x) @ J[0](x), e(x) @ J[1](x)])
    # BFGS:
    if (method == 'BFGS'):
        res = minimize(cost_func, np.zeros(2), method='BFGS', jac=cost_grad,
                       options={'disp': False}, tol=tol)
        x0h = res.x
        return x0h
    # Gradient descent followed by Newton's method:
    if (method == 'Newton'):
        # Gauss-Newton-style Hessian of the cost (includes second-derivative
        # terms e(x)@H[i](x), so this is the full Hessian).
        cost_hess = lambda x: 2*np.array([[e(x)@H[0](x) + J[0](x)@J[0](x),
                                           e(x)@H[1](x) + J[0](x)@J[1](x)],
                                          [e(x)@H[1](x) + J[0](x)@J[1](x),
                                           e(x)@H[2](x) + J[1](x)@J[1](x)]])
        x0h = np.zeros(2)
        G0 = np.ones_like(x0h)
        c = 1e-4    # Armijo sufficient-decrease constant
        beta = 1e-3 # minimum eigenvalue shift for positive-definiteness
        tau = 0.5   # backtracking step-shrink factor
        itr_max = 1000
        itr = 0
        while (norm(G0) > tol and itr < itr_max):
            G0 = cost_grad(x0h)
            H0 = cost_hess(x0h)
            # Shift the Hessian so it is positive definite (descent direction).
            l = np.min(np.linalg.eig(H0)[0]);
            if (l < 0):
                H0 += max(0, beta - l)*np.eye(2);
            p = -np.linalg.solve(H0, G0)
            alpha = 1
            # Armijo backtracking line search, skipped near convergence.
            if (norm(G0) > 1e-2):
                while (cost_func(x0h + alpha*p) > cost_func(x0h) + c*alpha*(p @ G0)):
                    alpha *= tau
            x0h = x0h + alpha*p
            itr += 1
        return x0h
def compute_Ireg(x0, x0h, h, eh, F, J, H, n, p):
    """Regularized part of the singular integral via 2D Gauss quadrature.

    Subtracts the Taylor terms T_{-1} (and T0, T1 when p >= 0 / p == 1)
    from the singular integrand 1/|F(x) - x0| and integrates the smooth
    remainder with an n x n tensor-product Gauss rule mapped to the
    reference triangle.

    Parameters: singular point x0 (3D), its preimage x0h (2D), distance h
    and unit direction eh of the near-singularity, map F with Jacobian J
    and Hessian H, quadrature order n, and Taylor order p in {-1, 0, 1}.
    """
    # Note: the variable v below corresponds to psi in [1].
    # Singular integrand:
    X0 = np.zeros([3, n, n])
    X0[0] = x0[0]
    X0[1] = x0[1]
    X0[2] = x0[2]
    tmp = lambda x: norm(np.cross(J[0](x), J[1](x), axis=0), axis=0)
    nrm = lambda x: norm(F(x) - X0, axis=0)
    sing_func = lambda x: tmp(x)/nrm(x)
    # Tn1 term (leading 1/r Taylor term, frozen at x0h):
    X0h = np.zeros([2, n, n])
    X0h[0] = x0h[0]
    X0h[1] = x0h[1]
    tmp0 = norm(np.cross(J[0](x0h), J[1](x0h)))
    J0 = np.vstack((J[0](x0h), J[1](x0h))).T
    nrm0 = lambda x: np.sqrt(norm(mul_func(J0, x - X0h, n), axis=0)**2 + h**2)
    Tn1 = lambda x: tmp0/nrm0(x)
    # T0 term (first-order Taylor correction, only for p >= 0):
    if (p > -1):
        # Jacobian and Hessian:
        J1 = J[0](x0h)
        J2 = J[1](x0h)
        H11 = H[0](x0h)
        H12 = H[1](x0h)
        H22 = H[2](x0h)
        # Compute v and its first derivatives:
        P = np.cross(J1, J2)
        dP1 = np.cross(H11, J2) + np.cross(J1, H12)
        dP2 = np.cross(H12, J2) + np.cross(J1, H22)
        v = norm(P)
        dv1 = 1/v * P @ dP1
        dv2 = 1/v * P @ dP2
        # Compute the coefficients:
        a1 = eh @ H11
        a2 = 2 * (eh @ H12)
        a3 = eh @ H22
        c1 = J1 @ H11
        c2 = 2 * (J1 @ H12) + J2 @ H11
        c3 = 2 * (J2 @ H12) + J1 @ H22
        c4 = J2 @ H22
        # Term with v':
        dx1 = lambda x: x[0] - x0h[0]
        dx2 = lambda x: x[1] - x0h[1]
        dv = lambda x: dv1*dx1(x) + dv2*dx2(x)
        T0v = lambda x: dv(x)/nrm0(x)
        # Terms with a's:
        Ta1 = lambda x: (-h*v/2*a1) * dx1(x)**2/nrm0(x)**3
        Ta2 = lambda x: (-h*v/2*a2) * dx1(x)*dx2(x)/nrm0(x)**3
        Ta3 = lambda x: (-h*v/2*a3) * dx2(x)**2/nrm0(x)**3
        Ta = lambda x: Ta1(x) + Ta2(x) + Ta3(x)
        # Terms with c's:
        Tc1 = lambda x: (-v/2*c1) * dx1(x)**3/nrm0(x)**3
        Tc2 = lambda x: (-v/2*c2) * dx1(x)**2*dx2(x)/nrm0(x)**3
        Tc3 = lambda x: (-v/2*c3) * dx1(x)*dx2(x)**2/nrm0(x)**3
        Tc4 = lambda x: (-v/2*c4) * dx2(x)**3/nrm0(x)**3
        Tc = lambda x: Tc1(x) + Tc2(x) + Tc3(x) + Tc4(x)
        # Assemble:
        T0 = lambda x: T0v(x) + Ta(x) + Tc(x)
    # T1 term (second-order Taylor correction, only for p == 1):
    if (p > 0):
        # Compute second derivatives of v:
        dP11 = 2 * np.cross(H11, H12)
        dP12 = np.cross(H11, H22) + np.cross(H12, H12)
        dP22 = 2 * np.cross(H12, H22)
        dv11 = 1/v**2 * (v * dP1 @ dP1 + v * P @ dP11 - dv1 * P @ dP1)
        dv12 = 1/v**2 * (v * dP1 @ dP2 + v * P @ dP12 - dv2 * P @ dP1)
        dv22 = 1/v**2 * (v * dP2 @ dP2 + v * P @ dP22 - dv2 * P @ dP2)
        # Compute the coefficients:
        d1 = 1/4 * (H11 @ H11)
        d2 = H11 @ H12
        d3 = 1/2 * (H11 @ H22) + H12 @ H12
        d4 = H22 @ H12
        d5 = 1/4 * (H22 @ H22)
        e1 = -1/2 * a1*dv1
        e2 = -1/2 * (a2*dv1 + a1*dv2)
        e3 = -1/2 * (a3*dv1 + a2*dv2)
        e4 = -1/2 * a3*dv2
        f1 = 3/8 * v * a1**2
        f2 = 3/8 * v * 2*a1*a2
        f3 = 3/8 * v * (a2**2 + 2*a1*a3)
        f4 = 3/8 * v * 2*a2*a3
        f5 = 3/8 * v * a3**2
        g1 = -1/2 * (c1*dv1 + d1*v)
        g2 = -1/2 * (c2*dv1 + c1*dv2 + d2*v)
        g3 = -1/2 * (c3*dv1 + c2*dv2 + d3*v)
        g4 = -1/2 * (c4*dv1 + c3*dv2 + d4*v)
        g5 = -1/2 * (c4*dv2 + d5*v)
        h1 = 3/8 * v * c1**2
        h2 = 3/8 * v * 2*c1*c2
        h3 = 3/8 * v * (c2**2 + 2*c1*c3)
        h4 = 3/8 * v * (2*c1*c4 + 2*c2*c3)
        h5 = 3/8 * v * (c3**2 + 2*c2*c4)
        h6 = 3/8 * v * 2*c3*c4
        h7 = 3/8 * v * c4**2
        # Term with v'':
        dx1 = lambda x: x[0] - x0h[0]
        dx2 = lambda x: x[1] - x0h[1]
        d2v = lambda x: 1/2*dv11*dx1(x)**2 + 1/2*dv22*dx2(x)**2
        T1v = lambda x: (d2v(x) + dv12*dx1(x)*dx2(x))/nrm0(x)
        # Terms with e's:
        Te1 = lambda x: (h*e1) * dx1(x)**3/nrm0(x)**3
        Te2 = lambda x: (h*e2) * dx1(x)**2*dx2(x)/nrm0(x)**3
        Te3 = lambda x: (h*e3) * dx1(x)*dx2(x)**2/nrm0(x)**3
        Te4 = lambda x: (h*e4) * dx2(x)**3/nrm0(x)**3
        Te = lambda x: Te1(x) + Te2(x) + Te3(x) + Te4(x)
        # Terms with f's:
        Tf1 = lambda x: (h**2*f1) * dx1(x)**4/nrm0(x)**5
        Tf2 = lambda x: (h**2*f2) * dx1(x)**3*dx2(x)/nrm0(x)**5
        Tf3 = lambda x: (h**2*f3) * dx1(x)**2*dx2(x)**2/nrm0(x)**5
        Tf4 = lambda x: (h**2*f4) * dx1(x)*dx2(x)**3/nrm0(x)**5
        Tf5 = lambda x: (h**2*f5) * dx2(x)**4/nrm0(x)**5
        Tf = lambda x: Tf1(x) + Tf2(x) + Tf3(x) + Tf4(x) + Tf5(x)
        # Terms with g's:
        Tg1 = lambda x: g1 * dx1(x)**4/nrm0(x)**3
        Tg2 = lambda x: g2 * dx1(x)**3*dx2(x)/nrm0(x)**3
        Tg3 = lambda x: g3 * dx1(x)**2*dx2(x)**2/nrm0(x)**3
        Tg4 = lambda x: g4 * dx1(x)*dx2(x)**3/nrm0(x)**3
        Tg5 = lambda x: g5 * dx2(x)**4/nrm0(x)**3
        Tg = lambda x: Tg1(x) + Tg2(x) + Tg3(x) + Tg4(x) + Tg5(x)
        # Terms with h's:
        Th1 = lambda x: h1 * dx1(x)**6/nrm0(x)**5
        Th2 = lambda x: h2 * dx1(x)**5*dx2(x)/nrm0(x)**5
        Th3 = lambda x: h3 * dx1(x)**4*dx2(x)**2/nrm0(x)**5
        Th4 = lambda x: h4 * dx1(x)**3*dx2(x)**3/nrm0(x)**5
        Th5 = lambda x: h5 * dx1(x)**2*dx2(x)**4/nrm0(x)**5
        Th6 = lambda x: h6 * dx1(x)*dx2(x)**5/nrm0(x)**5
        Th7 = lambda x: h7 * dx2(x)**6/nrm0(x)**5
        Th15 = lambda x: Th1(x) + Th2(x) + Th3(x) + Th4(x) + Th5(x)
        Th67 = lambda x: Th6(x) + Th7(x)
        Th = lambda x: Th15(x) + Th67(x)
        # Assemble:
        T1 = lambda x: T1v(x) + Te(x) + Tf(x) + Tg(x) + Th(x)
    # Regularized integrand (NOTE: undefined for p outside {-1, 0, 1}):
    if (p == -1):
        reg_func = lambda x: sing_func(x) - Tn1(x)
    elif (p == 0):
        reg_func = lambda x: sing_func(x) - Tn1(x) - T0(x)
    elif (p == 1):
        reg_func = lambda x: sing_func(x) - Tn1(x) - T0(x) - T1(x)
    # 2D Gauss quadrature (Duffy-type mapping of the square to the triangle):
    x, w = np.polynomial.legendre.leggauss(n)
    W = 1/8 * np.outer(w*(1 + x), w)
    x1 = 1/2 * np.outer(1 - x, np.ones(n))
    x2 = 1/4 * np.outer(1 + x, 1 - x)
    X = np.array([x1, x2])
    Ireg = np.sum(W * reg_func(X))
    return Ireg
def compute_In1(x0h, h, J, n, quad, trans, tol):
    """Integral of the leading Taylor term T_{-1} over the reference triangle.

    quad == 'exact' uses closed-form edge antiderivatives (divergence-theorem
    reduction to the three edges); quad == 'numerical' uses 1D Gauss
    quadrature along the edges, with an optional sinh-transplanted rule
    (trans[0]) when the singularity preimage is near a vertex.
    """
    # Note: the variable v below corresponds to psi in [1].
    # Exact quadrature:
    if (quad == 'exact'):
        # Compute triangle data.
        # NOTE: J is rebound to the constant 3x2 Jacobian matrix here, and n
        # below is rebound to the triangle normal (the quadrature order n is
        # not needed in this branch).
        J = np.vstack((J[0](x0h), J[1](x0h))).T
        a1 = J @ np.array([0, 0])
        a2 = J @ np.array([1, 0])
        a3 = J @ np.array([0, 1])
        tau1 = a2 - a1
        tau2 = a3 - a2
        tau3 = a1 - a3
        tau1 = tau1/norm(tau1)
        tau2 = tau2/norm(tau2)
        tau3 = tau3/norm(tau3)
        n = np.cross(a2 - a1, a3 - a2)
        A = 0.5*norm(n)
        n /= (2*A)
        nu1 = np.cross(tau1, n)
        nu2 = np.cross(tau2, n)
        nu3 = np.cross(tau3, n)
        nu1 = nu1/norm(nu1)
        nu2 = nu2/norm(nu2)
        nu3 = nu3/norm(nu3)
        # Antiderivative (h == 0 is the on-surface singular case):
        if (h < tol):
            R = lambda t,s,h: np.arcsinh(t/s)
        else:
            R0 = lambda t,s,h: np.arcsinh(t/np.sqrt(s**2 + h**2))
            R1 = lambda t,s,h: np.arctan(t/s)
            tmp0 = lambda t,s,h: s**2 + h**2 + 1j*t*s
            tmp1 = lambda t,s,h: h * np.sqrt(s**2 + h**2 + t**2)
            tmp2 = lambda t,s,h: np.arctanh(tmp0(t,s,h)/tmp1(t,s,h))
            R2 = lambda t,s,h: np.imag(tmp2(t,s,h) - 1j*np.pi/2*sign_func(t))
            R = lambda t,s,h: R0(t,s,h) - h/s * R1(t,s,h) - h/s * R2(t,s,h)
        # Integrate on each edge (skip edges the projection lies on):
        In1 = 0
        s1 = (a1 - J @ x0h) @ nu1
        s2 = (a2 - J @ x0h) @ nu2
        s3 = (a3 - J @ x0h) @ nu3
        if (abs(s1) > tol):
            t1p = (a2 - J @ x0h) @ tau1
            t1m = (a1 - J @ x0h) @ tau1
            In1 += s1 * (R(t1p, s1, h) - R(t1m, s1, h))
        if (abs(s2) > tol):
            t2p = (a3 - J @ x0h) @ tau2
            t2m = (a2 - J @ x0h) @ tau2
            In1 += s2 * (R(t2p, s2, h) - R(t2m, s2, h))
        if (abs(s3) > tol):
            t3p = (a1 - J @ x0h) @ tau3
            t3m = (a3 - J @ x0h) @ tau3
            In1 += s3 * (R(t3p, s3, h) - R(t3m, s3, h))
        # Scale by area:
        In1 *= norm(np.cross(J[:,0], J[:,1]))/(2*A)
    # Numerical quadrature:
    if (quad == 'numerical'):
        # Points and weight for 1D Gauss quadrature on [-1,1]:
        t0, w0 = np.polynomial.legendre.leggauss(n)
        # Jacobian:
        J1 = J[0](x0h)
        J2 = J[1](x0h)
        # Compute v:
        P = np.cross(J1, J2)
        v = norm(P)
        # Integrals on each edge (s1, s2, s3 = distances of x0h to the edges):
        s1 = x0h[1]
        s2 = np.sqrt(2)/2*(1 - x0h[0] - x0h[1])
        s3 = x0h[0]
        In1 = 0
        if (abs(s1) > tol):
            # Transplanted Gauss quadrature if near-vertex:
            if (abs(s1) <= 1e-2 and trans[0] == True):
                t, scl = conf_func(t0, -1 + 2*x0h[0], 2*s1)
                w = w0 * scl
            else:
                t = t0
                w = w0
            # Parametrization and norm:
            r1 = lambda t: -x0h[0] + (t + 1)/2
            r2 = lambda t: -x0h[1]
            nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
            nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
            nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
            nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
            # Term with v:
            tmp = lambda t: (np.sqrt(nrm(t)**2 + h**2) - h)/nrm(t)**2
            g = lambda t: v*tmp(t)
            In1 += s1/2 * (w @ g(t))
        if (abs(s2) > tol):
            # Transplanted Gauss quadrature if near-vertex:
            if (s2 <= 1e-2 and trans[0] == True):
                t, scl = conf_func(t0, -1 + 2*x0h[1], 2*s2)
                w = w0 * scl
            else:
                t = t0
                w = w0
            # Parametrization and norm:
            r1 = lambda t: 1 - x0h[0] - (t + 1)/2
            r2 = lambda t: -x0h[1] + (t + 1)/2
            nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
            nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
            nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
            nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
            # Term with v:
            tmp = lambda t: (np.sqrt(nrm(t)**2 + h**2) - h)/nrm(t)**2
            g = lambda t: v*tmp(t)
            In1 += np.sqrt(2)*s2/2 * (w @ g(t))
        if (abs(s3) > tol):
            # Transplanted Gauss quadrature if near-vertex:
            if (s3 <= 1e-2 and trans[0] == True):
                t, scl = conf_func(t0, -1 + 2*(1 - x0h[1]), 2*s3)
                w = w0 * scl
            else:
                t = t0
                w = w0
            # Parametrization and norm:
            r1 = lambda t: -x0h[0]
            r2 = lambda t: 1 - x0h[1] - (1 + t)/2
            nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
            nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
            nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
            nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
            # Term with v:
            tmp = lambda t: (np.sqrt(nrm(t)**2 + h**2) - h)/nrm(t)**2
            g = lambda t: v*tmp(t)
            In1 += s3/2 * (w @ g(t))
    return In1
def compute_I0(x0h, h, eh, J, H, n, trans, tol):
    """Integral of the Taylor term T0 over the reference triangle.

    Reduces the 2D integral to 1D Gauss quadrature along the three edges,
    with an optional sinh-transplanted rule (trans[1]) when the singularity
    preimage x0h is near a vertex. h is the near-singularity distance, eh
    its unit direction; J and H are the Jacobian/Hessian callables of the
    element map and n the quadrature order.
    """
    # Note: the variable v below corresponds to psi in [1].
    # Points and weight for 1D Gauss quadrature on [-1,1]:
    t0, w0 = np.polynomial.legendre.leggauss(n)
    # Jacobian and Hessian:
    J1 = J[0](x0h)
    J2 = J[1](x0h)
    H11 = H[0](x0h)
    H12 = H[1](x0h)
    H22 = H[2](x0h)
    # Compute v and its first derivatives:
    P = np.cross(J1, J2)
    v = norm(P)
    dv1 = 1/v * (P @ (np.cross(H11, J2) + np.cross(J1, H12)))
    dv2 = 1/v * (P @ (np.cross(H12, J2) + np.cross(J1, H22)))
    # Compute the coefficients:
    a1 = eh @ H11
    a2 = 2 * (eh @ H12)
    a3 = eh @ H22
    A = np.array([a1, a2, a3])
    c1 = J1 @ H11
    c2 = 2 * (J1 @ H12) + J2 @ H11
    c3 = 2 * (J2 @ H12) + J1 @ H22
    c4 = J2 @ H22
    C = np.array([c1, c2, c3, c4])
    # Integrals on each edge (s1, s2, s3 = distances of x0h to the edges):
    s1 = x0h[1]
    s2 = np.sqrt(2)/2*(1 - x0h[0] - x0h[1])
    s3 = x0h[0]
    I0 = 0
    if (abs(s1) > tol):
        # Transplanted Gauss quadrature if near-vertex:
        if (s1 <= 1e-2 and trans[1] == True):
            t, scl = conf_func(t0, -1 + 2*x0h[0], 2*s1)
            w = w0 * scl
        else:
            t = t0
            w = w0
        # Parametrization and norm:
        r1 = lambda t: -x0h[0] + (t + 1)/2
        r2 = lambda t: -x0h[1]
        nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
        nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
        nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
        nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
        # Term with v':
        tmp0 = lambda t: nrm(t)*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**3
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        dv = lambda t: dv1*r1(t) + dv2*r2(t)
        g = lambda t: dv(t)*tmp(t)
        I0 += s1/2 * (w @ g(t))
        # Terms with a's:
        tmp0 = lambda t: nrm(t)**2
        tmp1 = lambda t: 2*h*(h - np.sqrt(nrm(t)**2 + h**2))
        tmp2 = lambda t: nrm(t)**4*np.sqrt(nrm(t)**2 + h**2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(3):
            g = lambda t: r1(t)**(3-k-1)*r2(t)**k*tmp(t)
            I0 += (-h*v/2*A[k]) * s1/2 * (w @ g(t))
        # Terms with c's:
        tmp0 = lambda t: (3*h**2*nrm(t) + nrm(t)**3)/np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -3*h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**5
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(4):
            g = lambda t: r1(t)**(4-k-1)*r2(t)**k*tmp(t)
            I0 += (-v/2*C[k]) * s1/2 * (w @ g(t))
    if (abs(s2) > tol):
        # Transplanted Gauss quadrature if near-vertex:
        if (s2 <= 1e-2 and trans[1] == True):
            t, scl = conf_func(t0, -1 + 2*x0h[1], 2*s2)
            w = w0 * scl
        else:
            t = t0
            w = w0
        # Parametrization and norm:
        r1 = lambda t: 1 - x0h[0] - (t + 1)/2
        r2 = lambda t: -x0h[1] + (t + 1)/2
        nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
        nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
        nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
        nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
        # Term with v':
        tmp0 = lambda t: nrm(t)*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**3
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        dv = lambda t: dv1*r1(t) + dv2*r2(t)
        g = lambda t: dv(t)*tmp(t)
        I0 += np.sqrt(2)*s2/2 * (w @ g(t))
        # Terms with a's:
        tmp0 = lambda t: nrm(t)**2
        tmp1 = lambda t: 2*h*(h - np.sqrt(nrm(t)**2 + h**2))
        tmp2 = lambda t: nrm(t)**4*np.sqrt(nrm(t)**2 + h**2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(3):
            g = lambda t: r1(t)**(3-k-1)*r2(t)**k*tmp(t)
            I0 += (-h*v/2*A[k]) * np.sqrt(2)*s2/2 * (w @ g(t))
        # Terms with c's:
        tmp0 = lambda t: (3*h**2*nrm(t) + nrm(t)**3)/np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -3*h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**5
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(4):
            g = lambda t: r1(t)**(4-k-1)*r2(t)**k*tmp(t)
            I0 += (-v/2*C[k]) * np.sqrt(2)*s2/2 * (w @ g(t))
    if (abs(s3) > tol):
        # Transplanted Gauss quadrature if near-vertex:
        if (s3 <= 1e-2 and trans[1] == True):
            t, scl = conf_func(t0, -1 + 2*(1 - x0h[1]), 2*s3)
            w = w0 * scl
        else:
            t = t0
            w = w0
        # Parametrization and norm:
        r1 = lambda t: -x0h[0]
        r2 = lambda t: 1 - x0h[1] - (1 + t)/2
        nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
        nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
        nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
        nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
        # Term with v':
        tmp0 = lambda t: nrm(t)*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**3
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        dv = lambda t: dv1*r1(t) + dv2*r2(t)
        g = lambda t: dv(t)*tmp(t)
        I0 += s3/2 * (w @ g(t))
        # Terms with a's:
        tmp0 = lambda t: nrm(t)**2
        tmp1 = lambda t: 2*h*(h - np.sqrt(nrm(t)**2 + h**2))
        tmp2 = lambda t: nrm(t)**4*np.sqrt(nrm(t)**2 + h**2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(3):
            g = lambda t: r1(t)**(3-k-1)*r2(t)**k*tmp(t)
            I0 += (-h*v/2*A[k]) * s3/2 * (w @ g(t))
        # Terms with c's:
        tmp0 = lambda t: (3*h**2*nrm(t) + nrm(t)**3)/np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -3*h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**5
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(4):
            g = lambda t: r1(t)**(4-k-1)*r2(t)**k*tmp(t)
            I0 += (-v/2*C[k]) * s3/2 * (w @ g(t))
    return I0
def compute_I1(x0h, h, eh, J, H, n, trans, tol):
    """Integral of the Taylor term T1 over the reference triangle.

    Same edge-quadrature structure as compute_I0 but for the second-order
    Taylor term; trans[2] enables the sinh-transplanted rule near a vertex.
    """
    # Note: the variable v below corresponds to psi in [1].
    # Points and weight for 1D Gauss quadrature on [-1,1]:
    t0, w0 = np.polynomial.legendre.leggauss(n)
    # Jacobian and Hessian:
    J1 = J[0](x0h)
    J2 = J[1](x0h)
    H11 = H[0](x0h)
    H12 = H[1](x0h)
    H22 = H[2](x0h)
    # Compute v and its 2nd derivatives:
    P = np.cross(J1, J2)
    dP1 = np.cross(H11, J2) + np.cross(J1, H12)
    dP2 = np.cross(H12, J2) + np.cross(J1, H22)
    dP11 = 2 * np.cross(H11, H12)
    dP12 = np.cross(H11, H22) + np.cross(H12, H12)
    dP22 = 2 * np.cross(H12, H22)
    v = norm(P)
    dv1 = 1/v * P @ dP1
    dv2 = 1/v * P @ dP2
    dv11 = 1/v**2 * (v * dP1 @ dP1 + v * P @ dP11 - dv1 * P @ dP1)
    dv12 = 1/v**2 * (v * dP1 @ dP2 + v * P @ dP12 - dv2 * P @ dP1)
    dv22 = 1/v**2 * (v * dP2 @ dP2 + v * P @ dP22 - dv2 * P @ dP2)
    # Compute the coefficients:
    a1 = eh @ H11
    a2 = 2 * (eh @ H12)
    a3 = eh @ H22
    c1 = J1 @ H11
    c2 = 2 * (J1 @ H12) + J2 @ H11
    c3 = 2 * (J2 @ H12) + J1 @ H22
    c4 = J2 @ H22
    d1 = 1/4 * (H11 @ H11)
    d2 = H11 @ H12
    d3 = 1/2 * (H11 @ H22) + H12 @ H12
    d4 = H22 @ H12
    d5 = 1/4 * (H22 @ H22)
    e1 = -1/2 * a1*dv1
    e2 = -1/2 * (a2*dv1 + a1*dv2)
    e3 = -1/2 * (a3*dv1 + a2*dv2)
    e4 = -1/2 * a3*dv2
    E = np.array([e1, e2, e3, e4])
    f1 = 3/8 * v * a1**2
    f2 = 3/8 * v * 2*a1*a2
    f3 = 3/8 * v * (a2**2 + 2*a1*a3)
    f4 = 3/8 * v * 2*a2*a3
    f5 = 3/8 * v * a3**2
    F = np.array([f1, f2, f3, f4, f5])
    g1 = -1/2 * (c1*dv1 + d1*v)
    g2 = -1/2 * (c2*dv1 + c1*dv2 + d2*v)
    g3 = -1/2 * (c3*dv1 + c2*dv2 + d3*v)
    g4 = -1/2 * (c4*dv1 + c3*dv2 + d4*v)
    g5 = -1/2 * (c4*dv2 + d5*v)
    G = np.array([g1, g2, g3, g4, g5])
    h1 = 3/8 * v * c1**2
    h2 = 3/8 * v * 2*c1*c2
    h3 = 3/8 * v * (c2**2 + 2*c1*c3)
    h4 = 3/8 * v * (2*c1*c4 + 2*c2*c3)
    h5 = 3/8 * v * (c3**2 + 2*c2*c4)
    h6 = 3/8 * v * 2*c3*c4
    h7 = 3/8 * v * c4**2
    # NOTE: H below rebinds the Hessian parameter to the coefficient vector;
    # the Hessian callables are no longer needed past this point.
    H = np.array([h1, h2, h3, h4, h5, h6, h7])
    # Integrals on each edge (s1, s2, s3 = distances of x0h to the edges):
    s1 = x0h[1]
    s2 = np.sqrt(2)/2*(1 - x0h[0] - x0h[1])
    s3 = x0h[0]
    I1 = 0
    if (abs(s1) > tol):
        # Transplanted Gauss quadrature if near-vertex:
        if (s1 <= 1e-2 and trans[2] == True):
            t, scl = conf_func(t0, -1 + 2*x0h[0], 2*s1)
            w = w0 * scl
        else:
            t = t0
            w = w0
        # Parametrization and norm:
        r1 = lambda t: -x0h[0] + (t + 1)/2
        r2 = lambda t: -x0h[1]
        nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
        nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
        nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
        nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
        # Term with v'':
        tmp0 = lambda t: 2*h**3 - 2*h**2*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: nrm(t)**2*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**4
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        d2v = lambda t: 1/2*(dv11*r1(t)**2 + dv22*r2(t)**2) + dv12*r1(t)*r2(t)
        g = lambda t: d2v(t)*tmp(t)
        I1 += s1/2 * (w @ g(t))
        # Terms with e's:
        tmp0 = lambda t: (3*h**2*nrm(t) + nrm(t)**3)/np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -3*h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**5
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(4):
            g = lambda t: r1(t)**(4-k-1)*r2(t)**k*tmp(t)
            I1 += (h*E[k]) * s1/2 * (w @ g(t))
        # Terms with f's:
        tmp0 = lambda t: 8*h**4 + 12*h**2*nrm(t)**2 + 3*nrm(t)**4
        tmp1 = lambda t: -8*h*(nrm(t)**2 + h**2)**(3/2)
        tmp2 = lambda t: 3*nrm(t)**6*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(5):
            g = lambda t: r1(t)**(5-k-1)*r2(t)**k*tmp(t)
            I1 += (h**2*F[k]) * s1/2 * (w @ g(t))
        # Terms with g's:
        tmp0 = lambda t: -8*h**4 - 4*h**2*nrm(t)**2 + nrm(t)**4
        tmp1 = lambda t: 8*h**3*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**6*np.sqrt(nrm(t)**2 + h**2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(5):
            g = lambda t: r1(t)**(5-k-1)*r2(t)**k*tmp(t)
            I1 += G[k] * s1/2 * (w @ g(t))
        # Terms with h's (1):
        tmp0 = lambda t: -16*h**6 - 24*h**4*nrm(t)**2
        tmp1 = lambda t: -6*h**2*nrm(t)**4 + nrm(t)**6
        tmp2 = lambda t: 3*nrm(t)**8*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(7):
            g = lambda t: r1(t)**(7-k-1)*r2(t)**k*tmp(t)
            I1 += H[k] * s1/2 * (w @ g(t))
        # Terms with h's (2):
        tmp0 = lambda t: 16*h**5*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: 16*h**3*nrm(t)**2*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**8*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(7):
            g = lambda t: r1(t)**(7-k-1)*r2(t)**k*tmp(t)
            I1 += H[k] * s1/2 * (w @ g(t))
    if (abs(s2) > tol):
        # Transplanted Gauss quadrature if near-vertex:
        if (s2 <= 1e-2 and trans[2] == True):
            t, scl = conf_func(t0, -1 + 2*x0h[1], 2*s2)
            w = w0 * scl
        else:
            t = t0
            w = w0
        # Parametrization and norm:
        r1 = lambda t: 1 - x0h[0] - (t + 1)/2
        r2 = lambda t: -x0h[1] + (t + 1)/2
        nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
        nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
        nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
        nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
        # Term with v'':
        tmp0 = lambda t: 2*h**3 - 2*h**2*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: nrm(t)**2*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**4
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        d2v = lambda t: 1/2*(dv11*r1(t)**2 + dv22*r2(t)**2) + dv12*r1(t)*r2(t)
        g = lambda t: d2v(t)*tmp(t)
        I1 += np.sqrt(2)*s2/2 * (w @ g(t))
        # Terms with e's:
        tmp0 = lambda t: (3*h**2*nrm(t) + nrm(t)**3)/np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -3*h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**5
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(4):
            g = lambda t: r1(t)**(4-k-1)*r2(t)**k*tmp(t)
            I1 += (h*E[k]) * np.sqrt(2)*s2/2 * (w @ g(t))
        # Terms with f's:
        tmp0 = lambda t: 8*h**4 + 12*h**2*nrm(t)**2 + 3*nrm(t)**4
        tmp1 = lambda t: -8*h*(nrm(t)**2 + h**2)**(3/2)
        tmp2 = lambda t: 3*nrm(t)**6*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(5):
            g = lambda t: r1(t)**(5-k-1)*r2(t)**k*tmp(t)
            I1 += (h**2*F[k]) * np.sqrt(2)*s2/2 * (w @ g(t))
        # Terms with g's:
        tmp0 = lambda t: -8*h**4 - 4*h**2*nrm(t)**2 + nrm(t)**4
        tmp1 = lambda t: 8*h**3*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**6*np.sqrt(nrm(t)**2 + h**2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(5):
            g = lambda t: r1(t)**(5-k-1)*r2(t)**k*tmp(t)
            I1 += G[k] * np.sqrt(2)*s2/2 * (w @ g(t))
        # Terms with h's (1):
        tmp0 = lambda t: -16*h**6 - 24*h**4*nrm(t)**2
        tmp1 = lambda t: -6*h**2*nrm(t)**4 + nrm(t)**6
        tmp2 = lambda t: 3*nrm(t)**8*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(7):
            g = lambda t: r1(t)**(7-k-1)*r2(t)**k*tmp(t)
            I1 += H[k] * np.sqrt(2)*s2/2 * (w @ g(t))
        # Terms with h's (2):
        tmp0 = lambda t: 16*h**5*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: 16*h**3*nrm(t)**2*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**8*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(7):
            g = lambda t: r1(t)**(7-k-1)*r2(t)**k*tmp(t)
            I1 += H[k] * np.sqrt(2)*s2/2 * (w @ g(t))
    if (abs(s3) > tol):
        # Transplanted Gauss quadrature if near-vertex:
        if (s3 <= 1e-2 and trans[2] == True):
            t, scl = conf_func(t0, -1 + 2*(1 - x0h[1]), 2*s3)
            w = w0 * scl
        else:
            t = t0
            w = w0
        # Parametrization and norm:
        r1 = lambda t: -x0h[0]
        r2 = lambda t: 1 - x0h[1] - (1 + t)/2
        nrm0 = lambda t: (J1[0]*r1(t) + J2[0]*r2(t))**2
        nrm1 = lambda t: (J1[1]*r1(t) + J2[1]*r2(t))**2
        nrm2 = lambda t: (J1[2]*r1(t) + J2[2]*r2(t))**2
        nrm = lambda t: np.sqrt(nrm0(t) + nrm1(t) + nrm2(t))
        # Term with v'':
        tmp0 = lambda t: 2*h**3 - 2*h**2*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: nrm(t)**2*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**4
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        d2v = lambda t: 1/2*(dv11*r1(t)**2 + dv22*r2(t)**2) + dv12*r1(t)*r2(t)
        g = lambda t: d2v(t)*tmp(t)
        I1 += s3/2 * (w @ g(t))
        # Terms with e's:
        tmp0 = lambda t: (3*h**2*nrm(t) + nrm(t)**3)/np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: -3*h**2*np.arcsinh(nrm(t)/h) if h > 0 else 0
        tmp2 = lambda t: 2*nrm(t)**5
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(4):
            g = lambda t: r1(t)**(4-k-1)*r2(t)**k*tmp(t)
            I1 += (h*E[k]) * s3/2 * (w @ g(t))
        # Terms with f's:
        tmp0 = lambda t: 8*h**4 + 12*h**2*nrm(t)**2 + 3*nrm(t)**4
        tmp1 = lambda t: -8*h*(nrm(t)**2 + h**2)**(3/2)
        tmp2 = lambda t: 3*nrm(t)**6*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(5):
            g = lambda t: r1(t)**(5-k-1)*r2(t)**k*tmp(t)
            I1 += (h**2*F[k]) * s3/2 * (w @ g(t))
        # Terms with g's:
        tmp0 = lambda t: -8*h**4 - 4*h**2*nrm(t)**2 + nrm(t)**4
        tmp1 = lambda t: 8*h**3*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**6*np.sqrt(nrm(t)**2 + h**2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(5):
            g = lambda t: r1(t)**(5-k-1)*r2(t)**k*tmp(t)
            I1 += G[k] * s3/2 * (w @ g(t))
        # Terms with h's (1):
        tmp0 = lambda t: -16*h**6 - 24*h**4*nrm(t)**2
        tmp1 = lambda t: -6*h**2*nrm(t)**4 + nrm(t)**6
        tmp2 = lambda t: 3*nrm(t)**8*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(7):
            g = lambda t: r1(t)**(7-k-1)*r2(t)**k*tmp(t)
            I1 += H[k] * s3/2 * (w @ g(t))
        # Terms with h's (2):
        tmp0 = lambda t: 16*h**5*np.sqrt(nrm(t)**2 + h**2)
        tmp1 = lambda t: 16*h**3*nrm(t)**2*np.sqrt(nrm(t)**2 + h**2)
        tmp2 = lambda t: 3*nrm(t)**8*(nrm(t)**2 + h**2)**(3/2)
        tmp = lambda t: (tmp0(t) + tmp1(t))/tmp2(t)
        for k in range(7):
            g = lambda t: r1(t)**(7-k-1)*r2(t)**k*tmp(t)
            I1 += H[k] * s3/2 * (w @ g(t))
    return I1
def sign_func(x):
    """Sign of x with the convention sign_func(0) == 1."""
    return 1 if x >= 0 else -1
def conf_func(z, mu, nu):
    """Sinh-transplanted quadrature map.

    Maps Gauss points z in [-1, 1] through the conformal sinh transform
    centered at mu with scale nu, clustering points near the (near-)
    singularity. Returns the mapped points and the map's derivative
    (to rescale the quadrature weights).
    """
    lo = np.arcsinh((1 - mu)/nu)
    hi = np.arcsinh((1 + mu)/nu)
    half_span = (lo + hi)/2
    arg = half_span*(z - 1) + lo
    mapped = mu + nu*np.sinh(arg)
    jacobian = nu*half_span*np.cosh(arg)
    return mapped, jacobian
def mul_func(J, X, n):
    """Apply the 3x2 matrix J to every reference 2-vector X[:, i, j].

    Vectorized with np.tensordot: the original double Python loop over the
    n x n grid was O(n^2) interpreted calls; the contraction gives the same
    (3, n, n) result in a single C-level operation.

    Parameters
    ----------
    J : (3, 2) array.
    X : (2, n, n) array (or sequence of two (n, n) arrays).
    n : grid size; retained for backward compatibility (the shape is now
        taken from X directly).
    """
    X = np.asarray(X)
    # M[c, i, j] = sum_b J[c, b] * X[b, i, j]
    return np.tensordot(J, X[:2], axes=([1], [0]))
def map_func(A):
    """Return the quadratic element map F built from the P2 shape functions.

    A is the (6, 3) array of nodal coordinates; F(x) evaluates the map at
    reference coordinates x and returns the (3,) physical point (componentwise
    arrays when x carries array-valued coordinates).
    """
    shape_fns = basis_func()

    def F(x):
        terms = [shape_fns[k](x) for k in range(6)]
        return np.array([sum(t*A[k, c] for k, t in enumerate(terms))
                         for c in range(3)])
    return F
def map_jac(A):
    """Return the two Jacobian columns (J1, J2) of the quadratic element map.

    A is the (6, 3) array of nodal coordinates; J1(x) and J2(x) give the
    partial derivatives of the map with respect to the two reference
    coordinates, each as a (3,) array.
    """
    grads = basis_grad()

    def _column(d):
        def Jd(x):
            parts = [grads[k](x)[d] for k in range(6)]
            return np.array([sum(p*A[k, c] for k, p in enumerate(parts))
                             for c in range(3)])
        return Jd
    return _column(0), _column(1)
def map_hess(A):
    """Return the second derivatives (H11, H12, H22) of the element map.

    A is the (6, 3) array of nodal coordinates; each returned callable
    evaluates one second partial derivative of the map as a (3,) array.
    """
    hessians = basis_hess()

    def _component(r, s):
        def Hrs(x):
            parts = [hessians[k](x)[r, s] for k in range(6)]
            return np.array([sum(p*A[k, c] for k, p in enumerate(parts))
                             for c in range(3)])
        return Hrs
    return _component(0, 0), _component(0, 1), _component(1, 1)
def basis_func():
    """Quadratic (P2) Lagrange shape functions on the reference triangle.

    Returns six callables: the three vertex functions followed by the three
    edge-midpoint functions, in barycentric coordinates
    l1 = 1 - x - y, l2 = x, l3 = y.
    """
    def b1(x):
        t = 1 - x[0] - x[1]
        return t*(2*t - 1)

    def b2(x):
        return x[0]*(2*x[0] - 1)

    def b3(x):
        return x[1]*(2*x[1] - 1)

    def b4(x):
        return 4*(1 - x[0] - x[1])*x[0]

    def b5(x):
        return 4*x[0]*x[1]

    def b6(x):
        return 4*(1 - x[0] - x[1])*x[1]
    return b1, b2, b3, b4, b5, b6
def basis_grad():
    """Gradients of the P2 shape functions on the reference triangle.

    Returns six callables, each mapping reference coordinates x to the
    (2,) gradient of the corresponding shape function.
    """
    def g1(x):
        s = 4*(x[0] + x[1]) - 3
        return np.array([s, s])

    def g2(x):
        return np.array([4*x[0] - 1, 0])

    def g3(x):
        return np.array([0, 4*x[1] - 1])

    def g4(x):
        return np.array([4 - 8*x[0] - 4*x[1], -4*x[0]])

    def g5(x):
        return np.array([4*x[1], 4*x[0]])

    def g6(x):
        return np.array([-4*x[1], 4 - 4*x[0] - 8*x[1]])
    return g1, g2, g3, g4, g5, g6
def basis_hess():
    """Constant Hessians of the P2 shape functions (quadratics).

    Returns six callables; each ignores its argument and returns the
    corresponding 2x2 Hessian matrix.
    """
    mats = (
        [[4, 4], [4, 4]],
        [[4, 0], [0, 0]],
        [[0, 0], [0, 4]],
        [[-8, -4], [-4, 0]],
        [[0, 4], [4, 0]],
        [[0, -4], [-4, -8]],
    )

    def _const(m):
        return lambda x: np.array(m)
    return tuple(_const(m) for m in mats)
d12c9aa5242e06570ee168e82ad0341eb41cca71 | 6,574 | py | Python | Project/professional_activities.py | ellenjkr/LattesXML2PDF | 53a1f2ac7d5228400153ffbb0f7461acf78d0d2d | [
"MIT"
] | null | null | null | Project/professional_activities.py | ellenjkr/LattesXML2PDF | 53a1f2ac7d5228400153ffbb0f7461acf78d0d2d | [
"MIT"
] | null | null | null | Project/professional_activities.py | ellenjkr/LattesXML2PDF | 53a1f2ac7d5228400153ffbb0f7461acf78d0d2d | [
"MIT"
] | null | null | null | from Activities.advice_commissions_consulting import Advice_Commission_Consulting
from Activities.dir_mgmt import Direction_Management
from Activities.internship import Intership
from Activities.research import Research
from Activities.teaching import Teaching
from Activities.technical_scientific import TechnicalScientific
from Activities.training import Training
| 40.331288 | 187 | 0.723456 | from Activities.advice_commissions_consulting import Advice_Commission_Consulting
from Activities.dir_mgmt import Direction_Management
from Activities.internship import Intership
from Activities.research import Research
from Activities.teaching import Teaching
from Activities.technical_scientific import TechnicalScientific
from Activities.training import Training
class ProfessionalActivities():
    """Extract professional activities from a Lattes-resume XML tree.

    For each ATUACAO-PROFISSIONAL entry with at least one regular
    institutional bond, collects the institution name, its bonds and
    its activities (teaching, research, etc.).
    """
    def __init__(self, xml_file):
        """Store the parsed XML tree and extract all professional activities."""
        super(ProfessionalActivities, self).__init__()
        self.xml_file = xml_file
        self.professional_activities = self.get_professional_activities()
    def get_year_range(self, bond_info):
        """Return a 'MM/YYYY - MM/YYYY' range; open-ended bonds end in 'Atual'."""
        start_month = bond_info.attrib['MES-INICIO']
        start_year = bond_info.attrib['ANO-INICIO']
        end_month = bond_info.attrib['MES-FIM']
        end_year = bond_info.attrib['ANO-FIM']
        start = f"{start_month}/{start_year}"
        end = f"{end_month}/{end_year}"
        year_range = f"{start} - {end}"
        if year_range[-1] == "/":  # If "ANO-FIM" == ""
            year_range = year_range.replace(' /', ' ')
            year_range += "Atual"
        return year_range
    def get_bonds_table_content(self, bonds):
        """Flatten the bonds dict-of-lists into per-bond table-row dicts.

        Each row has 'year_range' and a comma-joined 'content' of the
        non-empty fields; non-empty other_info adds a second line.
        """
        # bonds = {'bond': [], 'other_occupation': [], 'hours': [], 'regime': [], 'year_range': [], 'other_info': []}
        bonds_table_content = []
        for pos, bond in enumerate(bonds['bond']):
            bond_dict = {'year_range': bonds['year_range'][pos], 'content': None}
            info_list = [bonds[key][pos] for key in bonds.keys() if key not in ['year_range', 'other_info'] and bonds[key][pos] != ""]
            info_string = ", ".join(info_list)
            bond_dict['content'] = info_string
            if bonds['other_info'][pos] != "":
                bond_dict['second_line'] = 'Outras informações'
                bond_dict['second_line_content'] = bonds['other_info'][pos]
            bonds_table_content.append(bond_dict)
        return bonds_table_content
    def get_bonds(self, xml_content):
        """Collect the institutional bonds of one ATUACAO-PROFISSIONAL entry.

        Skips bonds whose type is one of the "other professional activities"
        (they appear in a different resume section). Returns the flattened
        table rows, or None when the entry has no regular bonds.
        """
        bonds = {'bond': [], 'other_occupation': [], 'hours': [], 'regime': [], 'year_range': [], 'other_info': []}
        # These other activities (other bonds) are placed on a different part of the resume
        other_professional_activities = ['Membro de corpo editorial', 'Membro de comitê de assessoramento', 'Membro de comitê assessor', 'Revisor de periódico', 'Revisor de projeto de fomento']
        bond_content = xml_content.findall(".//VINCULOS")
        for bond_info in bond_content:
            if bond_info.attrib['OUTRO-VINCULO-INFORMADO'] == "":  # This attribute holds the other_professional_activities bonds
                year_range = self.get_year_range(bond_info)
                bond = f"Vínculo: {bond_info.attrib['TIPO-DE-VINCULO'].capitalize()}"
                other_occupation = bond_info.attrib['OUTRO-ENQUADRAMENTO-FUNCIONAL-INFORMADO']
                if other_occupation != "":
                    other_occupation = f"Enquadramento Funcional: {other_occupation}"
                hours = bond_info.attrib['CARGA-HORARIA-SEMANAL']
                if hours != "":
                    hours = f"Carga horária: {hours}"
                regime = bond_info.attrib['FLAG-DEDICACAO-EXCLUSIVA']
                if regime == 'SIM':
                    regime = 'Regime: Dedicação exclusiva'
                else:
                    regime = ''
                other_info = bond_info.attrib['OUTRAS-INFORMACOES']
                bonds['bond'].append(bond)
                bonds['other_occupation'].append(other_occupation)
                bonds['hours'].append(hours)
                bonds['regime'].append(regime)
                bonds['year_range'].append(year_range)
                bonds['other_info'].append(other_info)
            else:
                bond = bond_info.attrib['OUTRO-VINCULO-INFORMADO']
                # Verify if the bond is one of the other_professional_activities, otherwise the bond is added
                if bond not in other_professional_activities:
                    year_range = self.get_year_range(bond_info)
                    other_occupation = bond_info.attrib['OUTRO-ENQUADRAMENTO-FUNCIONAL-INFORMADO']
                    if other_occupation != "":
                        other_occupation = f"Enquadramento Funcional: {other_occupation}"
                    hours = bond_info.attrib['CARGA-HORARIA-SEMANAL']
                    if hours != "":
                        hours = f"Carga horária: {hours}"
                    regime = bond_info.attrib['FLAG-DEDICACAO-EXCLUSIVA']
                    if regime == 'SIM':
                        regime = 'Regime: Dedicação exclusiva'
                    else:
                        regime = ''
                    other_info = bond_info.attrib['OUTRAS-INFORMACOES']
                    bonds['bond'].append(f"Vínculo: {bond.capitalize()}")
                    bonds['other_occupation'].append(other_occupation)
                    bonds['hours'].append(hours)
                    bonds['regime'].append(regime)
                    bonds['year_range'].append(year_range)
                    bonds['other_info'].append(other_info)
        if all(key_list == [] for key_list in bonds.values()):  # If the whole dictionary is empty return nothing
            return None
        else:
            bonds_table_content = self.get_bonds_table_content(bonds)
            return bonds_table_content
    def get_activities(self, xml_content):
        """Collect activities of all supported types, dispatching each
        ATIVIDADES-* child tag to its handler class; returns None if empty."""
        activities_table_content = []  # List with the activities
        for tag in xml_content:
            if 'ATIVIDADES' in tag.tag:  # If the tag is an activity
                # Get the activities of the type specified by tha tag name
                if tag.tag == 'ATIVIDADES-DE-PESQUISA-E-DESENVOLVIMENTO':
                    activities_table_content.extend(Research(tag).get_activities_list())
                elif tag.tag == 'ATIVIDADES-DE-ESTAGIO':
                    activities_table_content.extend(Intership(tag).get_activities_list())
                elif tag.tag == 'ATIVIDADES-DE-ENSINO':
                    activities_table_content.extend(Teaching(tag).get_activities_list())
                elif tag.tag == 'ATIVIDADES-DE-DIRECAO-E-ADMINISTRACAO':
                    activities_table_content.extend(Direction_Management(tag).get_activities_list())
                elif tag.tag == 'ATIVIDADES-DE-TREINAMENTO-MINISTRADO':
                    activities_table_content.extend(Training(tag).get_activities_list())
                elif tag.tag == 'ATIVIDADES-DE-CONSELHO-COMISSAO-E-CONSULTORIA':
                    activities_table_content.extend(Advice_Commission_Consulting(tag).get_activities_list())
                elif tag.tag == 'OUTRAS-ATIVIDADES-TECNICO-CIENTIFICA':
                    activities_table_content.extend(TechnicalScientific(tag).get_activities_list())
        if activities_table_content != []:
            return activities_table_content
        else:
            return None
    def get_professional_activities(self):
        """Build the list of per-institution dicts (institution name, bonds
        and activities); entries without regular bonds are skipped."""
        # Find the tag
        xml_path = 'ATUACAO-PROFISSIONAL'
        xml_content = self.xml_file.findall(f".//{xml_path}")
        all_professional_activities = []
        for tag in xml_content:
            bonds = self.get_bonds(tag)
            if bonds is not None:
                activities = self.get_activities(tag)
                professional_activity = {'institution': None, 'Vínculo institucional': None, 'Atividades': None}
                professional_activity['institution'] = tag.attrib['NOME-INSTITUICAO']
                professional_activity['Vínculo institucional'] = bonds
                professional_activity['Atividades'] = activities
                all_professional_activities.append(professional_activity)
        return all_professional_activities
5e641ecf20f1d503885b0c2aade91d39570cefd9 | 2,277 | py | Python | src/quantum_util/topology.py | alchayward/quantum_util | 76f48e02760e080e70127dd089a4c4e8a6c732cf | [
"MIT"
] | null | null | null | src/quantum_util/topology.py | alchayward/quantum_util | 76f48e02760e080e70127dd089a4c4e8a6c732cf | [
"MIT"
] | null | null | null | src/quantum_util/topology.py | alchayward/quantum_util | 76f48e02760e080e70127dd089a4c4e8a6c732cf | [
"MIT"
] | null | null | null | # tools for analysing topologial quantities for various systems
import numpy as np
from scipy.integrate import quad, dblquad
from scipy import pi, log, imag
from quantum_util.operators import ParameterizedWavefunction
# 1D topology
class TorusState2D(object):
    """Wavefunction sampled on a 2D torus of twist angles.

    Todo: Add some memorization and/or interpolation
    """
    def __init__(self, state_fun, shape=None, ham_f=None):
        """Store the state function, its output shape and an optional Hamiltonian factory."""
        self._state_f = state_fun
        if shape is None:
            # Probe the state function once at the origin to learn its shape.
            shape = state_fun(0, 0).shape
        self.shape = shape
        self.ham_f = ham_f
class TorusState1D(object):
    """Placeholder for a wavefunction on a 1D torus of twist angles.

    NOTE(review): still a stub -- no state is stored yet (compare
    TorusState2D above).
    """
    def __init__(self):
        """No-op constructor; the class carries no state yet."""
        pass
def polarization(wf, d_phi=1.0e-10):
    """Electronic polarization of a set of states via the Resta formula.

    Computes ``-Im log det(W† X W)`` with ``X = diag(e^{-i x_j})`` and the
    positions ``x_j`` sampled on ``[0, 2*pi]``.

    Parameters
    ----------
    wf : array_like, shape (L, n_states)
        Column states on an L-site ring.  (Assumed layout -- the original
        body indexed ``shape[0]`` as the lattice length; confirm against
        callers.)
    d_phi : float
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    float
        The Resta polarization.
    """
    # Bug fix: the original body referenced an undefined name `states` (the
    # parameter is `wf`) and several unimported functions
    # (diag/exp/linspace/det/conj), so any call raised NameError.
    states = np.asarray(wf)
    site_count = states.shape[0]
    # Diagonal position operator exp(-i x) sampled on the ring.
    X = np.diag(np.exp(-1.0j * np.linspace(0, 2 * np.pi, site_count)))
    overlap = np.conj(states.T) @ X @ states
    return -np.imag(np.log(np.linalg.det(overlap)))
# Where do these differ. And why does one involve inverting the state.
## Derivative with respect to potential
# 2D topology
| 26.788235 | 74 | 0.634607 | # tools for analysing topologial quantities for various systems
import numpy as np
from scipy.integrate import quad, dblquad
from scipy import pi, log, imag
from quantum_util.operators import ParameterizedWavefunction
# 1D topology
class TorusState2D(object):
    """Wavefunction sampled on a 2D torus of twist angles (theta_x, theta_y).

    Todo: Add some memorization and/or interpolation
    """
    def __init__(self, state_fun, shape=None, ham_f=None):
        """Store the state function, its output shape and an optional Hamiltonian factory."""
        self._state_f = state_fun
        # Probe the state function at the origin only when no explicit shape is given.
        self.shape = shape if shape is not None else state_fun(0,0).shape
        self.ham_f = ham_f
    def __call__(self, theta_x, theta_y):
        """Evaluate the stored state function at the given twist angles."""
        # Bug fix: the attribute is stored as ``_state_f``; the original read
        # the non-existent ``self.state_f`` and raised AttributeError.
        return self._state_f(theta_x, theta_y)
    def chern_number(self):
        """Chern number of this state over the torus -- not implemented yet.

        Bug fix: the original ``def chern_number():`` had no body at all,
        which made the whole module a SyntaxError.  Kept as an explicit
        stub until the curvature integration below is finished.
        """
        raise NotImplementedError
class TorusState1D(object):
    """Placeholder for a wavefunction on a 1D torus of twist angles.

    Both methods are stubs; the class carries no state yet.
    """
    def __init__(self):
        """No-op constructor."""
    def __call__(self, theta_x):
        """Stub evaluation hook; always returns None."""
def polarization(wf, d_phi=1.0e-10):
    """Electronic polarization of a set of states via the Resta formula.

    Computes ``-Im log det(W† X W)`` with ``X = diag(e^{-i x_j})`` and the
    positions ``x_j`` sampled on ``[0, 2*pi]``.

    Parameters
    ----------
    wf : array_like, shape (L, n_states)
        Column states on an L-site ring.  (Assumed layout -- the original
        body indexed ``shape[0]`` as the lattice length; confirm against
        callers.)
    d_phi : float
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    float
        The Resta polarization.
    """
    # Bug fix: the original body referenced an undefined name `states` (the
    # parameter is `wf`) and several unimported functions
    # (diag/exp/linspace/det/conj), so any call raised NameError.
    states = np.asarray(wf)
    site_count = states.shape[0]
    # Diagonal position operator exp(-i x) sampled on the ring.
    X = np.diag(np.exp(-1.0j * np.linspace(0, 2 * np.pi, site_count)))
    overlap = np.conj(states.T) @ X @ states
    return -np.imag(np.log(np.linalg.det(overlap)))
def integrate_d_pol(pol, max_d=.1):
    """Integrate a polarization trace, mapping each step onto the principal branch.

    Each increment is wrapped into (-pi, pi]; steps whose magnitude reaches
    ``max_d`` are treated as branch-cut jumps and contribute zero, so the
    cumulative sum follows only the smooth drift.

    Parameters
    ----------
    pol : array_like, 1-D
        Sampled polarization values.
    max_d : float
        Threshold at or above which a step is discarded as a branch jump.

    Returns
    -------
    ndarray
        Cumulative integral, starting at 0, same length as ``pol``.
    """
    pol = np.asarray(pol)
    # Im log e^{i d} wraps each difference into the principal branch.
    # Bug fix: `exp` was never imported in this module (only pi/log/imag
    # came from scipy), so the original raised NameError on any call.
    steps = np.imag(np.log(np.exp(1.0j * np.diff(pol))))
    kept = np.where(np.abs(steps) < max_d, steps, 0.0)
    return np.concatenate((np.array([0]), np.cumsum(kept)))
# Where do these differ. And why does one involve inverting the state.
def current_dP(states, ham):
    """Current from the commutator of the ring position operator with `ham`.

    Evaluates ``i * tr[(S† X S)^{-1} · S† [X, H] S]`` where
    ``X = diag(e^{-i x_j})`` on positions sampled over ``[0, 2*pi]`` and
    ``S`` holds the occupied states as columns.

    Parameters
    ----------
    states : ndarray, shape (L, n_occ)
        Occupied states as columns on an L-site ring.
    ham : ndarray, shape (L, L)
        Hamiltonian matrix.

    Returns
    -------
    complex
        The current expectation value.
    """
    # Bug fix: diag/exp/linspace/conj/trace were unbound names here; only
    # `np` is imported at module level, so the original raised NameError.
    site_count = states.shape[0]
    X = np.diag(np.exp(-1.0j * np.linspace(0, 2 * np.pi, site_count)))
    eX = np.conj(states.T) @ X @ states
    # Commutator [X, H] projected onto the occupied subspace.
    dX = np.conj(states.T) @ ((X @ ham) - (ham @ X)) @ states
    return 1.0j * np.trace(np.linalg.inv(eX) @ dX)
## Derivative with respect to potential
def current_dphi(states, ham_f, d_phi=1.0e-7, phi_0=0.0):
    """Current as a finite-difference derivative of <H(phi)> at `phi_0`.

    Parameters
    ----------
    states : ndarray, shape (L, n_occ)
        Occupied states as columns.
    ham_f : callable
        Maps a flux/potential value ``phi`` to the Hamiltonian matrix.
    d_phi : float
        Finite-difference step.
    phi_0 : float
        Point at which the derivative is taken.

    Returns
    -------
    float
        ``-2 * Re tr[S† dH/dphi S] / (2*pi)``.
    """
    dh = ham_f(phi_0 + d_phi) - ham_f(phi_0)
    # Bug fix: trace/conj were unbound names (only `np` is imported at
    # module level), so the original raised NameError on any call.
    expectation_slope = np.trace(np.conj(states.T) @ dh @ states) / d_phi
    return -2 * np.real(expectation_slope) / (2 * np.pi)
def curvature(psi_f):
    """Stub: curvature of the parameterized state `psi_f`.

    NOTE(review): unimplemented -- the inner helper is defined but never
    called or returned, so calling this function just returns None.
    """
    def integrated_current():
        # Placeholder body; never invoked.
        pass
# 2D topology
def curvature_fukuie():
    """Stub: lattice curvature (presumably the Fukui-Hatsugai-Suzuki
    plaquette method, going by the name -- TODO confirm); not implemented."""
    pass
def berry_curvature(wf, phi_x, phi_y, d_phi=1.0e-10):
    """Finite-difference Berry phase density of `wf` at the given angles.

    NOTE(review): broken as written -- `phi` and `dot` are undefined (the
    parameters are phi_x/phi_y and this module only imports np, quad,
    dblquad, pi, log, imag), so any call raises NameError.  The intended
    two-angle formula needs to be decided before this can be fixed.
    """
    return imag(log(dot(wf(phi), wf(phi+d_phi))))/d_phi
def chern_number(wf, d_phi=1.0e-10):
    """Chern number: Berry curvature integrated over the torus [0, 2*pi)^2.

    NOTE(review): depends on `berry_curvature`, which currently raises
    NameError (see above), so this cannot run yet.
    """
    def bc(x, y):
        # Curvature integrand at twist angles (x, y).
        return berry_curvature(wf, x, y, d_phi)
    return dblquad(bc, 0, 2 * pi, lambda x: 0.0, lambda x: 2 * pi)[0]
| 908 | 0 | 264 |
2acc822c79652f7e9afd78c450db48f5e01422b1 | 14,948 | py | Python | dfirtrack_main/importer/file/csv_form_based.py | FabFaeb/dfirtrack | 6dd1f5d16a688ea921753512fbf38ec8865e4c48 | [
"MIT"
] | null | null | null | dfirtrack_main/importer/file/csv_form_based.py | FabFaeb/dfirtrack | 6dd1f5d16a688ea921753512fbf38ec8865e4c48 | [
"MIT"
] | null | null | null | dfirtrack_main/importer/file/csv_form_based.py | FabFaeb/dfirtrack | 6dd1f5d16a688ea921753512fbf38ec8865e4c48 | [
"MIT"
] | null | null | null | import csv
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from dfirtrack_config.models import SystemImporterFileCsvFormbasedConfigModel
from dfirtrack_main.importer.file.csv_check_data import check_config, check_file, check_row
from dfirtrack_main.importer.file.csv_importer_forms import SystemImporterFileCsvFormbasedForm
from dfirtrack_main.importer.file.csv_messages import final_messages
from dfirtrack_main.importer.file.csv_set_system_attributes import case_attributes_form_based, company_attributes_form_based, ip_attributes, tag_attributes_form_based
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import System
from io import TextIOWrapper
@login_required(login_url="/login")
# deprecated, TODO: check for useful stuff regarding tag handling
#
#from dfirtrack.config import TAGLIST
#from dfirtrack.config import TAGPREFIX
#
# """
# - remove all tags for systems beginning with 'TAGPREFIX' (if there are any)
# - evaluate given CSV line by line (without first row)
# - check whether this line has relevant tags (leave loop if not)
# - add relevant tags to this system
# """
#
# # check TAGLIST (from settings.config) for empty list
# if not TAGLIST:
# messages.error(request, "No relevant tags defined. Check `TAGLIST` in `dfirtrack.config`!")
# # call logger
# error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGS_DEFINED.")
# return redirect('/system/')
# else:
# taglist = TAGLIST
#
# # check TAGPREFIX (from settings.config) for empty string
# if TAGPREFIX is "":
# messages.error(request, "No prefix string defined. Check `TAGPREFIX` in `dfirtrack.config`!")
# # call logger
# error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGPREFIX_DEFINED.")
# return redirect('/system/')
# # expand the string by an underscore
# else:
## tagprefix = TAGPREFIX + "_"
# tagprefix = TAGPREFIX + "-"
#
# # create tagaddlist to append for every new system
# tagaddlist = []
# for tag in taglist:
# tagaddlist.append(tagprefix + tag)
#
# """ remove all tags for systems beginning with 'TAGPREFIX' (if there are any) """
#
# # get all systems that have tags beginning with 'TAGPREFIX' | prefixtagsystems -> queryset
# prefixtagsystems=System.objects.filter(tag__tag_name__startswith=tagprefix)
#
# # iterate over systems in queryset | prefixtagsystem -> system object
# for prefixtagsystem in prefixtagsystems:
#
# # get all tags beginning with 'TAGPREFIX' that belong to the actual system | systemprefixtags -> queryset
# systemprefixtags=prefixtagsystem.tag.filter(tag_name__startswith=tagprefix)
#
# # iterate over queryset | systemprefixtag -> tag object
# for systemprefixtag in systemprefixtags:
# # delete all existing tags (the m2m relationship) beginning with 'TAGPREFIX' for this system (so that removed tags from csv will be removed as well)
# systemprefixtag.system_set.remove(prefixtagsystem)
#
# # get tags from csv
# tagcsvstring = row[9]
# if tagcsvstring == '':
# # autoincrement systems_skipped_counter
# systems_skipped_counter += 1
# # autoincrement row_counter
# row_counter += 1
# # leave because systems without tags are not relevant
# continue
# else:
# # convert string (at whitespaces) to list
# tagcsvlist = tagcsvstring.split()
#
# # create empty list for mapping
# tagaddlist = []
# # check for relevant tags and add to list
# for tag in taglist:
# if tag in tagcsvlist:
# tagaddlist.append(tagprefix + tag)
#
# # check if tagaddlist is empty
# if not tagaddlist:
# # autoincrement systems_skipped_counter
# systems_skipped_counter += 1
# # autoincrement row_counter
# row_counter += 1
# # leave because there are no relevant tags
# continue
#
# if not row[10]:
# # continue if there is an empty string
# pass
# else:
# # get object
# tag_error = Tag.objects.get(tag_name=tagprefix + 'Error')
# # add error tag to system
# tag_error.system_set.add(system)
#
# # iterate over tags in tagaddlist
# for tag_name in tagaddlist:
# # get object
# tag = Tag.objects.get(tag_name=tag_name)
# # add tag to system
# tag.system_set.add(system)
# # get tagcolor object
# tagcolor = Tagcolor.objects.get(tagcolor_name='primary')
#
# # create tag if needed
# tag, created = Tag.objects.get_or_create(tag_name=tag_name, tagcolor=tagcolor)
# # call logger if created
# if created == True:
# tag.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_TAG_CREATED")
# messages.success(request, 'Tag "' + tag.tag_name + '" created.')
#
# # add tag to system
# tag.system_set.add(system)
| 42.107042 | 166 | 0.582085 | import csv
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from dfirtrack_config.models import SystemImporterFileCsvFormbasedConfigModel
from dfirtrack_main.importer.file.csv_check_data import check_config, check_file, check_row
from dfirtrack_main.importer.file.csv_importer_forms import SystemImporterFileCsvFormbasedForm
from dfirtrack_main.importer.file.csv_messages import final_messages
from dfirtrack_main.importer.file.csv_set_system_attributes import case_attributes_form_based, company_attributes_form_based, ip_attributes, tag_attributes_form_based
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import System
from io import TextIOWrapper
@login_required(login_url="/login")
def system(request):
    """Import or update System records from an uploaded CSV file.

    GET: validates the importer config and renders the upload form
    (redirecting to the system list if the config check fails).

    POST with a file: sanity-checks the CSV, then walks it row by row --
    creating a new System, updating an existing one, or skipping it
    depending on the `csv_skip_existing_system` config flag -- and finally
    redirects to the system list with summary messages.
    POST without a file: re-renders the upload form.

    NOTE(review): a row whose system name matches several existing systems
    is reported via messages.error and left untouched.
    """
    # get config model
    model = SystemImporterFileCsvFormbasedConfigModel.objects.get(system_importer_file_csv_formbased_config_name = 'SystemImporterFileCsvFormbasedConfig')
    # form was valid to post
    if request.method == "POST":
        # call logger
        debug_logger(str(request.user), " SYSTEM_IMPORTER_FILE_CSV_BEGAN")
        # get systemcsv from request (no submitted file only relevant for tests, normally form enforces file submitting)
        check_systemcsv = request.FILES.get('systemcsv', False)
        # check request for systemcsv (file submitted)
        if check_systemcsv:
            # get text out of file (variable results from request object via file upload field)
            systemcsv = TextIOWrapper(request.FILES['systemcsv'].file, encoding=request.encoding)
            # read rows out of csv
            rows = csv.reader(systemcsv, quotechar="'")
            # check file for csv respectively some kind of text file
            file_check = check_file(request, rows)
            # leave system_importer_file_csv if file check throws errors
            if not file_check:
                return redirect(reverse('system_list'))
            # jump to begin of file again after iterating in file check
            systemcsv.seek(0)
            """ prepare and start loop """
            # set row_counter (needed for logger)
            row_counter = 1
            # set systems_created_counter (needed for messages)
            systems_created_counter = 0
            # set systems_updated_counter (needed for messages)
            systems_updated_counter = 0
            # set systems_skipped_counter (needed for messages)
            systems_skipped_counter = 0
            # iterate over rows
            for row in rows:
                # skip first row in case of headline
                if row_counter == 1 and model.csv_headline is True:
                    # autoincrement row counter
                    row_counter += 1
                    # leave loop for headline row
                    continue
                # check row for valid system values
                continue_system_importer_file_csv = check_row(request, row, row_counter, model)
                # leave loop for this row if there are invalid values
                if continue_system_importer_file_csv:
                    # autoincrement row counter
                    row_counter += 1
                    continue
                # get system name (decremented by one because index starts with zero: user provides 1 -> first column in CSV has index 0)
                system_name = row[model.csv_column_system - 1]
                # get all systems with this system_name
                systemquery = System.objects.filter(system_name=system_name)
                """ check how many systems were returned """
                # if there is only one system
                if len(systemquery) == 1:
                    # skip if system already exists (depending on CSV_SKIP_EXISTING_SYSTEM)
                    if model.csv_skip_existing_system:
                        # autoincrement counter
                        systems_skipped_counter += 1
                        # autoincrement row counter
                        row_counter += 1
                        # leave loop
                        continue
                    # modify existing system (depending on CSV_SKIP_EXISTING_SYSTEM)
                    elif not model.csv_skip_existing_system:
                        # get existing system object
                        system = System.objects.get(system_name=system_name)
                        # create form with request data
                        form = SystemImporterFileCsvFormbasedForm(request.POST, request.FILES, instance=system)
                        # change system
                        if form.is_valid():
                            # don't save form yet
                            system = form.save(commit=False)
                            # change mandatory meta attributes
                            system.system_modify_time = timezone.now()
                            system.system_modified_by_user_id = request.user
                            # save object
                            system.save()
                            # change many2many (classic 'form.save_m2m()' would remove existing relationships regardless config)
                            system = case_attributes_form_based(system, request.POST.getlist('case'), model)
                            system = company_attributes_form_based(system, request.POST.getlist('company'), model)
                            system = tag_attributes_form_based(system, request.POST.getlist('tag'), model)
                            # change ip addresses
                            if model.csv_choice_ip:
                                system = ip_attributes(system, request, row, row_counter, model)
                            # autoincrement systems_updated_counter
                            systems_updated_counter += 1
                            # call logger
                            system.logger(str(request.user), " SYSTEM_IMPORTER_FILE_CSV_SYSTEM_MODIFIED")
                # if there is more than one system
                elif len(systemquery) > 1:
                    messages.error(request, "System " + system_name + " already exists multiple times. Nothing was changed for this system.")
                # if there is no system
                else:
                    # create form with request data
                    form = SystemImporterFileCsvFormbasedForm(request.POST, request.FILES)
                    # create system
                    if form.is_valid():
                        # create new system object
                        system = System()
                        # don't save form yet
                        system = form.save(commit=False)
                        # add system_name from csv
                        system.system_name = system_name
                        # add mandatory meta attributes
                        system.system_modify_time = timezone.now()
                        system.system_created_by_user_id = request.user
                        system.system_modified_by_user_id = request.user
                        # save object
                        system.save()
                        # add many2many
                        system = case_attributes_form_based(system, request.POST.getlist('case'), model)
                        system = company_attributes_form_based(system, request.POST.getlist('company'), model)
                        system = tag_attributes_form_based(system, request.POST.getlist('tag'), model)
                        # add ip addresses
                        if model.csv_choice_ip:
                            system = ip_attributes(system, request, row, row_counter, model)
                        # autoincrement systems_created_counter
                        systems_created_counter += 1
                        # call logger
                        system.logger(str(request.user), " SYSTEM_IMPORTER_FILE_CSV_SYSTEM_CREATED")
                # autoincrement row counter
                row_counter += 1
        # check request for systemcsv (file not submitted)
        else:
            # get empty form with default values
            form = SystemImporterFileCsvFormbasedForm(initial={
                'systemstatus': 2,
                'analysisstatus': 1,
            })
            # show form again
            return render(
                request,
                'dfirtrack_main/system/system_importer_file_csv_form_based.html',
                {
                    'form': form,
                }
            )
        # call final messages
        final_messages(request, systems_created_counter, systems_updated_counter, systems_skipped_counter)
        # call logger
        debug_logger(str(request.user), " SYSTEM_IMPORTER_FILE_CSV_END")
        return redirect(reverse('system_list'))
    else:
        # get config model
        model = SystemImporterFileCsvFormbasedConfigModel.objects.get(system_importer_file_csv_formbased_config_name = 'SystemImporterFileCsvFormbasedConfig')
        # check config before showing form
        stop_system_importer_file_csv = check_config(request, model)
        # leave system_importer_file_csv if variables caused errors
        if stop_system_importer_file_csv:
            return redirect(reverse('system_list'))
        # show warning if existing systems will be updated
        if not model.csv_skip_existing_system:
            messages.warning(request, 'WARNING: Existing systems will be updated!')
        # get empty form with default values
        form = SystemImporterFileCsvFormbasedForm(initial={
            'systemstatus': 2,
            'analysisstatus': 1,
        })
        # call logger
        debug_logger(str(request.user), " SYSTEM_IMPORTER_FILE_CSV_ENTERED")
        # show form
        return render(
            request,
            'dfirtrack_main/system/system_importer_file_csv_form_based.html',
            {
                'form': form,
            }
        )
# deprecated, TODO: check for useful stuff regarding tag handling
#
#from dfirtrack.config import TAGLIST
#from dfirtrack.config import TAGPREFIX
#
# """
# - remove all tags for systems beginning with 'TAGPREFIX' (if there are any)
# - evaluate given CSV line by line (without first row)
# - check whether this line has relevant tags (leave loop if not)
# - add relevant tags to this system
# """
#
# # check TAGLIST (from settings.config) for empty list
# if not TAGLIST:
# messages.error(request, "No relevant tags defined. Check `TAGLIST` in `dfirtrack.config`!")
# # call logger
# error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGS_DEFINED.")
# return redirect('/system/')
# else:
# taglist = TAGLIST
#
# # check TAGPREFIX (from settings.config) for empty string
# if TAGPREFIX is "":
# messages.error(request, "No prefix string defined. Check `TAGPREFIX` in `dfirtrack.config`!")
# # call logger
# error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGPREFIX_DEFINED.")
# return redirect('/system/')
# # expand the string by an underscore
# else:
## tagprefix = TAGPREFIX + "_"
# tagprefix = TAGPREFIX + "-"
#
# # create tagaddlist to append for every new system
# tagaddlist = []
# for tag in taglist:
# tagaddlist.append(tagprefix + tag)
#
# """ remove all tags for systems beginning with 'TAGPREFIX' (if there are any) """
#
# # get all systems that have tags beginning with 'TAGPREFIX' | prefixtagsystems -> queryset
# prefixtagsystems=System.objects.filter(tag__tag_name__startswith=tagprefix)
#
# # iterate over systems in queryset | prefixtagsystem -> system object
# for prefixtagsystem in prefixtagsystems:
#
# # get all tags beginning with 'TAGPREFIX' that belong to the actual system | systemprefixtags -> queryset
# systemprefixtags=prefixtagsystem.tag.filter(tag_name__startswith=tagprefix)
#
# # iterate over queryset | systemprefixtag -> tag object
# for systemprefixtag in systemprefixtags:
# # delete all existing tags (the m2m relationship) beginning with 'TAGPREFIX' for this system (so that removed tags from csv will be removed as well)
# systemprefixtag.system_set.remove(prefixtagsystem)
#
# # get tags from csv
# tagcsvstring = row[9]
# if tagcsvstring == '':
# # autoincrement systems_skipped_counter
# systems_skipped_counter += 1
# # autoincrement row_counter
# row_counter += 1
# # leave because systems without tags are not relevant
# continue
# else:
# # convert string (at whitespaces) to list
# tagcsvlist = tagcsvstring.split()
#
# # create empty list for mapping
# tagaddlist = []
# # check for relevant tags and add to list
# for tag in taglist:
# if tag in tagcsvlist:
# tagaddlist.append(tagprefix + tag)
#
# # check if tagaddlist is empty
# if not tagaddlist:
# # autoincrement systems_skipped_counter
# systems_skipped_counter += 1
# # autoincrement row_counter
# row_counter += 1
# # leave because there are no relevant tags
# continue
#
# if not row[10]:
# # continue if there is an empty string
# pass
# else:
# # get object
# tag_error = Tag.objects.get(tag_name=tagprefix + 'Error')
# # add error tag to system
# tag_error.system_set.add(system)
#
# # iterate over tags in tagaddlist
# for tag_name in tagaddlist:
# # get object
# tag = Tag.objects.get(tag_name=tag_name)
# # add tag to system
# tag.system_set.add(system)
# # get tagcolor object
# tagcolor = Tagcolor.objects.get(tagcolor_name='primary')
#
# # create tag if needed
# tag, created = Tag.objects.get_or_create(tag_name=tag_name, tagcolor=tagcolor)
# # call logger if created
# if created == True:
# tag.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_TAG_CREATED")
# messages.success(request, 'Tag "' + tag.tag_name + '" created.')
#
# # add tag to system
# tag.system_set.add(system)
| 9,216 | 0 | 22 |
c1012a0c816befadf95b8b196dc767c17064660e | 533 | py | Python | NewsProcessorServer/config.py | theodorachu/ihavenewsforyou | bc7be1952af5eee8f1ace92469a1364b974ab984 | [
"MIT"
] | 1 | 2017-01-26T01:25:03.000Z | 2017-01-26T01:25:03.000Z | NewsProcessorServer/config.py | theodorachu/ihavenewsforyou | bc7be1952af5eee8f1ace92469a1364b974ab984 | [
"MIT"
] | null | null | null | NewsProcessorServer/config.py | theodorachu/ihavenewsforyou | bc7be1952af5eee8f1ace92469a1364b974ab984 | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.dirname(__file__))
| 25.380952 | 84 | 0.771107 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] # required by Flask-SQLAlchemy
# OAUTH_CLIENT_ID = os.environ['OAUTH_CLIENT_ID']
# CLIENT_SECRET = os.environ['CLIENT_SECRET']
CORS_HEADERS = 'Content-Type'
class LocalConfig(Config):
DEBUG = True
class HerokuConfig(Config):
DEBUG = False
class TestConfig(Config):
SQLALCHEMY_DATABASE_URI = "postgresql://localhost/news_testing_db"
TESTING = True
| 0 | 378 | 92 |
45b7d1d77a8b4ef50beeefda60b7ff07d37f1fc6 | 2,678 | py | Python | analysis/prep/barriers/special/extract_snapping_dataset_outside_prev_region.py | astutespruce/sarp | 7ce503380440c47b762ed1a8efd1d3e3aab6605e | [
"MIT"
] | null | null | null | analysis/prep/barriers/special/extract_snapping_dataset_outside_prev_region.py | astutespruce/sarp | 7ce503380440c47b762ed1a8efd1d3e3aab6605e | [
"MIT"
] | null | null | null | analysis/prep/barriers/special/extract_snapping_dataset_outside_prev_region.py | astutespruce/sarp | 7ce503380440c47b762ed1a8efd1d3e3aab6605e | [
"MIT"
] | null | null | null | import os
from pathlib import Path
import warnings
import pygeos as pg
import geopandas as gp
from pyogrio import read_dataframe, write_dataframe
from analysis.constants import CRS, STATES
PREVIOUS_STATES = {
"AL",
"AR",
"AZ",
"CO",
"FL",
"GA",
"IA",
"KS",
"KY",
"LA",
"MO",
"MS",
"MT",
"NC",
"ND",
"NE",
"NM",
"OK",
"PR",
"SC",
"SD",
"TN",
"TX",
"UT",
"VA",
"WY",
}
NEW_STATES = sorted(set(STATES) - PREVIOUS_STATES)
warnings.filterwarnings("ignore", message=".*initial implementation of Parquet.*")
data_dir = Path("data")
src_dir = data_dir / "barriers/source"
out_dir = Path("/tmp/sarp")
if not out_dir.exists():
os.makedirs(out_dir)
### Create initial version of snapping dataset for states outside previous region (SE + R2 / R6)
# load NABD dams (drop any that have duplicate NIDID)
nabd = (
read_dataframe(src_dir / "NABD_V2_beta/NABD_V2_beta.shp", columns=["NIDID"])
.dropna(subset=["NIDID"])
.to_crs(CRS)
.dropna(subset=["NIDID"])
.drop_duplicates(subset=["NIDID"], keep=False)
.set_index("NIDID")
)
# load previously snapped dams
prev = gp.read_feather(src_dir / "manually_snapped_dams.feather",)
prev["SourceState"] = prev.SARPID.str[:2]
prev.ManualReview = prev.ManualReview.astype("uint8")
prev = prev.loc[
prev.SourceState.isin(NEW_STATES) & prev.ManualReview.isin([4, 5, 13])
].copy()
# load latest dams downloaded from state-level feature services
# limited to non-SARP states
df = gp.read_feather(src_dir / "sarp_dams.feather")
df = df.loc[df.SourceState.isin(NEW_STATES)].drop_duplicates(
subset=["NIDID"], keep=False
)
df.ManualReview = df.ManualReview.fillna(0).astype("uint8")
df = df.join(prev[["ManualReview", "geometry"]], on="NIDID", rsuffix="_prev",).join(
nabd.geometry.rename("nabd_geometry"), on="NIDID"
)
# if previously reviewed, use that directly
ix = (df.ManualReview == 0) & df.geometry_prev.notnull()
df.loc[ix, "ManualReview"] = df.loc[ix].ManualReview_prev
df.loc[ix, "geometry"] = df.loc[ix].geometry_prev
# update location from NABD if within 5,000 meters
ix = (df.ManualReview == 0) & (
pg.distance(df.geometry.values.data, df.nabd_geometry.values.data) <= 5000
)
df.loc[ix, "ManualReview"] = 2
df.loc[ix, "geometry"] = df.loc[ix].nabd_geometry
df = df.drop(columns=["ManualReview_prev", "geometry_prev", "nabd_geometry"])
# drop anything that wasn't snapped
df = df.loc[df.ManualReview > 0].copy()
df.ManualReview = df.ManualReview.astype("uint8")
df.to_feather(src_dir / "snapped_outside_prev_v1.feather")
write_dataframe(df, src_dir / "snapped_outside_prev_v1.fgb")
| 25.028037 | 96 | 0.676998 | import os
from pathlib import Path
import warnings
import pygeos as pg
import geopandas as gp
from pyogrio import read_dataframe, write_dataframe
from analysis.constants import CRS, STATES
PREVIOUS_STATES = {
"AL",
"AR",
"AZ",
"CO",
"FL",
"GA",
"IA",
"KS",
"KY",
"LA",
"MO",
"MS",
"MT",
"NC",
"ND",
"NE",
"NM",
"OK",
"PR",
"SC",
"SD",
"TN",
"TX",
"UT",
"VA",
"WY",
}
NEW_STATES = sorted(set(STATES) - PREVIOUS_STATES)
warnings.filterwarnings("ignore", message=".*initial implementation of Parquet.*")
data_dir = Path("data")
src_dir = data_dir / "barriers/source"
out_dir = Path("/tmp/sarp")
if not out_dir.exists():
os.makedirs(out_dir)
### Create initial version of snapping dataset for states outside previous region (SE + R2 / R6)
# load NABD dams (drop any that have duplicate NIDID)
nabd = (
read_dataframe(src_dir / "NABD_V2_beta/NABD_V2_beta.shp", columns=["NIDID"])
.dropna(subset=["NIDID"])
.to_crs(CRS)
.dropna(subset=["NIDID"])
.drop_duplicates(subset=["NIDID"], keep=False)
.set_index("NIDID")
)
# load previously snapped dams
prev = gp.read_feather(src_dir / "manually_snapped_dams.feather",)
prev["SourceState"] = prev.SARPID.str[:2]
prev.ManualReview = prev.ManualReview.astype("uint8")
prev = prev.loc[
prev.SourceState.isin(NEW_STATES) & prev.ManualReview.isin([4, 5, 13])
].copy()
# load latest dams downloaded from state-level feature services
# limited to non-SARP states
df = gp.read_feather(src_dir / "sarp_dams.feather")
df = df.loc[df.SourceState.isin(NEW_STATES)].drop_duplicates(
subset=["NIDID"], keep=False
)
df.ManualReview = df.ManualReview.fillna(0).astype("uint8")
df = df.join(prev[["ManualReview", "geometry"]], on="NIDID", rsuffix="_prev",).join(
nabd.geometry.rename("nabd_geometry"), on="NIDID"
)
# if previously reviewed, use that directly
ix = (df.ManualReview == 0) & df.geometry_prev.notnull()
df.loc[ix, "ManualReview"] = df.loc[ix].ManualReview_prev
df.loc[ix, "geometry"] = df.loc[ix].geometry_prev
# update location from NABD if within 5,000 meters
ix = (df.ManualReview == 0) & (
pg.distance(df.geometry.values.data, df.nabd_geometry.values.data) <= 5000
)
df.loc[ix, "ManualReview"] = 2
df.loc[ix, "geometry"] = df.loc[ix].nabd_geometry
df = df.drop(columns=["ManualReview_prev", "geometry_prev", "nabd_geometry"])
# drop anything that wasn't snapped
df = df.loc[df.ManualReview > 0].copy()
df.ManualReview = df.ManualReview.astype("uint8")
df.to_feather(src_dir / "snapped_outside_prev_v1.feather")
write_dataframe(df, src_dir / "snapped_outside_prev_v1.fgb")
| 0 | 0 | 0 |
38880200e76e06d22e94df45480ca1fd7e5a9a82 | 904 | py | Python | deliverable_model/serving/remote_model/model_registry.py | lanSeFangZhou/deliverable_model | b01c502286850879bc3f9be1dd6f369ad1181d07 | [
"Apache-2.0"
] | 2 | 2020-08-20T04:26:58.000Z | 2021-01-04T10:06:31.000Z | deliverable_model/serving/remote_model/model_registry.py | lanSeFangZhou/deliverable_model | b01c502286850879bc3f9be1dd6f369ad1181d07 | [
"Apache-2.0"
] | 6 | 2019-12-12T03:13:43.000Z | 2021-11-10T20:08:18.000Z | deliverable_model/serving/remote_model/model_registry.py | lanSeFangZhou/deliverable_model | b01c502286850879bc3f9be1dd6f369ad1181d07 | [
"Apache-2.0"
] | 4 | 2019-11-15T09:56:01.000Z | 2020-05-08T10:23:00.000Z | from pathlib import Path
from deliverable_model.serving.remote_model.model_endpoint_base import ModelEndpointBase
_endpoint_type_registry = {}
| 29.16129 | 88 | 0.820796 | from pathlib import Path
from deliverable_model.serving.remote_model.model_endpoint_base import ModelEndpointBase
_endpoint_type_registry = {}
def get_endpoint_class_by_type(model_type) -> ModelEndpointBase:
    """Look up the endpoint class registered for `model_type`.

    Raises KeyError when nothing was registered under that type.
    """
    return _endpoint_type_registry[model_type]
def get_endpoint_instance_by_type(model_type, metadata, config) -> ModelEndpointBase:
    """Instantiate the endpoint registered for `model_type`.

    Construction is delegated to the registered class's
    ``from_config(metadata, config)`` factory method.
    """
    endpoint_class = get_endpoint_class_by_type(model_type)
    return endpoint_class.from_config(metadata, config)
def register_model_loader(model_type, model_loader_class: ModelEndpointBase):
    """Register `model_loader_class` as the endpoint implementation for `model_type`.

    Raises
    ------
    ValueError
        If something is already registered under `model_type`.
    """
    global _endpoint_type_registry
    # Bug fix: the original tested `model_loader_class in _endpoint_type_registry`,
    # i.e. it looked for the *class* among the registry keys (which are model
    # types), so duplicate registrations were never detected.  It also raised
    # a bare ValueError with no message.
    if model_type in _endpoint_type_registry:
        raise ValueError(f"model type {model_type!r} is already registered")
    _endpoint_type_registry[model_type] = model_loader_class
| 687 | 0 | 69 |
8a047cd30341546b9165ab7b923c949d6ffdb963 | 479 | py | Python | scuole/counties/migrations/0004_auto_20170813_2350.py | texastribune/scuole | 8ab316ee50ef0d8e71b94b50dc889d10c6e83412 | [
"MIT"
] | 1 | 2019-03-12T04:30:02.000Z | 2019-03-12T04:30:02.000Z | scuole/counties/migrations/0004_auto_20170813_2350.py | texastribune/scuole | 8ab316ee50ef0d8e71b94b50dc889d10c6e83412 | [
"MIT"
] | 616 | 2017-08-18T21:15:39.000Z | 2022-03-25T11:17:10.000Z | scuole/counties/migrations/0004_auto_20170813_2350.py | texastribune/scuole | 8ab316ee50ef0d8e71b94b50dc889d10c6e83412 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-13 23:50
from __future__ import unicode_literals
from django.db import migrations, models
| 22.809524 | 49 | 0.638831 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-13 23:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters
    # countycohorts.enrolled_out_of_state_percent to a nullable FloatField.
    dependencies = [
        ('counties', '0003_auto_20170811_2045'),
    ]
    operations = [
        migrations.AlterField(
            model_name='countycohorts',
            name='enrolled_out_of_state_percent',
            field=models.FloatField(null=True),
        ),
    ]
| 0 | 300 | 23 |
58ca962f1bbbcccd31a06aeb2de9c62035360259 | 75 | py | Python | ts_app/__main__.py | ngocneo/time-series-app | 910cae8a05e0ad7fb21aa3844ff9b1ff39e287e2 | [
"MIT"
] | null | null | null | ts_app/__main__.py | ngocneo/time-series-app | 910cae8a05e0ad7fb21aa3844ff9b1ff39e287e2 | [
"MIT"
] | null | null | null | ts_app/__main__.py | ngocneo/time-series-app | 910cae8a05e0ad7fb21aa3844ff9b1ff39e287e2 | [
"MIT"
] | null | null | null | from ts_app import run_in_cli
if __name__ == "__main__":
    # Entry point for `python -m ts_app`: delegate to the CLI runner.
    run_in_cli()
| 15 | 29 | 0.72 | from ts_app import run_in_cli
if __name__ == "__main__":
    # Entry point for `python -m ts_app`: delegate to the CLI runner.
    run_in_cli()
| 0 | 0 | 0 |
e009771b8dc62d1f5fc7dcde6bef84d2cc2d249c | 539 | py | Python | src/concurrency in python/python concurrecy/thead_monitor.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | null | null | null | src/concurrency in python/python concurrecy/thead_monitor.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | 3 | 2019-12-26T05:13:55.000Z | 2020-03-07T06:59:56.000Z | src/concurrency in python/python concurrecy/thead_monitor.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | null | null | null | import subprocess
monitor_deamons() | 28.368421 | 92 | 0.684601 | import subprocess
def monitor_deamons():
    """Print running processes whose command line mentions 'daemon'.

    Equivalent to the shell pipeline ``ps -ef | grep -v grep | grep daemon``.
    (Name typo "deamons" is kept for backward compatibility with callers.)

    :return: bytes - captured stdout of the final ``grep``
             (empty bytes if nothing matched).
    """
    print("starting monitor")
    ps = subprocess.Popen(('ps', '-ef'), stdout=subprocess.PIPE)
    grep = subprocess.Popen(('grep', '-v', 'grep'), stdin=ps.stdout, stdout=subprocess.PIPE)
    ps.stdout.close()  # Allow ps to receive a SIGPIPE if grep exits.
    # Bug fix: the final stage must pipe its stdout as well; previously
    # communicate()[0] was None (output went straight to the terminal) and
    # this function printed the literal "None".
    grep_daemon = subprocess.Popen(('grep', 'daemon'), stdin=grep.stdout, stdout=subprocess.PIPE)
    grep.stdout.close()  # Allow grep to receive a SIGPIPE if grep_daemon exits
    output = grep_daemon.communicate()[0]
    print(output)
    return output
monitor_deamons() | 480 | 0 | 23 |
63230fe10d1a1ba9882aa450100c2db362bf35bb | 4,934 | py | Python | metal/end_model/em_defaults.py | inimino/metal | 6d557e0956fb92c6d1afccd4b0089c5166d38565 | [
"Apache-2.0"
] | 437 | 2018-08-03T21:24:15.000Z | 2022-03-18T19:31:39.000Z | metal/end_model/em_defaults.py | inimino/metal | 6d557e0956fb92c6d1afccd4b0089c5166d38565 | [
"Apache-2.0"
] | 134 | 2018-08-02T14:33:44.000Z | 2021-12-22T06:39:42.000Z | metal/end_model/em_defaults.py | inimino/metal | 6d557e0956fb92c6d1afccd4b0089c5166d38565 | [
"Apache-2.0"
] | 80 | 2018-08-06T21:18:39.000Z | 2022-03-10T09:17:49.000Z | em_default_config = {
# GENERAL
"seed": None,
"verbose": True,
"show_plots": True,
# Network
# The first value is the output dim of the input module (or the sum of
# the output dims of all the input modules if multitask=True and
# multiple input modules are provided). The last value is the
# output dim of the head layer (i.e., the cardinality of the
# classification task). The remaining values are the output dims of
# middle layers (if any). The number of middle layers will be inferred
# from this list.
"layer_out_dims": [10, 2],
# Input layer configs
"input_layer_config": {
"input_relu": True,
"input_batchnorm": False,
"input_dropout": 0.0,
},
# Middle layer configs
"middle_layer_config": {
"middle_relu": True,
"middle_batchnorm": False,
"middle_dropout": 0.0,
},
# Can optionally skip the head layer completely, for e.g. running baseline
# models...
"skip_head": False,
# Device
"device": "cpu",
# TRAINING
"train_config": {
# Loss function config
"loss_fn_reduction": "mean",
# Display
"progress_bar": False,
# Dataloader
"data_loader_config": {"batch_size": 32, "num_workers": 1, "shuffle": True},
# Loss weights
"loss_weights": None,
# Train Loop
"n_epochs": 10,
# 'grad_clip': 0.0,
"l2": 0.0,
"validation_metric": "accuracy",
"validation_freq": 1,
"validation_scoring_kwargs": {},
# Evaluate dev for during training every this many epochs
# Optimizer
"optimizer_config": {
"optimizer": "adam",
"optimizer_common": {"lr": 0.01},
# Optimizer - SGD
"sgd_config": {"momentum": 0.9},
# Optimizer - Adam
"adam_config": {"betas": (0.9, 0.999)},
# Optimizer - RMSProp
"rmsprop_config": {}, # Use defaults
},
# LR Scheduler (for learning rate)
"lr_scheduler": "reduce_on_plateau",
# [None, 'exponential', 'reduce_on_plateau']
# 'reduce_on_plateau' uses checkpoint_metric to assess plateaus
"lr_scheduler_config": {
# Freeze learning rate initially this many epochs
"lr_freeze": 0,
# Scheduler - exponential
"exponential_config": {"gamma": 0.9}, # decay rate
# Scheduler - reduce_on_plateau
"plateau_config": {
"factor": 0.5,
"patience": 10,
"threshold": 0.0001,
"min_lr": 1e-4,
},
},
# Logger (see metal/logging/logger.py for descriptions)
"logger": True,
"logger_config": {
"log_unit": "epochs", # ['seconds', 'examples', 'batches', 'epochs']
"log_train_every": 1, # How often train metrics are calculated (optionally logged to TB)
"log_train_metrics": [
"loss"
], # Metrics to calculate and report every `log_train_every` units. This can include built-in and user-defined metrics.
"log_train_metrics_func": None, # A function or list of functions that map a model + train_loader to a dictionary of custom metrics
"log_valid_every": 1, # How frequently to evaluate on valid set (must be multiple of log_freq)
"log_valid_metrics": [
"accuracy"
], # Metrics to calculate and report every `log_valid_every` units; this can include built-in and user-defined metrics
"log_valid_metrics_func": None, # A function or list of functions that maps a model + valid_loader to a dictionary of custom metrics
},
# LogWriter/Tensorboard (see metal/logging/writer.py for descriptions)
"writer": None, # [None, "json", "tensorboard"]
"writer_config": { # Log (or event) file stored at log_dir/run_dir/run_name
"log_dir": None,
"run_dir": None,
"run_name": None,
"writer_metrics": None, # May specify a subset of metrics in metrics_dict to be written
"include_config": True, # If True, include model config in log
},
# Checkpointer (see metal/logging/checkpointer.py for descriptions)
"checkpoint": True, # If True, checkpoint models when certain conditions are met
"checkpoint_config": {
"checkpoint_best": True,
"checkpoint_every": None, # uses log_valid_unit for units; if not None, checkpoint this often regardless of performance
"checkpoint_metric": "accuracy", # Must be in metrics dict; assumes valid split unless appended with "train/"
"checkpoint_metric_mode": "max", # ['max', 'min']
"checkpoint_dir": "checkpoints",
"checkpoint_runway": 0,
},
},
}
| 43.280702 | 145 | 0.58715 | em_default_config = {
# GENERAL
"seed": None,
"verbose": True,
"show_plots": True,
# Network
# The first value is the output dim of the input module (or the sum of
# the output dims of all the input modules if multitask=True and
# multiple input modules are provided). The last value is the
# output dim of the head layer (i.e., the cardinality of the
# classification task). The remaining values are the output dims of
# middle layers (if any). The number of middle layers will be inferred
# from this list.
"layer_out_dims": [10, 2],
# Input layer configs
"input_layer_config": {
"input_relu": True,
"input_batchnorm": False,
"input_dropout": 0.0,
},
# Middle layer configs
"middle_layer_config": {
"middle_relu": True,
"middle_batchnorm": False,
"middle_dropout": 0.0,
},
# Can optionally skip the head layer completely, for e.g. running baseline
# models...
"skip_head": False,
# Device
"device": "cpu",
# TRAINING
"train_config": {
# Loss function config
"loss_fn_reduction": "mean",
# Display
"progress_bar": False,
# Dataloader
"data_loader_config": {"batch_size": 32, "num_workers": 1, "shuffle": True},
# Loss weights
"loss_weights": None,
# Train Loop
"n_epochs": 10,
# 'grad_clip': 0.0,
"l2": 0.0,
"validation_metric": "accuracy",
"validation_freq": 1,
"validation_scoring_kwargs": {},
# Evaluate dev for during training every this many epochs
# Optimizer
"optimizer_config": {
"optimizer": "adam",
"optimizer_common": {"lr": 0.01},
# Optimizer - SGD
"sgd_config": {"momentum": 0.9},
# Optimizer - Adam
"adam_config": {"betas": (0.9, 0.999)},
# Optimizer - RMSProp
"rmsprop_config": {}, # Use defaults
},
# LR Scheduler (for learning rate)
"lr_scheduler": "reduce_on_plateau",
# [None, 'exponential', 'reduce_on_plateau']
# 'reduce_on_plateau' uses checkpoint_metric to assess plateaus
"lr_scheduler_config": {
# Freeze learning rate initially this many epochs
"lr_freeze": 0,
# Scheduler - exponential
"exponential_config": {"gamma": 0.9}, # decay rate
# Scheduler - reduce_on_plateau
"plateau_config": {
"factor": 0.5,
"patience": 10,
"threshold": 0.0001,
"min_lr": 1e-4,
},
},
# Logger (see metal/logging/logger.py for descriptions)
"logger": True,
"logger_config": {
"log_unit": "epochs", # ['seconds', 'examples', 'batches', 'epochs']
"log_train_every": 1, # How often train metrics are calculated (optionally logged to TB)
"log_train_metrics": [
"loss"
], # Metrics to calculate and report every `log_train_every` units. This can include built-in and user-defined metrics.
"log_train_metrics_func": None, # A function or list of functions that map a model + train_loader to a dictionary of custom metrics
"log_valid_every": 1, # How frequently to evaluate on valid set (must be multiple of log_freq)
"log_valid_metrics": [
"accuracy"
], # Metrics to calculate and report every `log_valid_every` units; this can include built-in and user-defined metrics
"log_valid_metrics_func": None, # A function or list of functions that maps a model + valid_loader to a dictionary of custom metrics
},
# LogWriter/Tensorboard (see metal/logging/writer.py for descriptions)
"writer": None, # [None, "json", "tensorboard"]
"writer_config": { # Log (or event) file stored at log_dir/run_dir/run_name
"log_dir": None,
"run_dir": None,
"run_name": None,
"writer_metrics": None, # May specify a subset of metrics in metrics_dict to be written
"include_config": True, # If True, include model config in log
},
# Checkpointer (see metal/logging/checkpointer.py for descriptions)
"checkpoint": True, # If True, checkpoint models when certain conditions are met
"checkpoint_config": {
"checkpoint_best": True,
"checkpoint_every": None, # uses log_valid_unit for units; if not None, checkpoint this often regardless of performance
"checkpoint_metric": "accuracy", # Must be in metrics dict; assumes valid split unless appended with "train/"
"checkpoint_metric_mode": "max", # ['max', 'min']
"checkpoint_dir": "checkpoints",
"checkpoint_runway": 0,
},
},
}
| 0 | 0 | 0 |
a0ab8966ba729b58ef60fe30df62a2cbd9de6013 | 296 | py | Python | config.py | golnazads/ADSAuthorAffiliationService | 69a4f2e2ea481d7a26bac1c37d880ef691be8c72 | [
"MIT"
] | 1 | 2021-04-25T15:04:22.000Z | 2021-04-25T15:04:22.000Z | config.py | golnazads/ADSAuthorAffiliationService | 69a4f2e2ea481d7a26bac1c37d880ef691be8c72 | [
"MIT"
] | 21 | 2017-12-08T01:21:22.000Z | 2021-02-26T17:44:06.000Z | config.py | golnazads/ADSAuthorAffiliationService | 69a4f2e2ea481d7a26bac1c37d880ef691be8c72 | [
"MIT"
] | 2 | 2018-08-07T13:51:39.000Z | 2021-04-25T15:04:26.000Z |
# Emit service logs to stdout.
LOG_STDOUT = True
# Solr bigquery endpoint queried for author/affiliation records.
AUTHOR_AFFILIATION_SOLRQUERY_URL = "http://api.adsabs.harvard.edu/v1/search/bigquery"
# Upper bound on records requested from Solr per query.
# NOTE(review): "AFFILATION" spelling is kept -- renaming would break consumers.
AUTHOR_AFFILATION_SERVICE_MAX_RECORDS_SOLR = 1000
# must be here for adsmutils to override it using env vars
# but if left empty (resolving to False) it won't be used
SERVICE_TOKEN = None
| 26.909091 | 85 | 0.797297 |
# Emit service logs to stdout.
LOG_STDOUT = True
# Solr bigquery endpoint queried for author/affiliation records.
AUTHOR_AFFILIATION_SOLRQUERY_URL = "http://api.adsabs.harvard.edu/v1/search/bigquery"
# Upper bound on records requested from Solr per query.
# NOTE(review): "AFFILATION" spelling is kept -- renaming would break consumers.
AUTHOR_AFFILATION_SERVICE_MAX_RECORDS_SOLR = 1000
# must be here for adsmutils to override it using env vars
# but if left empty (resolving to False) it won't be used
SERVICE_TOKEN = None
| 0 | 0 | 0 |
97419f3dc72c64bccb2d2d4a114e256582785944 | 421 | py | Python | accounts/migrations/0006_auto_20180422_2204.py | Tomasz-Kluczkowski/Bnice | 75eb66a94a3bf3225691ed6802e674fbcf108571 | [
"MIT"
] | null | null | null | accounts/migrations/0006_auto_20180422_2204.py | Tomasz-Kluczkowski/Bnice | 75eb66a94a3bf3225691ed6802e674fbcf108571 | [
"MIT"
] | 60 | 2018-04-20T21:32:21.000Z | 2021-09-07T23:53:31.000Z | accounts/migrations/0006_auto_20180422_2204.py | Tomasz-Kluczkowski/Bnice | 75eb66a94a3bf3225691ed6802e674fbcf108571 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-04-22 21:04
from django.db import migrations, models
| 22.157895 | 79 | 0.605701 | # Generated by Django 2.0.3 on 2018-04-22 21:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Alters ``User.profile_photo`` to an optional ImageField uploaded under
    ``profiles/<year>/<month>/<day>``. Machine-managed file: do not hand-edit.
    """
    dependencies = [
        ('accounts', '0005_auto_20180419_2101'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='profile_photo',
            field=models.ImageField(blank=True, upload_to='profiles/%Y/%m/%d'),
        ),
    ]
| 0 | 307 | 23 |
1749e798a32d13f7c0e13894de38f49b9ed27849 | 32,134 | py | Python | tests/python/modules/mesh_test.py | noorbeast/blender | 9dc69b3848b46f4fbf3daa3360a3b975f4e1565f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365 | 2015-02-10T15:10:55.000Z | 2022-03-03T15:50:51.000Z | tests/python/modules/mesh_test.py | noorbeast/blender | 9dc69b3848b46f4fbf3daa3360a3b975f4e1565f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45 | 2015-01-09T15:34:20.000Z | 2021-10-05T14:44:23.000Z | tests/python/modules/mesh_test.py | noorbeast/blender | 9dc69b3848b46f4fbf3daa3360a3b975f4e1565f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172 | 2015-01-25T15:16:53.000Z | 2022-01-31T08:25:36.000Z | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# A framework to run regression tests on mesh modifiers and operators based on howardt's mesh_ops_test.py
#
# General idea:
# A test is:
# Object mode
# Select <test_object>
# Duplicate the object
# Select the object
# Apply operation for each operation in <operations_stack> with given parameters
# (an operation is either a modifier or an operator)
# test_mesh = <test_object>.data
# run test_mesh.unit_test_compare(<expected object>.data)
# delete the duplicate object
#
# The words in angle brackets are parameters of the test, and are specified in
# the main class MeshTest.
#
# If the environment variable BLENDER_TEST_UPDATE is set to 1, the <expected_object>
# is updated with the new test result.
# Tests are verbose when the environment variable BLENDER_VERBOSE is set.
import bpy
import functools
import inspect
import os
# Output from this module and from blender itself will occur during tests.
# We need to flush python so that the output is properly interleaved, otherwise
# blender's output for one test will end up showing in the middle of another test...
print = functools.partial(print, flush=True)
class ModifierSpec:
    """
    Describes one Generate/Deform/Physics modifier to be added to a test object.
    """

    def __init__(self, modifier_name: str, modifier_type: str, modifier_parameters: dict, frame_end=0):
        """
        Record the description of a single object modifier.

        :param modifier_name: str - name for the modifier instance, e.g. "myFirstSubsurfModif"
        :param modifier_type: str - Blender modifier type identifier, e.g. "SUBSURF"
        :param modifier_parameters: dict - {name: value} settings for the modifier, e.g. {"quality": 4}
        :param frame_end: int - frame at which the simulation is baked or the modifier is applied.
        """
        self.modifier_name, self.modifier_type = modifier_name, modifier_type
        self.modifier_parameters, self.frame_end = modifier_parameters, frame_end
class ParticleSystemSpec:
    """
    Describes a particle-system modifier and the frame range it simulates over.
    """

    def __init__(self, modifier_name: str, modifier_type: str, modifier_parameters: dict, frame_end: int):
        """
        Record the description of a particle system.

        :param modifier_name: str - name for the modifier instance, e.g. "Particles"
        :param modifier_type: str - Blender modifier type identifier, e.g. "PARTICLE_SYSTEM"
        :param modifier_parameters: dict - {name: value} settings for the system, e.g. {"seed": 1}
        :param frame_end: int - last frame of the simulation at which the modifier is applied.
        """
        self.frame_end = frame_end
        self.modifier_parameters = modifier_parameters
        self.modifier_type = modifier_type
        self.modifier_name = modifier_name
class OperatorSpecEditMode:
    """
    Describes one edit-mode mesh operator call: the operator, its arguments,
    and the mesh-element selection it should act on.
    """

    def __init__(self, operator_name: str, operator_parameters: dict, select_mode: str, selection: set):
        """
        Record one bpy.ops.mesh operator invocation.
        Raises ValueError when select_mode is not a valid mode.

        :param operator_name: str - name of a mesh operator from bpy.ops.mesh, e.g. "bevel" or "fill"
        :param operator_parameters: dict - {name: value} operator arguments.
        :param select_mode: str - mesh selection mode, one of 'VERT', 'EDGE' or 'FACE'
        :param selection: set - indices of vertices/edges/faces to select, e.g. [0, 9, 10].
        """
        # Validate the mode up front with a guard clause.
        if select_mode not in ['VERT', 'EDGE', 'FACE']:
            raise ValueError("select_mode must be either {}, {} or {}".format('VERT', 'EDGE', 'FACE'))
        self.operator_name = operator_name
        self.operator_parameters = operator_parameters
        self.select_mode = select_mode
        self.selection = selection
class OperatorSpecObjectMode:
    """
    Describes one object-mode operator call (bpy.ops.object) and its arguments.
    Companion to OperatorSpecEditMode for operations that must run outside Edit Mode;
    also used by DeformModifierSpec.
    """

    def __init__(self, operator_name: str, operator_parameters: dict):
        """
        :param operator_name: str - name of an operator from bpy.ops.object, e.g. "shade_smooth" or "shape_keys"
        :param operator_parameters: dict - operator arguments.
        """
        self.operator_name, self.operator_parameters = operator_name, operator_parameters
class DeformModifierSpec:
    """
    Bundles one or more deform modifiers with an optional object-mode operator,
    all evaluated at a specific animation frame.
    """

    def __init__(self, frame_number: int, modifier_list: list, object_operator_spec: OperatorSpecObjectMode = None):
        """
        Record a deform-modifier operation (user input).

        :param frame_number: int - frame at which the animated keyframe is inserted.
        :param modifier_list: list - ModifierSpec instances describing the modifiers.
        :param object_operator_spec: OperatorSpecObjectMode - optional object operator to run.
        """
        self.frame_number, self.modifier_list = frame_number, modifier_list
        self.object_operator_spec = object_operator_spec
class MeshTest:
    """
    A mesh testing class targeted at testing modifiers and operators on a single object.
    It holds a stack of mesh operations, i.e. modifiers or operators. The test is executed using
    the public method run_test().
    NOTE(review): this class calls self._apply_modifier() and self._on_failed_test(),
    which are not defined in this excerpt -- presumably provided elsewhere in the module.
    """
    def __init__(
            self,
            test_name: str,
            test_object_name: str,
            expected_object_name: str,
            operations_stack=None,
            apply_modifiers=False,
            do_compare=False,
            threshold=None
    ):
        """
        Constructs a MeshTest object. Raises a KeyError if objects with names expected_object_name
        or test_object_name don't exist.
        :param test_name: str - unique test name identifier.
        :param test_object_name: str - Name of object of mesh type to run the operations on.
        :param expected_object_name: str - Name of object of mesh type that has the expected
                                geometry after running the operations.
        :param operations_stack: list - stack holding operations to perform on the test_object.
        :param apply_modifiers: bool - True if we want to apply the modifiers right after adding them to the object.
                              - True if we want to apply the modifier to a list of modifiers, after some operation.
                              This affects operations of type ModifierSpec and DeformModifierSpec.
        :param do_compare: bool - True if we want to compare the test and expected objects, False otherwise.
        :param threshold: float - tolerance passed to Mesh.unit_test_compare to accept small differences.
        """
        if operations_stack is None:
            operations_stack = []
        # Reject anything that is not one of the known operation spec types up front.
        for operation in operations_stack:
            if not (isinstance(operation, ModifierSpec) or isinstance(operation, OperatorSpecEditMode)
                    or isinstance(operation, OperatorSpecObjectMode) or isinstance(operation, DeformModifierSpec)
                    or isinstance(operation, ParticleSystemSpec)):
                raise ValueError("Expected operation of type {} or {} or {} or {}. Got {}".
                                 format(type(ModifierSpec), type(OperatorSpecEditMode),
                                        type(DeformModifierSpec), type(ParticleSystemSpec),
                                        type(operation)))
        self.operations_stack = operations_stack
        self.apply_modifier = apply_modifiers
        self.do_compare = do_compare
        self.threshold = threshold
        self.test_name = test_name
        # Verbosity / update mode come from the environment, matching blender's test runner.
        self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
        self.update = os.getenv('BLENDER_TEST_UPDATE') is not None
        # Initialize test objects.
        objects = bpy.data.objects
        self.test_object = objects[test_object_name]
        self.expected_object = objects[expected_object_name]
        if self.verbose:
            print("Found test object {}".format(test_object_name))
            print("Found test object {}".format(expected_object_name))
        # Private flag to indicate whether the blend file was updated after the test.
        self._test_updated = False
    def set_test_object(self, test_object_name):
        """
        Set test object for the test. Raises a KeyError if object with given name does not exist.
        :param test_object_name: name of test object to run operations on.
        """
        objects = bpy.data.objects
        self.test_object = objects[test_object_name]
    def set_expected_object(self, expected_object_name):
        """
        Set expected object for the test. Raises a KeyError if object with given name does not exist.
        :param expected_object_name: Name of expected object.
        """
        objects = bpy.data.objects
        self.expected_object = objects[expected_object_name]
    def is_test_updated(self) -> bool:
        """
        Check whether running the test with BLENDER_TEST_UPDATE actually modified the .blend test file.
        :return: Bool - True if blend file has been updated. False otherwise.
        """
        return self._test_updated
    def _set_parameters_impl(self, modifier, modifier_parameters, nested_settings_path, modifier_name):
        """
        Doing a depth first traversal of the modifier parameters and setting their values.
        :param modifier: Of type modifier, its altered to become a setting in recursion.
        :param modifier_parameters: dict or sequence, a simple/nested dictionary of modifier parameters.
        :param nested_settings_path: list (used as a stack): helps in tracing path to each node.
        :param modifier_name: str - name of the modifier, used for error reporting only.
        """
        # A non-dict value means we reached a leaf: apply it at the path accumulated so far.
        if not isinstance(modifier_parameters, dict):
            param_setting = None
            for i, setting in enumerate(nested_settings_path):
                # We want to set the attribute only when we have reached the last setting.
                # Applying of intermediate settings is meaningless.
                if i == len(nested_settings_path) - 1:
                    setattr(modifier, setting, modifier_parameters)
                elif hasattr(modifier, setting):
                    param_setting = getattr(modifier, setting)
                    # getattr doesn't accept canvas_surfaces["Surface"], but we need to pass it to setattr.
                    if setting == "canvas_surfaces":
                        modifier = param_setting.active
                    else:
                        modifier = param_setting
                else:
                    # Clean up first
                    bpy.ops.object.delete()
                    raise Exception("Modifier '{}' has no parameter named '{}'".
                                    format(modifier_name, setting))
            # It pops the current node before moving on to its sibling.
            nested_settings_path.pop()
            return
        # Recurse into nested dictionaries, pushing each key onto the path stack.
        for key in modifier_parameters:
            nested_settings_path.append(key)
            self._set_parameters_impl(modifier, modifier_parameters[key], nested_settings_path, modifier_name)
            if nested_settings_path:
                nested_settings_path.pop()
    def set_parameters(self, modifier, modifier_parameters):
        """
        Wrapper for _set_parameters_impl: applies a (possibly nested) parameter
        dict to the given modifier.
        """
        settings = []
        modifier_name = modifier.name
        self._set_parameters_impl(modifier, modifier_parameters, settings, modifier_name)
    def _add_modifier(self, test_object, modifier_spec: ModifierSpec):
        """
        Add modifier to object.
        :param test_object: bpy.types.Object - Blender object to apply modifier on.
        :param modifier_spec: ModifierSpec - ModifierSpec object with parameters
        """
        # Modifier types whose result must be baked (see _bake_current_simulation).
        bakers_list = ['CLOTH', 'SOFT_BODY', 'DYNAMIC_PAINT', 'FLUID']
        scene = bpy.context.scene
        scene.frame_set(1)
        modifier = test_object.modifiers.new(modifier_spec.modifier_name,
                                             modifier_spec.modifier_type)
        if modifier is None:
            raise Exception("This modifier type is already added on the Test Object, please remove it and try again.")
        if self.verbose:
            print("Created modifier '{}' of type '{}'.".
                  format(modifier_spec.modifier_name, modifier_spec.modifier_type))
        # Special case for Dynamic Paint, need to toggle Canvas on.
        if modifier.type == "DYNAMIC_PAINT":
            bpy.ops.dpaint.type_toggle(type='CANVAS')
        self.set_parameters(modifier, modifier_spec.modifier_parameters)
        if modifier.type in bakers_list:
            self._bake_current_simulation(test_object, modifier.name, modifier_spec.frame_end)
        scene.frame_set(modifier_spec.frame_end)
    def _bake_current_simulation(self, test_object, test_modifier_name, frame_end):
        """
        FLUID: Bakes the simulation
        SOFT BODY, CLOTH, DYNAMIC PAINT: Overrides the point_cache context and then bakes.
        :param test_object: bpy.types.Object - object carrying the simulation modifier.
        :param test_modifier_name: str - name of the modifier whose cache is baked.
        :param frame_end: int - last frame of the point cache for cloth/soft body.
        """
        for scene in bpy.data.scenes:
            for modifier in test_object.modifiers:
                if modifier.type == 'FLUID':
                    bpy.ops.fluid.bake_all()
                    break
                elif modifier.type == 'CLOTH' or modifier.type == 'SOFT_BODY':
                    test_object.modifiers[test_modifier_name].point_cache.frame_end = frame_end
                    override_setting = modifier.point_cache
                    # Context-override dict telling the bake operator which
                    # scene/object/point_cache to act on.
                    override = {'scene': scene, 'active_object': test_object, 'point_cache': override_setting}
                    bpy.ops.ptcache.bake(override, bake=True)
                    break
                elif modifier.type == 'DYNAMIC_PAINT':
                    dynamic_paint_setting = modifier.canvas_settings.canvas_surfaces.active
                    override_setting = dynamic_paint_setting.point_cache
                    override = {'scene': scene, 'active_object': test_object, 'point_cache': override_setting}
                    bpy.ops.ptcache.bake(override, bake=True)
                    break
    def _apply_particle_system(self, test_object, particle_sys_spec: ParticleSystemSpec):
        """
        Applies Particle System settings to test objects
        """
        bpy.context.scene.frame_set(1)
        bpy.ops.object.select_all(action='DESELECT')
        test_object.modifiers.new(particle_sys_spec.modifier_name, particle_sys_spec.modifier_type)
        settings_name = test_object.particle_systems.active.settings.name
        particle_setting = bpy.data.particles[settings_name]
        if self.verbose:
            print("Created modifier '{}' of type '{}'.".
                  format(particle_sys_spec.modifier_name, particle_sys_spec.modifier_type))
        for param_name in particle_sys_spec.modifier_parameters:
            try:
                # "seed" lives on the particle system instance; everything else on the settings datablock.
                if param_name == "seed":
                    system_setting = test_object.particle_systems[particle_sys_spec.modifier_name]
                    setattr(system_setting, param_name, particle_sys_spec.modifier_parameters[param_name])
                else:
                    setattr(particle_setting, param_name, particle_sys_spec.modifier_parameters[param_name])
                if self.verbose:
                    print("\t set parameter '{}' with value '{}'".
                          format(param_name, particle_sys_spec.modifier_parameters[param_name]))
            except AttributeError:
                # Clean up first
                bpy.ops.object.delete()
                raise AttributeError("Modifier '{}' has no parameter named '{}'".
                                     format(particle_sys_spec.modifier_type, param_name))
        bpy.context.scene.frame_set(particle_sys_spec.frame_end)
        test_object.select_set(True)
        # Convert the emitted particles into real geometry and merge it into the test object.
        bpy.ops.object.duplicates_make_real()
        test_object.select_set(True)
        bpy.ops.object.join()
        if self.apply_modifier:
            self._apply_modifier(test_object, particle_sys_spec.modifier_name)
    def _do_selection(self, mesh: bpy.types.Mesh, select_mode: str, selection: set):
        """
        Do selection on a mesh
        :param mesh: bpy.types.Mesh - input mesh
        :param select_mode: str - selection mode. Must be 'VERT', 'EDGE' or 'FACE'
        :param selection: set - indices of selection.
        Example: select_mode='VERT' and selection={1,2,3} selects vertices 1, 2 and 3 of input mesh
        """
        # deselect all
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.context.tool_settings.mesh_select_mode = (select_mode == 'VERT',
                                                      select_mode == 'EDGE',
                                                      select_mode == 'FACE')
        items = (mesh.vertices if select_mode == 'VERT'
                 else mesh.edges if select_mode == 'EDGE'
                 else mesh.polygons if select_mode == 'FACE'
                 else None)
        if items is None:
            raise ValueError("Invalid selection mode")
        for index in selection:
            items[index].select = True
    def _apply_operator_edit_mode(self, test_object, operator: OperatorSpecEditMode):
        """
        Apply operator on test object.
        :param test_object: bpy.types.Object - Blender object to apply operator on.
        :param operator: OperatorSpecEditMode - OperatorSpecEditMode object with parameters.
        """
        self._do_selection(test_object.data, operator.select_mode, operator.selection)
        # Apply operator in edit mode.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_mode(type=operator.select_mode)
        mesh_operator = getattr(bpy.ops.mesh, operator.operator_name)
        try:
            retval = mesh_operator(**operator.operator_parameters)
        except AttributeError:
            raise AttributeError("bpy.ops.mesh has no attribute {}".format(operator.operator_name))
        except TypeError as ex:
            raise TypeError("Incorrect operator parameters {!r} raised {!r}".format(operator.operator_parameters, ex))
        if retval != {'FINISHED'}:
            raise RuntimeError("Unexpected operator return value: {}".format(retval))
        if self.verbose:
            print("Applied {}".format(operator))
        bpy.ops.object.mode_set(mode='OBJECT')
    def _apply_operator_object_mode(self, operator: OperatorSpecObjectMode):
        """
        Applies the object operator.
        :param operator: OperatorSpecObjectMode - operator name plus its keyword arguments.
        """
        bpy.ops.object.mode_set(mode='OBJECT')
        object_operator = getattr(bpy.ops.object, operator.operator_name)
        try:
            retval = object_operator(**operator.operator_parameters)
        except AttributeError:
            raise AttributeError("bpy.ops.object has no attribute {}".format(operator.operator_name))
        except TypeError as ex:
            raise TypeError("Incorrect operator parameters {!r} raised {!r}".format(operator.operator_parameters, ex))
        if retval != {'FINISHED'}:
            raise RuntimeError("Unexpected operator return value: {}".format(retval))
        if self.verbose:
            print("Applied operator {}".format(operator))
    def _apply_deform_modifier(self, test_object, operation):
        """
        Apply a DeformModifierSpec: add its modifiers (plus optional object
        operator), advance to the spec's frame, then optionally apply them.
        :param operation: DeformModifierSpec - modifiers and optional object operator.
        """
        scene = bpy.context.scene
        scene.frame_set(1)
        bpy.ops.object.mode_set(mode='OBJECT')
        modifier_operations_list = operation.modifier_list
        modifier_names = []
        object_operations = operation.object_operator_spec
        for modifier_operations in modifier_operations_list:
            if isinstance(modifier_operations, ModifierSpec):
                self._add_modifier(test_object, modifier_operations)
                modifier_names.append(modifier_operations.modifier_name)
        if isinstance(object_operations, OperatorSpecObjectMode):
            self._apply_operator_object_mode(object_operations)
        scene.frame_set(operation.frame_number)
        if self.apply_modifier:
            for mod_name in modifier_names:
                self._apply_modifier(test_object, mod_name)
    def run_test(self) -> bool:
        """
        Apply operations in self.operations_stack on self.test_object and compare the
        resulting mesh with self.expected_object.data
        :return: bool - True if the test passed, False otherwise.
        """
        self._test_updated = False
        bpy.context.view_layer.objects.active = self.test_object
        # Duplicate test object.
        bpy.ops.object.mode_set(mode="OBJECT")
        bpy.ops.object.select_all(action="DESELECT")
        bpy.context.view_layer.objects.active = self.test_object
        self.test_object.select_set(True)
        bpy.ops.object.duplicate()
        evaluated_test_object = bpy.context.active_object
        evaluated_test_object.name = "evaluated_object"
        if self.verbose:
            print()
            print(evaluated_test_object.name, "is set to active")
        # Add modifiers and operators.
        for operation in self.operations_stack:
            if isinstance(operation, ModifierSpec):
                self._add_modifier(evaluated_test_object, operation)
                if self.apply_modifier:
                    self._apply_modifier(evaluated_test_object, operation.modifier_name)
            elif isinstance(operation, OperatorSpecEditMode):
                self._apply_operator_edit_mode(evaluated_test_object, operation)
            elif isinstance(operation, OperatorSpecObjectMode):
                self._apply_operator_object_mode(operation)
            elif isinstance(operation, DeformModifierSpec):
                self._apply_deform_modifier(evaluated_test_object, operation)
            elif isinstance(operation, ParticleSystemSpec):
                self._apply_particle_system(evaluated_test_object, operation)
            else:
                raise ValueError("Expected operation of type {} or {} or {} or {}. Got {}".
                                 format(type(ModifierSpec), type(OperatorSpecEditMode),
                                        type(OperatorSpecObjectMode), type(ParticleSystemSpec), type(operation)))
        # Compare resulting mesh with expected one.
        # Compare only when self.do_compare is set to True, it is set to False for run-test and returns.
        if not self.do_compare:
            print("Meshes/objects are not compared, compare evaluated and expected object in Blender for "
                  "visualization only.")
            return False
        if self.verbose:
            print("Comparing expected mesh with resulting mesh...")
        evaluated_test_mesh = evaluated_test_object.data
        expected_mesh = self.expected_object.data
        if self.threshold:
            compare_result = evaluated_test_mesh.unit_test_compare(mesh=expected_mesh, threshold=self.threshold)
        else:
            compare_result = evaluated_test_mesh.unit_test_compare(mesh=expected_mesh)
        compare_success = (compare_result == 'Same')
        # The geometry comparison above ignores selection state; check it separately.
        selected_evaluatated_verts = [v.index for v in evaluated_test_mesh.vertices if v.select]
        selected_expected_verts = [v.index for v in expected_mesh.vertices if v.select]
        if selected_evaluatated_verts != selected_expected_verts:
            compare_result = "Selection doesn't match"
            compare_success = False
        # Also check if invalid geometry (which is never expected) had to be corrected...
        validation_success = not evaluated_test_mesh.validate(verbose=True)
        if compare_success and validation_success:
            if self.verbose:
                print("Success!")
            # Clean up.
            if self.verbose:
                print("Cleaning up...")
            # Delete evaluated_test_object.
            bpy.ops.object.delete()
            return True
        else:
            return self._on_failed_test(compare_result, validation_success, evaluated_test_object)
class RunTest:
    """
    Helper that stores a collection of modifier/operator test cases and executes them.

    Example usage:

    >>> modifier_list = [
    >>>     ModifierSpec("firstSUBSURF", "SUBSURF", {"quality": 5}),
    >>>     ModifierSpec("firstSOLIDIFY", "SOLIDIFY", {"thickness_clamp": 0.9, "thickness": 1})
    >>> ]
    >>> operator_list = [
    >>>     OperatorSpecEditMode("delete_edgeloop", {}, "EDGE", MONKEY_LOOP_EDGE),
    >>> ]
    >>> tests = [
    >>>     MeshTest("Test1", "testCube", "expectedCube", modifier_list),
    >>>     MeshTest("Test2", "testCube_2", "expectedCube_2", modifier_list),
    >>>     MeshTest("MonkeyDeleteEdge", "testMonkey", "expectedMonkey", operator_list)
    >>> ]
    >>> modifiers_test = RunTest(tests)
    >>> modifiers_test.run_all_tests()
    """

    def __init__(self, tests, apply_modifiers=False, do_compare=False):
        """
        Construct a test runner.

        :param tests: list - modifier or operator test cases (MeshTest instances);
                      every case must carry a unique test_name.
        :param apply_modifiers: bool - when True, force each executed test to apply its modifiers.
        :param do_compare: bool - when True, force each executed test to compare against
                           its expected object.
        """
        self.tests = tests
        self._ensure_unique_test_name_or_raise_error()
        self.apply_modifiers = apply_modifiers
        self.do_compare = do_compare
        self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
        self._failed_tests_list = []

    def _ensure_unique_test_name_or_raise_error(self):
        """
        Raise a ValueError on the first test name that appears more than once.
        """
        seen_names = set()
        for test_case in self.tests:
            name = test_case.test_name
            if name in seen_names:
                raise ValueError("{} is a duplicate, write a new unique name.".format(name))
            seen_names.add(name)

    def run_all_tests(self):
        """
        Execute every stored test case. Raises an exception if any of them fails.
        """
        for ordinal, test_case in enumerate(self.tests, start=1):
            name = test_case.test_name
            if self.verbose:
                print()
                print("Running test {}/{}: {}...".format(ordinal, len(self.tests), name))
            if not self.run_test(name):
                self._failed_tests_list.append(name)

        if self._failed_tests_list:
            print("\nFollowing tests failed: {}".format(self._failed_tests_list))

            blender_path = bpy.app.binary_path
            blend_path = bpy.data.filepath
            caller_frame = inspect.stack()[1]
            caller_module = inspect.getmodule(caller_frame[0])
            python_path = caller_module.__file__

            print("Run following command to open Blender and run the failing test:")
            print("{} {} --python {} -- {} {}"
                  .format(blender_path, blend_path, python_path, "--run-test", "<test_name>"))
            raise Exception("Tests {} failed".format(self._failed_tests_list))

    def run_test(self, test_name: str):
        """
        Run a single test case identified by its name.

        :param test_name: str - name of the test case to run.
        :return: bool - True if the test passed, False otherwise.
        """
        matching = [t for t in self.tests if t.test_name == test_name]
        if not matching:
            raise Exception('No test called {} found!'.format(test_name))
        test = matching[0]

        if self.apply_modifiers:
            test.apply_modifier = True
        if self.do_compare:
            test.do_compare = True

        success = test.run_test()
        if test.is_test_updated():
            # The blend file was updated during the run, so execute the test once more.
            success = test.run_test()
        return success
| 43.898907 | 121 | 0.643835 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# A framework to run regression tests on mesh modifiers and operators based on howardt's mesh_ops_test.py
#
# General idea:
# A test is:
# Object mode
# Select <test_object>
# Duplicate the object
# Select the object
# Apply operation for each operation in <operations_stack> with given parameters
# (an operation is either a modifier or an operator)
# test_mesh = <test_object>.data
# run test_mesh.unit_test_compare(<expected object>.data)
# delete the duplicate object
#
# The words in angle brackets are parameters of the test, and are specified in
# the main class MeshTest.
#
# If the environment variable BLENDER_TEST_UPDATE is set to 1, the <expected_object>
# is updated with the new test result.
# Tests are verbose when the environment variable BLENDER_VERBOSE is set.
import bpy
import functools
import inspect
import os
# Output from this module and from blender itself will occur during tests.
# We need to flush python so that the output is properly interleaved, otherwise
# blender's output for one test will end up showing in the middle of another test...
# NOTE: this rebinds the module-global ``print``; every print() in this file flushes.
print = functools.partial(print, flush=True)
class ModifierSpec:
    """
    Describes a single Generate, Deform or Physics modifier and its parameters.
    """

    def __init__(self, modifier_name: str, modifier_type: str, modifier_parameters: dict, frame_end=0):
        """
        Construct a modifier specification.

        :param modifier_name: str - name of object modifier, e.g. "myFirstSubsurfModif"
        :param modifier_type: str - type of object modifier, e.g. "SUBSURF"
        :param modifier_parameters: dict - {name: val} modifier parameters, e.g. {"quality": 4}
        :param frame_end: int - frame at which a simulation is baked or the modifier is applied.
        """
        self.modifier_name = modifier_name
        self.modifier_type = modifier_type
        self.modifier_parameters = modifier_parameters
        self.frame_end = frame_end

    def __str__(self):
        return "Modifier: {} of type {} with parameters: {}".format(
            self.modifier_name, self.modifier_type, self.modifier_parameters)
class ParticleSystemSpec:
    """
    Describes a Particle System modifier and its parameters.
    """

    def __init__(self, modifier_name: str, modifier_type: str, modifier_parameters: dict, frame_end: int):
        """
        Construct a particle system specification.

        :param modifier_name: str - name of object modifier, e.g. "Particles"
        :param modifier_type: str - type of object modifier, e.g. "PARTICLE_SYSTEM"
        :param modifier_parameters: dict - {name: val} modifier parameters, e.g. {"seed": 1}
        :param frame_end: int - last frame of the simulation at which the modifier is applied
        """
        self.modifier_name = modifier_name
        self.modifier_type = modifier_type
        self.modifier_parameters = modifier_parameters
        self.frame_end = frame_end

    def __str__(self):
        return ("Physics Modifier: {} of type {} with parameters: {} with frame end: {}"
                .format(self.modifier_name, self.modifier_type,
                        self.modifier_parameters, self.frame_end))
class OperatorSpecEditMode:
    """
    Describes one edit-mode mesh operator, its parameters and the selection it acts on.
    """

    def __init__(self, operator_name: str, operator_parameters: dict, select_mode: str, selection: set):
        """
        Construct an OperatorSpecEditMode. Raises ValueError if select_mode is invalid.

        :param operator_name: str - name of a mesh operator from bpy.ops.mesh, e.g. "bevel" or "fill"
        :param operator_parameters: dict - {name: val} dictionary of operator parameters.
        :param select_mode: str - mesh selection mode, one of 'VERT', 'EDGE' or 'FACE'
        :param selection: set - indices of vertices/edges/faces to select, e.g. [0, 9, 10].
        """
        self.operator_name = operator_name
        self.operator_parameters = operator_parameters
        if select_mode not in ('VERT', 'EDGE', 'FACE'):
            raise ValueError("select_mode must be either VERT, EDGE or FACE")
        self.select_mode = select_mode
        self.selection = selection

    def __str__(self):
        return ("Operator: {} with parameters: {} in selection mode: {}, selecting {}"
                .format(self.operator_name, self.operator_parameters,
                        self.select_mode, self.selection))
class OperatorSpecObjectMode:
    """
    Describes an object-mode operator and its parameters. Helper class for DeformModifierSpec.

    Complements OperatorSpecEditMode for operations that must run in Object Mode
    rather than Edit Mode.
    """

    def __init__(self, operator_name: str, operator_parameters: dict):
        """
        :param operator_name: str - name of the object operator from bpy.ops.object,
                              e.g. "shade_smooth" or "shape_keys"
        :param operator_parameters: dict - operator parameters.
        """
        self.operator_name = operator_name
        self.operator_parameters = operator_parameters

    def __str__(self):
        return "Operator: {} with parameters: {}".format(self.operator_name, self.operator_parameters)
class DeformModifierSpec:
    """
    Bundles a list of deform modifiers with an optional object-mode operator.

    Used for deform modifiers that need an object operator applied alongside them.
    """

    def __init__(self, frame_number: int, modifier_list: list, object_operator_spec: OperatorSpecObjectMode = None):
        """
        Construct a Deform Modifier spec (for user input).

        :param frame_number: int - frame at which the animated keyframe is inserted
        :param modifier_list: list - ModifierSpec objects to add
        :param object_operator_spec: OperatorSpecObjectMode - optional object operator
        """
        self.frame_number = frame_number
        self.modifier_list = modifier_list
        self.object_operator_spec = object_operator_spec

    def __str__(self):
        return "Modifier: {} with object operator {}".format(self.modifier_list, self.object_operator_spec)
class MeshTest:
    """
    A mesh testing class targeted at testing modifiers and operators on a single object.
    It holds a stack of mesh operations, i.e. modifiers or operators. The test is executed using
    the public method run_test().
    """

    def __init__(
            self,
            test_name: str,
            test_object_name: str,
            expected_object_name: str,
            operations_stack=None,
            apply_modifiers=False,
            do_compare=False,
            threshold=None
    ):
        """
        Constructs a MeshTest object. Raises a KeyError if objects with names expected_object_name
        or test_object_name don't exist.

        :param test_name: str - unique test name identifier.
        :param test_object_name: str - Name of object of mesh type to run the operations on.
        :param expected_object_name: str - Name of object of mesh type that has the expected
                                     geometry after running the operations.
        :param operations_stack: list - stack holding operations to perform on the test_object.
        :param apply_modifiers: bool - True if we want to apply the modifiers right after adding
                                them to the object (or, for DeformModifierSpec, after the spec's
                                operations ran). Affects ModifierSpec and DeformModifierSpec.
        :param do_compare: bool - True if we want to compare the test and expected objects,
                           False otherwise.
        :param threshold: exponent - allow variations and accept difference to a certain degree.
        """
        if operations_stack is None:
            operations_stack = []
        for operation in operations_stack:
            # One isinstance call with a tuple instead of a chained `or` expression.
            if not isinstance(operation, (ModifierSpec, OperatorSpecEditMode, OperatorSpecObjectMode,
                                          DeformModifierSpec, ParticleSystemSpec)):
                # Name every accepted spec class. The previous message formatted
                # type(ModifierSpec) etc., which printed "<class 'type'>" five times,
                # and it listed only four of the five accepted types.
                raise ValueError("Expected operation of type {} or {} or {} or {} or {}. Got {}".
                                 format(ModifierSpec, OperatorSpecEditMode, OperatorSpecObjectMode,
                                        DeformModifierSpec, ParticleSystemSpec, type(operation)))
        self.operations_stack = operations_stack
        self.apply_modifier = apply_modifiers
        self.do_compare = do_compare
        self.threshold = threshold
        self.test_name = test_name
        self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
        self.update = os.getenv('BLENDER_TEST_UPDATE') is not None

        # Initialize test objects.
        objects = bpy.data.objects
        self.test_object = objects[test_object_name]
        self.expected_object = objects[expected_object_name]
        if self.verbose:
            print("Found test object {}".format(test_object_name))
            print("Found test object {}".format(expected_object_name))

        # Private flag to indicate whether the blend file was updated after the test.
        self._test_updated = False

    def set_test_object(self, test_object_name):
        """
        Set test object for the test. Raises a KeyError if object with given name does not exist.

        :param test_object_name: name of test object to run operations on.
        """
        objects = bpy.data.objects
        self.test_object = objects[test_object_name]

    def set_expected_object(self, expected_object_name):
        """
        Set expected object for the test. Raises a KeyError if object with given name does not exist.

        :param expected_object_name: Name of expected object.
        """
        objects = bpy.data.objects
        self.expected_object = objects[expected_object_name]

    def _on_failed_test(self, compare_result, validation_success, evaluated_test_object):
        """
        Handle a failed comparison: in BLENDER_TEST_UPDATE mode replace the expected
        object with the evaluated one and save the blend file, otherwise report failure.

        :return: bool - True when the expected object was updated, False otherwise.
        """
        if self.update and validation_success:
            if self.verbose:
                print("Test failed as expected. Updating expected mesh...")

            # Replace expected object with object we ran operations on, i.e. evaluated_test_object.
            evaluated_test_object.location = self.expected_object.location
            expected_object_name = self.expected_object.name
            evaluated_selection = {v.index for v in evaluated_test_object.data.vertices if v.select}

            bpy.data.objects.remove(self.expected_object, do_unlink=True)
            evaluated_test_object.name = expected_object_name
            self._do_selection(evaluated_test_object.data, "VERT", evaluated_selection)

            # Save file.
            bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)

            self._test_updated = True

            # Set new expected object.
            self.expected_object = evaluated_test_object
            return True
        else:
            print("Test comparison result: {}".format(compare_result))
            print("Test validation result: {}".format(validation_success))
            print("Resulting object mesh '{}' did not match expected object '{}' from file {}".
                  format(evaluated_test_object.name, self.expected_object.name, bpy.data.filepath))
            return False

    def is_test_updated(self):
        """
        Check whether running the test with BLENDER_TEST_UPDATE actually modified the .blend test file.

        :return: bool - True if blend file has been updated. False otherwise.
        """
        return self._test_updated

    def _set_parameters_impl(self, modifier, modifier_parameters, nested_settings_path, modifier_name):
        """
        Doing a depth first traversal of the modifier parameters and setting their values.

        :param modifier: Of type modifier, it's altered to become a setting in recursion.
        :param modifier_parameters: dict or sequence - simple/nested dictionary of modifier parameters.
        :param nested_settings_path: list (stack) - helps in tracing the path to each node.
        :param modifier_name: str - name used in the error message on a bad parameter.
        """
        if not isinstance(modifier_parameters, dict):
            # Leaf value reached: walk the recorded path down the modifier's attributes.
            param_setting = None
            for i, setting in enumerate(nested_settings_path):
                # We want to set the attribute only when we have reached the last setting.
                # Applying of intermediate settings is meaningless.
                if i == len(nested_settings_path) - 1:
                    setattr(modifier, setting, modifier_parameters)

                elif hasattr(modifier, setting):
                    param_setting = getattr(modifier, setting)
                    # getattr doesn't accept canvas_surfaces["Surface"], but we need to pass it to setattr.
                    if setting == "canvas_surfaces":
                        modifier = param_setting.active
                    else:
                        modifier = param_setting
                else:
                    # Clean up first
                    bpy.ops.object.delete()
                    raise Exception("Modifier '{}' has no parameter named '{}'".
                                    format(modifier_name, setting))

            # It pops the current node before moving on to its sibling.
            nested_settings_path.pop()
            return

        for key in modifier_parameters:
            nested_settings_path.append(key)
            self._set_parameters_impl(modifier, modifier_parameters[key], nested_settings_path, modifier_name)

        if nested_settings_path:
            nested_settings_path.pop()

    def set_parameters(self, modifier, modifier_parameters):
        """
        Wrapper for _set_parameters_impl.
        """
        settings = []
        modifier_name = modifier.name
        self._set_parameters_impl(modifier, modifier_parameters, settings, modifier_name)

    def _add_modifier(self, test_object, modifier_spec: ModifierSpec):
        """
        Add modifier to object.

        :param test_object: bpy.types.Object - Blender object to apply modifier on.
        :param modifier_spec: ModifierSpec - ModifierSpec object with parameters.
        """
        bakers_list = ['CLOTH', 'SOFT_BODY', 'DYNAMIC_PAINT', 'FLUID']
        scene = bpy.context.scene
        scene.frame_set(1)
        modifier = test_object.modifiers.new(modifier_spec.modifier_name,
                                             modifier_spec.modifier_type)

        if modifier is None:
            raise Exception("This modifier type is already added on the Test Object, please remove it and try again.")

        if self.verbose:
            print("Created modifier '{}' of type '{}'.".
                  format(modifier_spec.modifier_name, modifier_spec.modifier_type))

        # Special case for Dynamic Paint, need to toggle Canvas on.
        if modifier.type == "DYNAMIC_PAINT":
            bpy.ops.dpaint.type_toggle(type='CANVAS')

        self.set_parameters(modifier, modifier_spec.modifier_parameters)

        if modifier.type in bakers_list:
            self._bake_current_simulation(test_object, modifier.name, modifier_spec.frame_end)

        scene.frame_set(modifier_spec.frame_end)

    def _apply_modifier(self, test_object, modifier_name):
        """
        Apply a single modifier by name. Curves are converted to mesh, which
        applies modifiers implicitly.
        """
        # Modifier automatically gets applied when converting from Curve to Mesh.
        if test_object.type == 'CURVE':
            bpy.ops.object.convert(target='MESH')
        elif test_object.type == 'MESH':
            bpy.ops.object.modifier_apply(modifier=modifier_name)
        else:
            raise Exception("This object type is not yet supported!")

    def _bake_current_simulation(self, test_object, test_modifier_name, frame_end):
        """
        FLUID: Bakes the simulation.
        SOFT BODY, CLOTH, DYNAMIC PAINT: Overrides the point_cache context and then bakes.
        """
        for scene in bpy.data.scenes:
            for modifier in test_object.modifiers:
                if modifier.type == 'FLUID':
                    bpy.ops.fluid.bake_all()
                    break

                elif modifier.type == 'CLOTH' or modifier.type == 'SOFT_BODY':
                    test_object.modifiers[test_modifier_name].point_cache.frame_end = frame_end
                    override_setting = modifier.point_cache
                    override = {'scene': scene, 'active_object': test_object, 'point_cache': override_setting}
                    bpy.ops.ptcache.bake(override, bake=True)
                    break

                elif modifier.type == 'DYNAMIC_PAINT':
                    dynamic_paint_setting = modifier.canvas_settings.canvas_surfaces.active
                    override_setting = dynamic_paint_setting.point_cache
                    override = {'scene': scene, 'active_object': test_object, 'point_cache': override_setting}
                    bpy.ops.ptcache.bake(override, bake=True)
                    break

    def _apply_particle_system(self, test_object, particle_sys_spec: ParticleSystemSpec):
        """
        Applies Particle System settings to test objects.
        """
        bpy.context.scene.frame_set(1)
        bpy.ops.object.select_all(action='DESELECT')

        test_object.modifiers.new(particle_sys_spec.modifier_name, particle_sys_spec.modifier_type)

        settings_name = test_object.particle_systems.active.settings.name
        particle_setting = bpy.data.particles[settings_name]
        if self.verbose:
            print("Created modifier '{}' of type '{}'.".
                  format(particle_sys_spec.modifier_name, particle_sys_spec.modifier_type))

        for param_name in particle_sys_spec.modifier_parameters:
            try:
                if param_name == "seed":
                    # "seed" lives on the particle system itself, not on its settings datablock.
                    system_setting = test_object.particle_systems[particle_sys_spec.modifier_name]
                    setattr(system_setting, param_name, particle_sys_spec.modifier_parameters[param_name])
                else:
                    setattr(particle_setting, param_name, particle_sys_spec.modifier_parameters[param_name])

                if self.verbose:
                    print("\t set parameter '{}' with value '{}'".
                          format(param_name, particle_sys_spec.modifier_parameters[param_name]))
            except AttributeError:
                # Clean up first
                bpy.ops.object.delete()
                raise AttributeError("Modifier '{}' has no parameter named '{}'".
                                     format(particle_sys_spec.modifier_type, param_name))

        bpy.context.scene.frame_set(particle_sys_spec.frame_end)
        test_object.select_set(True)
        bpy.ops.object.duplicates_make_real()
        test_object.select_set(True)
        bpy.ops.object.join()
        if self.apply_modifier:
            self._apply_modifier(test_object, particle_sys_spec.modifier_name)

    def _do_selection(self, mesh: bpy.types.Mesh, select_mode: str, selection: set):
        """
        Do selection on a mesh.

        :param mesh: bpy.types.Mesh - input mesh
        :param select_mode: str - selection mode. Must be 'VERT', 'EDGE' or 'FACE'
        :param selection: set - indices of selection.

        Example: select_mode='VERT' and selection={1,2,3} selects vertices 1, 2 and 3 of input mesh.
        """
        # Deselect all.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.object.mode_set(mode='OBJECT')

        bpy.context.tool_settings.mesh_select_mode = (select_mode == 'VERT',
                                                      select_mode == 'EDGE',
                                                      select_mode == 'FACE')

        items = (mesh.vertices if select_mode == 'VERT'
                 else mesh.edges if select_mode == 'EDGE'
                 else mesh.polygons if select_mode == 'FACE'
                 else None)
        if items is None:
            raise ValueError("Invalid selection mode")
        for index in selection:
            items[index].select = True

    def _apply_operator_edit_mode(self, test_object, operator: OperatorSpecEditMode):
        """
        Apply operator on test object.

        :param test_object: bpy.types.Object - Blender object to apply operator on.
        :param operator: OperatorSpecEditMode - OperatorSpecEditMode object with parameters.
        """
        self._do_selection(test_object.data, operator.select_mode, operator.selection)

        # Apply operator in edit mode.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_mode(type=operator.select_mode)
        mesh_operator = getattr(bpy.ops.mesh, operator.operator_name)

        try:
            retval = mesh_operator(**operator.operator_parameters)
        except AttributeError:
            raise AttributeError("bpy.ops.mesh has no attribute {}".format(operator.operator_name))
        except TypeError as ex:
            raise TypeError("Incorrect operator parameters {!r} raised {!r}".format(operator.operator_parameters, ex))

        if retval != {'FINISHED'}:
            raise RuntimeError("Unexpected operator return value: {}".format(retval))
        if self.verbose:
            print("Applied {}".format(operator))

        bpy.ops.object.mode_set(mode='OBJECT')

    def _apply_operator_object_mode(self, operator: OperatorSpecObjectMode):
        """
        Applies the object operator.
        """
        bpy.ops.object.mode_set(mode='OBJECT')
        object_operator = getattr(bpy.ops.object, operator.operator_name)

        try:
            retval = object_operator(**operator.operator_parameters)
        except AttributeError:
            raise AttributeError("bpy.ops.object has no attribute {}".format(operator.operator_name))
        except TypeError as ex:
            raise TypeError("Incorrect operator parameters {!r} raised {!r}".format(operator.operator_parameters, ex))

        if retval != {'FINISHED'}:
            raise RuntimeError("Unexpected operator return value: {}".format(retval))
        if self.verbose:
            print("Applied operator {}".format(operator))

    def _apply_deform_modifier(self, test_object, operation):
        """
        Apply a DeformModifierSpec: add its modifiers, run its optional object
        operator, jump to its keyframe and (optionally) apply the modifiers.

        :param operation: DeformModifierSpec - modifiers plus an optional object operator.
        """
        scene = bpy.context.scene
        scene.frame_set(1)
        bpy.ops.object.mode_set(mode='OBJECT')
        modifier_operations_list = operation.modifier_list
        modifier_names = []
        object_operations = operation.object_operator_spec
        for modifier_operations in modifier_operations_list:
            if isinstance(modifier_operations, ModifierSpec):
                self._add_modifier(test_object, modifier_operations)
                modifier_names.append(modifier_operations.modifier_name)

        if isinstance(object_operations, OperatorSpecObjectMode):
            self._apply_operator_object_mode(object_operations)

        scene.frame_set(operation.frame_number)

        if self.apply_modifier:
            for mod_name in modifier_names:
                self._apply_modifier(test_object, mod_name)

    def run_test(self):
        """
        Apply operations in self.operations_stack on self.test_object and compare the
        resulting mesh with self.expected_object.data.

        :return: bool - True if the test passed, False otherwise.
        """
        self._test_updated = False
        bpy.context.view_layer.objects.active = self.test_object

        # Duplicate test object.
        bpy.ops.object.mode_set(mode="OBJECT")
        bpy.ops.object.select_all(action="DESELECT")
        bpy.context.view_layer.objects.active = self.test_object

        self.test_object.select_set(True)
        bpy.ops.object.duplicate()
        evaluated_test_object = bpy.context.active_object
        evaluated_test_object.name = "evaluated_object"
        if self.verbose:
            print()
            print(evaluated_test_object.name, "is set to active")

        # Add modifiers and operators.
        for operation in self.operations_stack:
            if isinstance(operation, ModifierSpec):
                self._add_modifier(evaluated_test_object, operation)
                if self.apply_modifier:
                    self._apply_modifier(evaluated_test_object, operation.modifier_name)

            elif isinstance(operation, OperatorSpecEditMode):
                self._apply_operator_edit_mode(evaluated_test_object, operation)

            elif isinstance(operation, OperatorSpecObjectMode):
                self._apply_operator_object_mode(operation)

            elif isinstance(operation, DeformModifierSpec):
                self._apply_deform_modifier(evaluated_test_object, operation)

            elif isinstance(operation, ParticleSystemSpec):
                self._apply_particle_system(evaluated_test_object, operation)

            else:
                # Same corrected message as in __init__: name the five accepted spec types.
                raise ValueError("Expected operation of type {} or {} or {} or {} or {}. Got {}".
                                 format(ModifierSpec, OperatorSpecEditMode, OperatorSpecObjectMode,
                                        DeformModifierSpec, ParticleSystemSpec, type(operation)))

        # Compare resulting mesh with expected one.
        # Compare only when self.do_compare is set to True, it is set to False for run-test and returns.
        if not self.do_compare:
            print("Meshes/objects are not compared, compare evaluated and expected object in Blender for "
                  "visualization only.")
            return False

        if self.verbose:
            print("Comparing expected mesh with resulting mesh...")
        evaluated_test_mesh = evaluated_test_object.data
        expected_mesh = self.expected_object.data
        if self.threshold:
            compare_result = evaluated_test_mesh.unit_test_compare(mesh=expected_mesh, threshold=self.threshold)
        else:
            compare_result = evaluated_test_mesh.unit_test_compare(mesh=expected_mesh)
        compare_success = (compare_result == 'Same')

        selected_evaluated_verts = [v.index for v in evaluated_test_mesh.vertices if v.select]
        selected_expected_verts = [v.index for v in expected_mesh.vertices if v.select]
        if selected_evaluated_verts != selected_expected_verts:
            compare_result = "Selection doesn't match"
            compare_success = False

        # Also check if invalid geometry (which is never expected) had to be corrected...
        validation_success = not evaluated_test_mesh.validate(verbose=True)

        if compare_success and validation_success:
            if self.verbose:
                print("Success!")

            # Clean up.
            if self.verbose:
                print("Cleaning up...")
            # Delete evaluated_test_object.
            bpy.ops.object.delete()
            return True

        else:
            return self._on_failed_test(compare_result, validation_success, evaluated_test_object)
class RunTest:
    """
    Helper that stores a collection of modifier/operator test cases and executes them.

    Example usage:

    >>> modifier_list = [
    >>>     ModifierSpec("firstSUBSURF", "SUBSURF", {"quality": 5}),
    >>>     ModifierSpec("firstSOLIDIFY", "SOLIDIFY", {"thickness_clamp": 0.9, "thickness": 1})
    >>> ]
    >>> operator_list = [
    >>>     OperatorSpecEditMode("delete_edgeloop", {}, "EDGE", MONKEY_LOOP_EDGE),
    >>> ]
    >>> tests = [
    >>>     MeshTest("Test1", "testCube", "expectedCube", modifier_list),
    >>>     MeshTest("Test2", "testCube_2", "expectedCube_2", modifier_list),
    >>>     MeshTest("MonkeyDeleteEdge", "testMonkey", "expectedMonkey", operator_list)
    >>> ]
    >>> modifiers_test = RunTest(tests)
    >>> modifiers_test.run_all_tests()
    """

    def __init__(self, tests, apply_modifiers=False, do_compare=False):
        """
        Construct a test runner.

        :param tests: list - modifier or operator test cases (MeshTest instances);
                      every case must carry a unique test_name.
        :param apply_modifiers: bool - when True, force each executed test to apply its modifiers.
        :param do_compare: bool - when True, force each executed test to compare against
                           its expected object.
        """
        self.tests = tests
        self._ensure_unique_test_name_or_raise_error()
        self.apply_modifiers = apply_modifiers
        self.do_compare = do_compare
        self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
        self._failed_tests_list = []

    def _ensure_unique_test_name_or_raise_error(self):
        """
        Raise a ValueError on the first test name that appears more than once.
        """
        seen_names = set()
        for test_case in self.tests:
            name = test_case.test_name
            if name in seen_names:
                raise ValueError("{} is a duplicate, write a new unique name.".format(name))
            seen_names.add(name)

    def run_all_tests(self):
        """
        Execute every stored test case. Raises an exception if any of them fails.
        """
        for ordinal, test_case in enumerate(self.tests, start=1):
            name = test_case.test_name
            if self.verbose:
                print()
                print("Running test {}/{}: {}...".format(ordinal, len(self.tests), name))
            if not self.run_test(name):
                self._failed_tests_list.append(name)

        if self._failed_tests_list:
            print("\nFollowing tests failed: {}".format(self._failed_tests_list))

            blender_path = bpy.app.binary_path
            blend_path = bpy.data.filepath
            caller_frame = inspect.stack()[1]
            caller_module = inspect.getmodule(caller_frame[0])
            python_path = caller_module.__file__

            print("Run following command to open Blender and run the failing test:")
            print("{} {} --python {} -- {} {}"
                  .format(blender_path, blend_path, python_path, "--run-test", "<test_name>"))
            raise Exception("Tests {} failed".format(self._failed_tests_list))

    def run_test(self, test_name: str):
        """
        Run a single test case identified by its name.

        :param test_name: str - name of the test case to run.
        :return: bool - True if the test passed, False otherwise.
        """
        matching = [t for t in self.tests if t.test_name == test_name]
        if not matching:
            raise Exception('No test called {} found!'.format(test_name))
        test = matching[0]

        if self.apply_modifiers:
            test.apply_modifier = True
        if self.do_compare:
            test.do_compare = True

        success = test.run_test()
        if test.is_test_updated():
            # The blend file was updated during the run, so execute the test once more.
            success = test.run_test()
        return success
| 2,583 | 0 | 189 |
0d895b5467335fc64091f5a8d915fa9ffe7457ee | 1,822 | py | Python | cli/polyaxon/automl/search_managers/random_search/manager.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/automl/search_managers/random_search/manager.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/automl/search_managers/random_search/manager.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | import copy
from functools import reduce
from operator import mul
from polyaxon.automl.matrix.utils import get_length, sample
from polyaxon.automl.search_managers.base import BaseManager
from polyaxon.automl.search_managers.spec import SuggestionSpec
from polyaxon.automl.search_managers.utils import get_random_generator
from polyaxon.schemas.polyflow.workflows import RandomSearchConfig
class RandomSearchManager(BaseManager):
    """Random search strategy manager for hyperparameter optimization."""

    # Config schema class used by BaseManager to validate this strategy's settings.
    CONFIG = RandomSearchConfig
| 37.958333 | 85 | 0.659166 | import copy
from functools import reduce
from operator import mul
from polyaxon.automl.matrix.utils import get_length, sample
from polyaxon.automl.search_managers.base import BaseManager
from polyaxon.automl.search_managers.spec import SuggestionSpec
from polyaxon.automl.search_managers.utils import get_random_generator
from polyaxon.schemas.polyflow.workflows import RandomSearchConfig
class RandomSearchManager(BaseManager):
    """Random search strategy manager for hyperparameter optimization."""

    # Config schema class used by BaseManager to validate this strategy's settings.
    CONFIG = RandomSearchConfig

    def get_suggestions(self, suggestion_params=None):
        # Draw up to `n_runs` unique random samples from the search matrix,
        # merging `suggestion_params` into every sampled parameter dict.
        # Raises ValueError when the config does not define `n_runs`.
        if not self.config.n_runs:
            raise ValueError("This search strategy requires `n_runs`.")
        suggestions = []
        suggestion_params = suggestion_params or {}
        rand_generator = get_random_generator(seed=self.config.seed)
        # Validate number of suggestions and total space
        all_discrete = True
        for v in self.config.matrix.values():
            if v.is_continuous:
                all_discrete = False
                break
        n_runs = self.config.n_runs
        if all_discrete:
            # A fully discrete matrix spans a finite cartesian product, so cap the
            # requested number of runs at the size of the search space — otherwise
            # the uniqueness loop below could never terminate.
            space = reduce(mul, [get_length(v) for v in self.config.matrix.values()])
            n_runs = self.config.n_runs if self.config.n_runs <= space else space
        while n_runs > 0:
            params = copy.deepcopy(suggestion_params)
            params.update(
                {
                    k: sample(v, rand_generator=rand_generator)
                    for k, v in self.config.matrix.items()
                }
            )
            suggestion = SuggestionSpec(params=params)
            # Only count distinct suggestions so exactly n_runs unique samples are returned.
            if suggestion not in suggestions:
                suggestions.append(suggestion)
                n_runs -= 1
        return [suggestion.params for suggestion in suggestions]
| 1,255 | 0 | 27 |
a4f994ce49cc4a42d2efab739a386ae352bb08dd | 423 | py | Python | aclark/db/migrations/0052_note__note.py | aclark4life/aclark-net-1 | e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea | [
"MIT"
] | null | null | null | aclark/db/migrations/0052_note__note.py | aclark4life/aclark-net-1 | e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea | [
"MIT"
] | null | null | null | aclark/db/migrations/0052_note__note.py | aclark4life/aclark-net-1 | e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-18 18:17
from django.db import migrations, models
| 22.263158 | 102 | 0.598109 | # Generated by Django 3.0.7 on 2020-07-18 18:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db', '0051_note_doc_type'),
]
operations = [
migrations.AddField(
model_name='note',
name='_note',
field=models.ManyToManyField(blank=True, limit_choices_to={'active': True}, to='db.Note'),
),
]
| 0 | 309 | 23 |
5653637ee356e11fdb0fc4dacc1b2b44a6d254f3 | 8,951 | py | Python | main.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | null | null | null | main.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | 2 | 2021-10-12T17:45:49.000Z | 2021-12-21T19:23:30.000Z | main.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
Początkowy moduł
"""
import argparse
import numpy
from bitmap_mapper.min_max_difference_coordinates_bitmap_mapper import MinMaxDifferenceCoordinatesBitmapMapper
from data_parsers.classify_data import ClassifyData
from data_parsers.learning_data import LearningData
from feature.simple_features.avg_size_of_hole_feature import AvgSizeOfHoleFeature
from feature.simple_features.avg_size_of_island_feature import AvgSizeOfIslandFeature
from feature.simple_features.first_quart_feature import FirstQuartFeature
from feature.simple_features.first_raw_moment_horizontal import FirstRawMomentHorizontalFeature
from feature.simple_features.first_raw_moment_vertical import FirstRawMomentVerticalFeature
from feature.simple_features.fourth_quart_feature import FourthQuartFeature
from feature.simple_features.longest_non_empty_antidiagonal_feature import LongestNonEmptyAntidiagonalFeature
from feature.simple_features.longest_non_empty_column_feature import LongestNonEmptyColumnFeature
from feature.simple_features.longest_non_empty_diagonal_feature import LongestNonEmptyDiagonalFeature
from feature.simple_features.longest_non_empty_row_feature import LongestNonEmptyRowFeature
from feature.simple_features.max_feature import MaxFeature
from feature.simple_features.max_histogram_feature import MaxHistogramFeature
from feature.simple_features.max_projection_horizontal_feature import MaxProjectionHorizontalFeature
from feature.simple_features.max_projection_horizontal_value_feature import MaxProjectionHorizontalValueFeature
from feature.simple_features.max_projection_vertical_feature import MaxProjectionVerticalFeature
from feature.simple_features.max_projection_vertical_value_feature import MaxProjectionVerticalValueFeature
from feature.simple_features.max_value_histogram_feature import MaxValueHistogramFeature
from feature.simple_features.mean_feature import MeanFeature
from feature.simple_features.median_feature import MedianFeature
from feature.simple_features.min_feature import MinFeature
from feature.simple_features.min_projection_horizontal_feature import MinProjectionHorizontalFeature
from feature.simple_features.min_projection_horizontal_value_feature import MinProjectionHorizontalValueFeature
from feature.simple_features.min_projection_vertical_feature import MinProjectionVerticalFeature
from feature.simple_features.min_projection_vertical_value_feature import MinProjectionVerticalValueFeature
from feature.simple_features.non_empty_columns_feature import NonEmptyColumnsFeature
from feature.simple_features.non_empty_rows_feature import NonEmptyRowsFeature
from feature.simple_features.number_of_holes_feature import NumberOfHolesFeature
from feature.simple_features.number_of_islands_feature import NumberOfIslandsFeature
from feature.simple_features.second_central_moment_horizontal import SecondCentralMomentHorizontalFeature
from feature.simple_features.second_central_moment_vertical import SecondCentralMomentVerticalFeature
from feature.simple_features.second_quart_feature import SecondQuartFeature
from feature.simple_features.third_quart_feature import ThirdQuartFeature
from feature_extractor.feature_extractor import FeatureExtractor
from learning import Learning, LearningClassify
def define_features() -> FeatureExtractor:
    """
    Build a FeatureExtractor pre-loaded with every feature we support.
    :return: the fully populated extractor
    """
    # Thresholds: 0.05 ~ near black, 0.95 ~ near white (registration order
    # matters, so the list below keeps the historical numbering).
    all_features = [
        MaxFeature(),  # 1
        MinFeature(),
        MeanFeature(),
        MedianFeature(),
        NonEmptyColumnsFeature(0.05),  # 5
        NonEmptyRowsFeature(0.05),
        ThirdQuartFeature(),
        SecondQuartFeature(),
        SecondCentralMomentVerticalFeature(),
        SecondCentralMomentHorizontalFeature(),  # 10
        NumberOfIslandsFeature(0.05),
        NumberOfHolesFeature(0.95),
        FirstRawMomentVerticalFeature(),
        FirstRawMomentHorizontalFeature(),
        AvgSizeOfIslandFeature(0.05),  # 15
        AvgSizeOfHoleFeature(0.95),
        FourthQuartFeature(),
        LongestNonEmptyRowFeature(0.05),
        LongestNonEmptyDiagonalFeature(0.05),
        LongestNonEmptyColumnFeature(0.05),  # 20
        LongestNonEmptyAntidiagonalFeature(0.05),
        FirstQuartFeature(),  # 22
        MaxProjectionHorizontalFeature(),
        MaxProjectionHorizontalValueFeature(),
        MaxProjectionVerticalFeature(),
        MaxProjectionVerticalValueFeature(),
        MinProjectionHorizontalFeature(),
        MinProjectionHorizontalValueFeature(),
        MinProjectionVerticalFeature(),
        MinProjectionVerticalValueFeature(),
        MaxHistogramFeature(),
        MaxValueHistogramFeature(),
    ]
    extractor = FeatureExtractor()
    for feature in all_features:
        extractor.add_feature(feature)
    return extractor
# Command-line entry point with two sub-commands: "training" and "classify".
if __name__ == "__main__":
    # Parser description is Polish for "TAIO grayscale images".
    parser = argparse.ArgumentParser(description='TAIO obrazki w skali szarosci')
    subparser = parser.add_subparsers(dest='mode')
    # Sub-command: train a model from a training and a testing dataset.
    parser_training = subparser.add_parser('training', help="Training mode")
    parser_training.add_argument("train_path", help="path to training dataset")
    parser_training.add_argument("test_path", help="path to testing dataset")
    parser_training.add_argument("-o", "--output", help="Output path for model from learning process",
                                 default="model.keras")
    # Sub-command: classify new objects with a previously trained model.
    parser_classify = subparser.add_parser('classify', help="Classification mode")
    parser_classify.add_argument("model_path", help="path to model file")
    parser_classify.add_argument("classification_path", help="path to objects to classify")
    parser_classify.add_argument("-o", "--output", help="Output path for classification result",
                                 default="output.txt")
    args = parser.parse_args()
    # When no sub-command is given, args.mode is None and nothing runs.
    if args.mode == "classify":
        classify_main(args.model_path, args.classification_path, args.output)
    elif args.mode == "training":
        train_main(args.train_path, args.test_path, args.output)
| 50.286517 | 190 | 0.790414 | #! /usr/bin/env python
"""
Początkowy moduł
"""
import argparse
import numpy
from bitmap_mapper.min_max_difference_coordinates_bitmap_mapper import MinMaxDifferenceCoordinatesBitmapMapper
from data_parsers.classify_data import ClassifyData
from data_parsers.learning_data import LearningData
from feature.simple_features.avg_size_of_hole_feature import AvgSizeOfHoleFeature
from feature.simple_features.avg_size_of_island_feature import AvgSizeOfIslandFeature
from feature.simple_features.first_quart_feature import FirstQuartFeature
from feature.simple_features.first_raw_moment_horizontal import FirstRawMomentHorizontalFeature
from feature.simple_features.first_raw_moment_vertical import FirstRawMomentVerticalFeature
from feature.simple_features.fourth_quart_feature import FourthQuartFeature
from feature.simple_features.longest_non_empty_antidiagonal_feature import LongestNonEmptyAntidiagonalFeature
from feature.simple_features.longest_non_empty_column_feature import LongestNonEmptyColumnFeature
from feature.simple_features.longest_non_empty_diagonal_feature import LongestNonEmptyDiagonalFeature
from feature.simple_features.longest_non_empty_row_feature import LongestNonEmptyRowFeature
from feature.simple_features.max_feature import MaxFeature
from feature.simple_features.max_histogram_feature import MaxHistogramFeature
from feature.simple_features.max_projection_horizontal_feature import MaxProjectionHorizontalFeature
from feature.simple_features.max_projection_horizontal_value_feature import MaxProjectionHorizontalValueFeature
from feature.simple_features.max_projection_vertical_feature import MaxProjectionVerticalFeature
from feature.simple_features.max_projection_vertical_value_feature import MaxProjectionVerticalValueFeature
from feature.simple_features.max_value_histogram_feature import MaxValueHistogramFeature
from feature.simple_features.mean_feature import MeanFeature
from feature.simple_features.median_feature import MedianFeature
from feature.simple_features.min_feature import MinFeature
from feature.simple_features.min_projection_horizontal_feature import MinProjectionHorizontalFeature
from feature.simple_features.min_projection_horizontal_value_feature import MinProjectionHorizontalValueFeature
from feature.simple_features.min_projection_vertical_feature import MinProjectionVerticalFeature
from feature.simple_features.min_projection_vertical_value_feature import MinProjectionVerticalValueFeature
from feature.simple_features.non_empty_columns_feature import NonEmptyColumnsFeature
from feature.simple_features.non_empty_rows_feature import NonEmptyRowsFeature
from feature.simple_features.number_of_holes_feature import NumberOfHolesFeature
from feature.simple_features.number_of_islands_feature import NumberOfIslandsFeature
from feature.simple_features.second_central_moment_horizontal import SecondCentralMomentHorizontalFeature
from feature.simple_features.second_central_moment_vertical import SecondCentralMomentVerticalFeature
from feature.simple_features.second_quart_feature import SecondQuartFeature
from feature.simple_features.third_quart_feature import ThirdQuartFeature
from feature_extractor.feature_extractor import FeatureExtractor
from learning import Learning, LearningClassify
def define_features() -> FeatureExtractor:
    """
    Initialize the extractor with every feature we have.
    :return: the fully populated FeatureExtractor
    """
    # Threshold conventions: 0.05 ~ near black, 0.95 ~ near white.
    extractor = FeatureExtractor()
    extractor.add_feature(MaxFeature())#1
    extractor.add_feature(MinFeature())
    extractor.add_feature(MeanFeature())
    extractor.add_feature(MedianFeature())
    extractor.add_feature(NonEmptyColumnsFeature(0.05))#5 - near black
    extractor.add_feature(NonEmptyRowsFeature(0.05))# near black
    extractor.add_feature(ThirdQuartFeature())
    extractor.add_feature(SecondQuartFeature())
    extractor.add_feature(SecondCentralMomentVerticalFeature())
    extractor.add_feature(SecondCentralMomentHorizontalFeature())#10
    extractor.add_feature(NumberOfIslandsFeature(0.05))# near black
    extractor.add_feature(NumberOfHolesFeature(0.95))# near white
    extractor.add_feature(FirstRawMomentVerticalFeature())
    extractor.add_feature(FirstRawMomentHorizontalFeature())
    extractor.add_feature(AvgSizeOfIslandFeature(0.05))# near black # 15
    extractor.add_feature(AvgSizeOfHoleFeature(0.95))# near white
    extractor.add_feature(FourthQuartFeature())
    extractor.add_feature(LongestNonEmptyRowFeature(0.05))# near black
    extractor.add_feature(LongestNonEmptyDiagonalFeature(0.05))# near black
    extractor.add_feature(LongestNonEmptyColumnFeature(0.05))# near black # 20
    extractor.add_feature(LongestNonEmptyAntidiagonalFeature(0.05))# near black
    extractor.add_feature(FirstQuartFeature())# 22
    extractor.add_feature(MaxProjectionHorizontalFeature())
    extractor.add_feature(MaxProjectionHorizontalValueFeature())
    extractor.add_feature(MaxProjectionVerticalFeature())
    extractor.add_feature(MaxProjectionVerticalValueFeature())
    extractor.add_feature(MinProjectionHorizontalFeature())
    extractor.add_feature(MinProjectionHorizontalValueFeature())
    extractor.add_feature(MinProjectionVerticalFeature())
    extractor.add_feature(MinProjectionVerticalValueFeature())
    extractor.add_feature(MaxHistogramFeature())
    extractor.add_feature(MaxValueHistogramFeature())
    return extractor
def classify_main(model_path: str, classify_data_path: str, output: str):
    """Classify the objects at ``classify_data_path`` with a trained model.

    One predicted class label is written per line to ``output``.
    """
    feature_extractor = define_features()
    dataset = ClassifyData(classify_data_path, feature_extractor, MinMaxDifferenceCoordinatesBitmapMapper())
    dataset.get_classify_data()
    # The model file also records which feature columns were dropped.
    dataset.LoadDeletedColumns(model_path)
    classifier = LearningClassify(model_path)
    predicted_classes = classifier.classify(dataset)
    with open(output, "w") as out_file:
        out_file.writelines(f"{label}\n" for label in predicted_classes)
def train_main(training_path: str, test_path: str, output_path: str):
    """Train a classifier on the given datasets and save the model.

    Also writes the labelled feature-correlation dump and the training
    history plot next to ``output_path``.
    """
    extractor = define_features()
    data = LearningData(training_path, test_path, extractor, MinMaxDifferenceCoordinatesBitmapMapper())
    # Drop constant / strongly correlated features before learning.
    rowMask = CalculateFeaturesToIgnore(data, output_path)
    data.SetDeletedColumns(rowMask, output_path)
    model = Learning(extractor.feature_count() - len(numpy.where(rowMask)[0]), data.get_class_count()) # there is no easy way to compute the number of classes; my test data has 4 classes.
    model.plot_history(model.learn(data, 1024, 8), output_path)
    model.save_model(output_path)
def CalculateFeaturesToIgnore(data, output_path):
    """Select redundant features to drop before training.

    A feature is marked for removal when:
      * its correlation row is entirely NaN (i.e. the feature is constant
        over the training set), or
      * it has |corr| > 0.9 with an earlier, non-removed feature.

    Side effects: writes the correlation matrix, labelled with feature
    names, to ``output_path + ".corrarr"`` and calls
    ``data.SetActiveFeatures(mask)``.

    :param data: learning-data object exposing get_training_data(),
                 GetFeaturesNames() and SetActiveFeatures()
    :param output_path: base path for the ``.corrarr`` CSV dump
    :return: boolean mask with True for every feature index to ignore
    """
    features_matrix = data.get_training_data()[0]
    # corrcoef treats rows as variables, hence the transpose.
    corr = numpy.corrcoef(features_matrix.T)

    # Dump the matrix as CSV, then prepend a header row and a label column
    # with the feature names so the file is human-readable.
    corr_path = output_path + ".corrarr"
    numpy.savetxt(corr_path, corr, fmt="%0.2e", delimiter=",")
    feature_names = data.GetFeaturesNames()
    with open(corr_path, "r") as corr_file:
        data_rows = corr_file.readlines()
    labelled_rows = ["," + ",".join(feature_names) + "\n"]
    labelled_rows += [name + "," + row for name, row in zip(feature_names, data_rows)]
    with open(corr_path, "w") as corr_file:
        corr_file.writelines(labelled_rows)

    # Constant features produce an all-NaN correlation row.
    row_mask = numpy.all(numpy.isnan(corr), axis=1)
    num_features = corr.shape[0]
    for index in range(num_features):
        if row_mask[index]:
            continue  # already dropped; it must not veto later features
        for other in range(index + 1, num_features):
            # NaN comparisons evaluate False, so NaN entries never match.
            if numpy.abs(corr[index, other]) > 0.9:
                row_mask[other] = True

    data.SetActiveFeatures(row_mask)
    return row_mask
# CLI entry point: dispatches to train_main or classify_main.
if __name__ == "__main__":
    # Description string is Polish: "TAIO grayscale images".
    parser = argparse.ArgumentParser(description='TAIO obrazki w skali szarosci')
    subparser = parser.add_subparsers(dest='mode')
    # "training" sub-command: learn a model from train/test datasets.
    parser_training = subparser.add_parser('training', help="Training mode")
    parser_training.add_argument("train_path", help="path to training dataset")
    parser_training.add_argument("test_path", help="path to testing dataset")
    parser_training.add_argument("-o", "--output", help="Output path for model from learning process",
                                 default="model.keras")
    # "classify" sub-command: apply a saved model to new objects.
    parser_classify = subparser.add_parser('classify', help="Classification mode")
    parser_classify.add_argument("model_path", help="path to model file")
    parser_classify.add_argument("classification_path", help="path to objects to classify")
    parser_classify.add_argument("-o", "--output", help="Output path for classification result",
                                 default="output.txt")
    args = parser.parse_args()
    # args.mode is None when no sub-command was supplied; nothing happens.
    if args.mode == "classify":
        classify_main(args.model_path, args.classification_path, args.output)
    elif args.mode == "training":
        train_main(args.train_path, args.test_path, args.output)
| 2,260 | 0 | 69 |
73cb7f84904c9f5e9797faba056f08ccdf349848 | 449 | py | Python | auxilia/funcao truncar.py | HigorAnjos/Fundamentos-VI | e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb | [
"MIT"
] | null | null | null | auxilia/funcao truncar.py | HigorAnjos/Fundamentos-VI | e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb | [
"MIT"
] | null | null | null | auxilia/funcao truncar.py | HigorAnjos/Fundamentos-VI | e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb | [
"MIT"
] | null | null | null |
def truncate(number, decimals=0):
    """
    Returns a value truncated to a specific number of decimal places.
    """
    # Validate the requested precision before doing any arithmetic.
    if not isinstance(decimals, int):
        raise TypeError("decimal places must be an integer.")
    if decimals < 0:
        raise ValueError("decimal places has to be 0 or more.")
    if decimals == 0:
        return math.trunc(number)
    # Shift left, chop the fractional part, then shift back.
    scale = 10.0 ** decimals
    return math.trunc(number * scale) / scale
| 23.631579 | 69 | 0.639198 |
def truncate(number, decimals=0):
    """
    Returns a value truncated to a specific number of decimal places.
    """
    # NOTE(review): relies on the module importing ``math`` — the import is
    # not visible in this excerpt; confirm it exists at file top.
    if not isinstance(decimals, int):
        raise TypeError("decimal places must be an integer.")
    elif decimals < 0:
        raise ValueError("decimal places has to be 0 or more.")
    elif decimals == 0:
        # Zero decimals: plain truncation toward zero.
        return math.trunc(number)
    # Shift, drop the fractional part, shift back.
    factor = 10.0 ** decimals
    return math.trunc(number * factor) / factor
| 0 | 0 | 0 |
350f482e992f32b93957d2a42ad52be3552c6bce | 4,465 | py | Python | linux-exec.py | mlyapin/linux-exec | 0ccd813407f928b2f9120e637dabaae4c241fb91 | [
"MIT"
] | null | null | null | linux-exec.py | mlyapin/linux-exec | 0ccd813407f928b2f9120e637dabaae4c241fb91 | [
"MIT"
] | null | null | null | linux-exec.py | mlyapin/linux-exec | 0ccd813407f928b2f9120e637dabaae4c241fb91 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import os
import subprocess
import hashlib
import typing
import dataclasses
DOCKERFILE_BASIC = """
FROM archlinux
# Update the system
RUN pacman --noconfirm -Syu
# We will append commands to install packages later. See the function `construct_dockerfile()`
"""
# NOTE(review): this @dataclass decorator looks like a leftover from a
# removed Config class definition — applying dataclass() to a plain
# function is almost certainly wrong; confirm against the original source.
@dataclasses.dataclass
def calc_image_hash(config: Config) -> str:
    """Calculate image hash from given a config."""
    # Hash depends only on the configured package list.
    return hashlib.md5(config.packages.encode()).hexdigest()
def docker_images(config: Config) -> typing.Iterator[str]:
    """Get all present docker images."""
    # Ask the docker CLI for every local image formatted as "repo:tag".
    toexec = [config.docker_cmd, "image", "ls",
              "--format", "{{.Repository}}:{{.Tag}}"]
    p = subprocess.run(toexec, capture_output=True, text=True)
    # Raises subprocess.CalledProcessError when docker exits non-zero.
    p.check_returncode()
    for tag in p.stdout.splitlines():
        yield tag
main()
| 29.375 | 98 | 0.603135 | #!/usr/bin/env python3
import sys
import os
import subprocess
import hashlib
import typing
import dataclasses
DOCKERFILE_BASIC = """
FROM archlinux
# Update the system
RUN pacman --noconfirm -Syu
# We will append commands to install packages later. See the function `construct_dockerfile()`
"""
@dataclasses.dataclass
class Config:
    """Runtime configuration for building and running the container image.

    ``tag`` is derived from ``packages`` via calc_image_hash (see gen_tag),
    so configs installing the same packages share one image.
    """
    # Prefix stripped from symlink names to recover the wrapped command.
    cmdprefix: str = "le-"
    # Memory limit passed to `docker build` / `docker run` (-m).
    memory: str = "1g"
    docker_cmd: str = "docker"
    # Space-separated pacman packages to bake into the image.
    packages: str = ""
    mounts: [str] = dataclasses.field(default_factory=list)
    cwd: str = dataclasses.field(default_factory=os.getcwd)
    # Filled in by __post_init__, hence the None default.
    tag: str = None
    repo: str = "linux-exec"
    def gen_tag(self):
        # Recompute the image tag from the current package list.
        self.tag = calc_image_hash(self)
    def __post_init__(self):
        self.gen_tag()
    def get_repotag(self) -> str:
        # Full "repo:tag" image reference.
        return self.repo + ":" + self.tag
    @classmethod
    def from_env(cls):
        """Build a Config from LE_* environment variables, keeping defaults."""
        c = Config()
        c.cmdprefix = os.getenv("LE_PREFIX", c.cmdprefix)
        c.memory = os.getenv("LE_MEM", c.memory)
        c.docker_cmd = os.getenv("LE_DOCKERCMD", c.docker_cmd)
        c.packages = os.getenv("LE_PACKAGES", c.packages)
        c.cwd = os.getenv("LE_CWD", c.cwd)
        c.repo = os.getenv("LE_REPO", c.repo)
        mounts = os.getenv("LE_MOUNTS")
        if mounts is not None:
            # LE_MOUNTS is a colon-separated list of host paths.
            c.mounts = mounts.split(":")
        # Re-derive the tag because packages may have changed from env.
        c.gen_tag()
        return c
def calc_image_hash(config: Config) -> str:
    """Calculate image hash from given a config.

    The digest covers only the package list, so two configs that install
    the same packages map to the same image.
    """
    digest = hashlib.md5(config.packages.encode())
    return digest.hexdigest()
def construct_dockerfile(config: Config) -> str:
    """Render the Dockerfile text for the given config."""
    # We install packages one by one to reuse intermediate fs layers.
    install_lines = [
        "RUN pacman --noconfirm -S {}".format(pkg)
        for pkg in config.packages.split(" ")
        if pkg
    ]
    return os.linesep.join([DOCKERFILE_BASIC] + install_lines)
def docker_images(config: Config) -> typing.Iterator[str]:
    """Get all present docker images."""
    # List every local image as "repository:tag" lines.
    toexec = [config.docker_cmd, "image", "ls",
              "--format", "{{.Repository}}:{{.Tag}}"]
    p = subprocess.run(toexec, capture_output=True, text=True)
    # Surface docker failures as subprocess.CalledProcessError.
    p.check_returncode()
    for tag in p.stdout.splitlines():
        yield tag
def ensure_image_relevant(c: Config):
    """Build the image for config ``c`` unless it already exists locally."""
    if c.get_repotag() not in docker_images(c):
        print("Required image is missing. Building...")
        build_image(c)
def build_image(config: Config):
    """Build the docker image described by ``config``.

    The generated Dockerfile is streamed to ``docker build -`` on stdin,
    so nothing is written to disk.
    """
    toexec = [config.docker_cmd, "build",
              "-m", config.memory,
              "--label", "packages=" + config.packages,
              "-t", config.get_repotag(),
              # The dockerfile will be passed via stdin
              "-"]
    subprocess.run(toexec, encoding=sys.getdefaultencoding(),
                   input=construct_dockerfile(config))
def run_cmd(config: Config, cmd: str, args: [str]):
    """Run ``cmd`` with ``args`` inside a throwaway container.

    The current working directory (plus any ``config.mounts``) is
    bind-mounted at the same path inside the container; stdin is forwarded,
    and a TTY is allocated only when stdin is a terminal.
    """
    ensure_image_relevant(config)
    toexec = [config.docker_cmd, "run",
              "-m", config.memory,
              "--rm",
              "--init",
              "-i",
              # Mount and change working directory.
              "-v", "".join([config.cwd, ":", config.cwd]),
              "-w", config.cwd]
    # Mount requested directories
    for d in config.mounts:
        if len(d) > 0:
            toexec += ["-v", "".join([d, ":", d])]
    # If stdin is piped from another process, do not allocate TTY.
    indata = None
    if sys.stdin.isatty():
        toexec += ["-t"]
    else:
        indata = sys.stdin.buffer.read()
    toexec += [config.get_repotag(), "sh", "-c",
               " ".join([cmd] + args)]
    subprocess.run(toexec, input=indata)
def main():
    """Dispatch based on how the script was invoked.

    Run directly without arguments -> build the image; with arguments ->
    run them as a command. Run via symlink -> run the link name itself
    (with the configured prefix stripped when present).
    """
    config = Config.from_env()
    program = os.path.basename(sys.argv[0])
    args = sys.argv[1:]
    if not os.path.islink(sys.argv[0]):
        if len(args) == 0:
            # If the script was executed directly without arguments, build the image.
            build_image(config)
        else:
            # If arguments were passed, treat them as commands.
            run_cmd(config, args[0], args[1:])
    elif program[:len(config.cmdprefix)] == config.cmdprefix:
        # If the script was executed via link with a suffix, strip the prefix and run the command.
        run_cmd(config, program[len(config.cmdprefix):], args)
    else:
        # If the script was executed via link without a suffix, just run the command.
        run_cmd(config, program, args)
main()
| 3,092 | 390 | 137 |
b6d3c2efe0a57a760b099d50f8e560dd83d6a39e | 966 | py | Python | PyUniversalKit/Netkit/test.py | yangtang-special/PyUniversalKit | 39ea53512d255cec194a7e9582bd05417233b845 | [
"MIT"
] | 4 | 2021-09-03T11:50:48.000Z | 2022-03-18T02:26:23.000Z | PyUniversalKit/Netkit/test.py | yangtang-special/PyUniversalKit | 39ea53512d255cec194a7e9582bd05417233b845 | [
"MIT"
] | null | null | null | PyUniversalKit/Netkit/test.py | yangtang-special/PyUniversalKit | 39ea53512d255cec194a7e9582bd05417233b845 | [
"MIT"
] | null | null | null | import requests
from .models import TestResponse
from typing import Union,Dict
| 26.833333 | 154 | 0.613872 | import requests
from .models import TestResponse
from typing import Union,Dict
class Test():
def get(self,url:str,headers:Dict[str,Union[str,int,float,bytes]],encoding:str) -> Union[TestResponse,str]:
try:
response = requests.get(url=url, headers=headers)
response.encoding = encoding
return TestResponse(_content=response)
except Exception as e:
return "error:%s" % str(e)
def post(self,url:str,data:Dict[str,Union[str,int,float,bytes]],headers:Dict[str,Union[str,int,float,bytes]],encoding:str) -> Union[TestResponse,str]:
try:
proxies = {
"http": "http://82.114.93.210:8080"
}
response = requests.post(url=url, headers=headers, data=data,proxies=proxies)
response.encoding = encoding
return TestResponse(_content=response)
except Exception as e:
return "error:%s" % str(e)
| 808 | -8 | 76 |
15d57e2727120c6ebac03e3adaf2d3552e6cc5d0 | 6,794 | py | Python | Other_Python/ThetaGPU_Ray/source/post_analyses.py | Romit-Maulik/Tutorials-Demos-Practice | a58ddc819f24a16f7059e63d7f201fc2cd23e03a | [
"MIT"
] | 8 | 2020-09-02T14:46:07.000Z | 2021-11-29T15:27:05.000Z | Other_Python/ThetaGPU_Ray/source/post_analyses.py | Romit-Maulik/Tutorials-Demos-Practice | a58ddc819f24a16f7059e63d7f201fc2cd23e03a | [
"MIT"
] | 18 | 2020-11-13T18:49:33.000Z | 2022-03-12T00:54:43.000Z | Other_Python/ThetaGPU_Ray/source/post_analyses.py | Romit-Maulik/Tutorials-Demos-Practice | a58ddc819f24a16f7059e63d7f201fc2cd23e03a | [
"MIT"
] | 5 | 2019-09-25T23:57:00.000Z | 2021-04-18T08:15:34.000Z | import os
import xarray as xr
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_path = os.path.dirname(dir_path)
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from utils import plot_averaged_errors, plot_windowed_errors, plot_contours, plot_bars
if __name__ == '__main__':
print('Analysis module') | 40.682635 | 129 | 0.6937 | import os
import xarray as xr
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_path = os.path.dirname(dir_path)
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from utils import plot_averaged_errors, plot_windowed_errors, plot_contours, plot_bars
def perform_analyses(data_paths,var_time,cadence,num_ips,num_ops,output_gap,num_modes,test_fields,forecast,save_path,subregions):
pod_modes = np.load(data_paths['pod_modes'])[:,:num_modes]
snapshots_mean = np.load(data_paths['training_mean'])
lead_time = num_ops
test_fields = test_fields.reshape(103,120,-1)[:,:,:var_time+num_ips+num_ops+output_gap]
snapshots_mean = snapshots_mean.reshape(103,120)
persistence_maes = np.zeros(shape=(num_ops,len(subregions)+1),dtype='float32')
climatology_maes = np.zeros(shape=(num_ops,len(subregions)+1),dtype='float32')
predicted_maes = np.zeros(shape=(num_ops,len(subregions)+1),dtype='float32')
# Climatology calculation
train_fields = np.load(data_paths['training_fields']).T
yearly_snaps = int(365*cadence)
num_years = train_fields.shape[0]//yearly_snaps
climatology = train_fields[:yearly_snaps]
for year in range(1,num_years):
climatology = climatology + train_fields[year*yearly_snaps:(year+1)*yearly_snaps]
climatology = climatology/num_years
climatology = climatology.T.reshape(103,120,-1)
if var_time+num_ips+num_ops+output_gap > yearly_snaps:
climatology_lead = climatology[:,:,:var_time+num_ips+num_ops]
climatology_trail = climatology[:,:,var_time+num_ips+num_ops:]
climatology = np.concatenate((climatology_trail,climatology_lead),axis=-1)
# Num snaps predicted
num_snaps_pred = test_fields.shape[-1]
num_years_pred = num_snaps_pred//yearly_snaps
climatology = np.tile(climatology,(1,1,num_years_pred))
# Fix trailing dimension
if climatology.shape[-1] != test_fields.shape[-1]:
tile_diff = abs(climatology.shape[-1]-test_fields.shape[-1])
climatology = np.concatenate((climatology,climatology[:,:,:tile_diff]),axis=-1)
else:
climatology = climatology[:,:,:var_time+num_ips+num_ops+output_gap]
# # MAE of climatology
# climatology_maes = np.mean(np.abs(test_fields[:,:,:climatology.shape[-1]] - climatology),axis=-1)
# For different lead times - output gap has been removed here
for lead_time in range(num_ops):
# Predicted test
pred_test = forecast[:var_time,lead_time,:]
# Global analyses
# Reconstruct
predicted = snapshots_mean[:,:,None] + np.matmul(pod_modes,pred_test.T).reshape(103,120,-1)
# persistence predictions
persistence_fields = test_fields[:,:,num_ips-(lead_time+1):num_ips-(lead_time+1)+var_time]
# Post analyses - unify time slices
test_fields_temp = test_fields[:,:,output_gap+num_ips+lead_time:output_gap+num_ips+lead_time+var_time]
# Climatology predictions
clim_fields = climatology[:,:,output_gap+num_ips+lead_time:output_gap+num_ips+lead_time+var_time]
# Local analysis
region_num = 0
for region in subregions:
mask = np.asarray(xr.open_dataset(region)['mask'])
pred_local = predicted[mask==1,:]
pers_local = persistence_fields[mask==1,:]
clim_local = clim_fields[mask==1,:]
test_fields_local = test_fields_temp[mask==1,:]
mae = np.mean(np.abs(pers_local-test_fields_local))
persistence_maes[lead_time,region_num] = mae
mae = np.mean(np.abs(pred_local-test_fields_local))
predicted_maes[lead_time,region_num] = mae
mae = np.mean(np.abs(clim_local-test_fields_local))
climatology_maes[lead_time,region_num] = mae
region_num+=1
# Total
region_num = -1
mae = np.mean(np.abs(persistence_fields-test_fields_temp))
persistence_maes[lead_time,region_num] = mae
mae = np.mean(np.abs(predicted-test_fields_temp))
predicted_maes[lead_time,region_num] = mae
mae = np.mean(np.abs(clim_fields-test_fields_temp))
climatology_maes[lead_time,region_num] = mae
if lead_time == num_ops-1:
# Visualizations
pred_mae, pred_cos = plot_averaged_errors(test_fields_temp,predicted,snapshots_mean)
pers_mae, pers_cos = plot_averaged_errors(test_fields_temp,persistence_fields,snapshots_mean)
clim_mae, clim_cos = plot_averaged_errors(test_fields_temp,clim_fields,snapshots_mean)
plot_contours(pred_mae,0,150,'MAE',save_path+'/MAE_Pred.png')
plot_contours(pred_cos,-1.0,1.0,'Cosine Similarity',save_path+'/COS_Pred.png')
plot_contours(clim_mae-pred_mae,-10,10,'Difference MAE',save_path+'/Difference_MAE_Clim.png')
plot_contours(pred_cos-clim_cos,-0.5,0.5,'Difference Cosine Similarity',save_path+'/Difference_COS_Clim.png')
plot_contours(pers_mae-pred_mae,-10,10,'Difference MAE',save_path+'/Difference_MAE_Pers.png')
plot_contours(pred_cos-pers_cos,-0.5,0.5,'Difference Cosine Similarity',save_path+'/Difference_COS_Pers.png')
# # For the specific days
# pred_mae, pred_cos = plot_windowed_errors(test_fields,predicted,snapshots_mean,int_start=120,int_end=150)
# pers_mae, pers_cos = plot_windowed_errors(test_fields,persistence_fields,snapshots_mean,int_start=120,int_end=150)
# plot_contours(pers_mae-pred_mae,-10,10,'Difference MAE',save_path+'/Difference_MAE_Windowed.png')
# plot_contours(pred_cos-pers_cos,-0.5,0.5,'Difference Cosine Similarity',save_path+'/Difference_COS_Windowed.png')
# Save RMSE predictions
np.savetxt(save_path+'/persistence_maes.txt',persistence_maes)
np.savetxt(save_path+'/predicted_maes.txt',predicted_maes)
np.savetxt(save_path+'/climatology_maes.txt',climatology_maes)
# Make a plot of them
plot_bars(persistence_maes[:,:-1],climatology_maes[:,:-1],predicted_maes[:,:-1],subregions,save_path)
def plot_obj(obj_array, save_path):
    """Plot the objective-function diagnostics over time on three panels.

    ``obj_array`` is assumed to have one row per timestep and five columns:
    old background, old likelihood, new background, new likelihood, and a
    0/1 success flag (inferred from the plot labels — confirm with caller).

    :param obj_array: 2D array of shape (timesteps, 5)
    :param save_path: output image path; the figure is saved and closed
                      only when this is a string (matching the original)
    """
    fig, ax = plt.subplots(nrows=1, ncols=3)

    # Fix: the original used row slices (obj_array[:0], [:1], ... — an
    # empty/short prefix of rows); each curve is a *column* of obj_array.
    ax[0].plot(obj_array[:, 0], label='Old background')
    ax[0].plot(obj_array[:, 1], label='Old likelihood')
    ax[0].legend()
    ax[0].set_xlabel('Timestep')

    ax[1].plot(obj_array[:, 2], label='New background')
    ax[1].plot(obj_array[:, 3], label='New likelihood')
    ax[1].legend()
    ax[1].set_xlabel('Timestep')

    # Fix: the success flag was drawn on ax[1] again, leaving ax[2] empty.
    ax[2].plot(obj_array[:, 4], label='0-Fail, 1-Success')
    ax[2].legend()
    ax[2].set_xlabel('Timestep')

    if isinstance(save_path, str):
        plt.savefig(save_path)
        plt.close()
if __name__ == '__main__':
    # This module is meant to be imported; running it directly only
    # identifies itself.
    print('Analysis module')
fdf60a95ab6ea7e7941b21b1434f1f1f7908eeb0 | 13,728 | py | Python | train_single_scale.py | Mawiszus/World-GAN | 0ad21849e284e18c44e7ffede0eefb764e0ff4bb | [
"MIT"
] | 41 | 2021-06-21T03:31:16.000Z | 2022-02-17T08:06:07.000Z | train_single_scale.py | codingwatching/World-GAN | 0ad21849e284e18c44e7ffede0eefb764e0ff4bb | [
"MIT"
] | 1 | 2021-10-24T02:08:53.000Z | 2021-11-14T22:15:40.000Z | train_single_scale.py | codingwatching/World-GAN | 0ad21849e284e18c44e7ffede0eefb764e0ff4bb | [
"MIT"
] | 1 | 2021-09-12T08:00:52.000Z | 2021-09-12T08:00:52.000Z | import os
import subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.functional import interpolate
from loguru import logger
from tqdm import tqdm
import numpy as np
import wandb
from draw_concat import draw_concat
from generate_noise import generate_spatial_noise
from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world
from minecraft.level_renderer import render_minecraft
from models import calc_gradient_penalty, save_networks
from utils import interpolate3D
def update_noise_amplitude(z_prev, real, opt):
    """ Update the amplitude of the noise for the current scale according to the previous noise map. """
    # RMSE between the real level and the previous scale's reconstruction,
    # scaled by the configured update factor.
    rmse = F.mse_loss(real, z_prev).sqrt()
    return opt.noise_update * rmse
def train_single_scale(D, G, reals, generators, noise_maps, input_from_prev_scale, noise_amplitudes, opt):
    """ Train one scale. D and G are the current discriminator and generator, reals are the scaled versions of the
    original level, generators and noise_maps contain information from previous scales and will receive information in
    this scale, input_from_previous_scale holds the noise map and images from the previous scale, noise_amplitudes hold
    the amplitudes for the noise in all the scales. opt is a namespace that holds all necessary parameters. """
    current_scale = len(generators)
    clear_empty_world(opt.output_dir, 'Curr_Empty_World') # reset tmp world
    # When several training examples are used, collect each example's tensor and
    # its spatial dimensions at this scale; otherwise work on the single level.
    if opt.use_multiple_inputs:
        real_group = []
        nzx_group = []
        nzy_group = []
        nz_group = []
        for scale_group in reals:
            real_group.append(scale_group[current_scale])
            nzx_group.append(scale_group[current_scale].shape[2])
            nzy_group.append(scale_group[current_scale].shape[3])
            nz_group.append((scale_group[current_scale].shape[2], scale_group[current_scale].shape[3]))
        curr_noises = [0 for _ in range(len(real_group))]
        curr_prevs = [0 for _ in range(len(real_group))]
        curr_z_prevs = [0 for _ in range(len(real_group))]
    else:
        real = reals[current_scale]
        nz = real.shape[2:]
    padsize = int(1 * opt.num_layer) # As kernel size is always 3 currently, padsize goes up by one per layer
    # NOTE(review): both branches install the same replication padding (the
    # constant-pad variant is commented out), so opt.pad_with_noise currently
    # has no effect here — confirm whether that is intended.
    if not opt.pad_with_noise:
        # pad_noise = nn.ConstantPad3d(padsize, 0)
        # pad_image = nn.ConstantPad3d(padsize, 0)
        pad_noise = nn.ReplicationPad3d(padsize)
        pad_image = nn.ReplicationPad3d(padsize)
    else:
        pad_noise = nn.ReplicationPad3d(padsize)
        pad_image = nn.ReplicationPad3d(padsize)
    # setup optimizer
    optimizerD = optim.Adam(D.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(G.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
    schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[1600, 2500], gamma=opt.gamma)
    schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[1600, 2500], gamma=opt.gamma)
    if current_scale == 0: # Generate new noise
        if opt.use_multiple_inputs:
            z_opt_group = []
            for nzx, nzy in zip(nzx_group, nzy_group):
                z_opt = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
                z_opt = pad_noise(z_opt)
                z_opt_group.append(z_opt)
        else:
            z_opt = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
            z_opt = pad_noise(z_opt)
    else: # Add noise to previous output
        if opt.use_multiple_inputs:
            z_opt_group = []
            for nzx, nzy in zip(nzx_group, nzy_group):
                z_opt = torch.zeros([1, opt.nc_current, nzx, nzy]).to(opt.device)
                z_opt = pad_noise(z_opt)
                z_opt_group.append(z_opt)
        else:
            z_opt = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
            z_opt = pad_noise(z_opt)
    logger.info("Training at scale {}", current_scale)
    # Previous-iteration gradients, kept only to log the cosine similarity of
    # successive gradient directions for D (real/fake passes) and G.
    grad_d_real = []
    grad_d_fake = []
    grad_g = []
    for p in D.parameters():
        grad_d_real.append(torch.zeros(p.shape).to(opt.device))
        grad_d_fake.append(torch.zeros(p.shape).to(opt.device))
    for p in G.parameters():
        grad_g.append(torch.zeros(p.shape).to(opt.device))
    # Main WGAN-GP style training loop: Dsteps discriminator updates, then
    # Gsteps generator updates per epoch, for each input in the group.
    for epoch in tqdm(range(opt.niter)):
        step = current_scale * opt.niter + epoch
        if opt.use_multiple_inputs:
            group_steps = len(real_group)
            noise_group = []
            for nzx, nzy in zip(nzx_group, nzy_group):
                noise_ = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
                noise_ = pad_noise(noise_)
                noise_group.append(noise_)
        else:
            group_steps = 1
            noise_ = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
            noise_ = pad_noise(noise_)
        for curr_inp in range(group_steps):
            if opt.use_multiple_inputs:
                real = real_group[curr_inp]
                nz = nz_group[curr_inp]
                z_opt = z_opt_group[curr_inp]
                noise_ = noise_group[curr_inp]
                prev_scale_results = input_from_prev_scale[curr_inp]
                opt.curr_inp = curr_inp
            else:
                prev_scale_results = input_from_prev_scale
            ############################
            # (1) Update D network: maximize D(x) + D(G(z))
            ###########################
            for j in range(opt.Dsteps):
                # train with real
                D.zero_grad()
                output = D(real).to(opt.device)
                errD_real = -output.mean()
                errD_real.backward(retain_graph=True)
                grads_after = []
                cos_sim = []
                for i, p in enumerate(D.parameters()):
                    grads_after.append(p.grad)
                    cos_sim.append(nn.CosineSimilarity(-1)(grad_d_real[i], p.grad).mean().item())
                diff_d_real = np.mean(cos_sim)
                grad_d_real = grads_after
                # train with fake
                # NOTE: '&' on two Python bools behaves like 'and' here.
                if (j == 0) & (epoch == 0):
                    if current_scale == 0: # If we are in the lowest scale, noise is generated from scratch
                        prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
                        prev_scale_results = prev
                        prev = pad_image(prev)
                        z_prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
                        z_prev = pad_noise(z_prev)
                        opt.noise_amp = 1
                    else: # First step in NOT the lowest scale
                        # We need to adapt our inputs from the previous scale and add noise to it
                        prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
                                           "rand", pad_noise, pad_image, opt)
                        prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=True)
                        prev = pad_image(prev)
                        z_prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
                                             "rec", pad_noise, pad_image, opt)
                        z_prev = interpolate3D(z_prev, real.shape[-3:], mode="bilinear", align_corners=True)
                        opt.noise_amp = update_noise_amplitude(z_prev, real, opt)
                        z_prev = pad_image(z_prev)
                else: # Any other step
                    if opt.use_multiple_inputs:
                        z_prev = curr_z_prevs[curr_inp]
                    # NOTE(review): align_corners=False here but True in the
                    # first-epoch branch above — confirm the asymmetry is intended.
                    prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
                                       "rand", pad_noise, pad_image, opt)
                    prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=False)
                    prev = pad_image(prev)
                # After creating our correct noise input, we feed it to the generator:
                noise = opt.noise_amp * noise_ + prev
                fake = G(noise.detach(), prev)
                # Then run the result through the discriminator
                output = D(fake.detach())
                errD_fake = output.mean()
                # Backpropagation
                errD_fake.backward(retain_graph=False)
                # Gradient Penalty
                gradient_penalty = calc_gradient_penalty(D, real, fake, opt.lambda_grad, opt.device)
                gradient_penalty.backward(retain_graph=False)
                grads_after = []
                cos_sim = []
                for i, p in enumerate(D.parameters()):
                    grads_after.append(p.grad)
                    cos_sim.append(nn.CosineSimilarity(-1)(grad_d_fake[i], p.grad).mean().item())
                diff_d_fake = np.mean(cos_sim)
                grad_d_fake = grads_after
                # Logging:
                if step % 10 == 0:
                    wandb.log({f"D(G(z))@{current_scale}": errD_fake.item(),
                               f"D(x)@{current_scale}": -errD_real.item(),
                               f"gradient_penalty@{current_scale}": gradient_penalty.item(),
                               f"D_real_grad@{current_scale}": diff_d_real,
                               f"D_fake_grad@{current_scale}": diff_d_fake,
                               },
                              step=step, sync=False)
                optimizerD.step()
            if opt.use_multiple_inputs:
                z_opt_group[curr_inp] = z_opt
                input_from_prev_scale[curr_inp] = prev_scale_results
                curr_noises[curr_inp] = noise
                curr_prevs[curr_inp] = prev
                curr_z_prevs[curr_inp] = z_prev
            ############################
            # (2) Update G network: maximize D(G(z))
            ###########################
            for j in range(opt.Gsteps):
                G.zero_grad()
                fake = G(noise.detach(), prev.detach(), temperature=1)
                output = D(fake)
                errG = -output.mean()
                errG.backward(retain_graph=False)
                grads_after = []
                cos_sim = []
                for i, p in enumerate(G.parameters()):
                    grads_after.append(p.grad)
                    cos_sim.append(nn.CosineSimilarity(-1)(grad_g[i], p.grad).mean().item())
                diff_g = np.mean(cos_sim)
                grad_g = grads_after
                if opt.alpha != 0: # i. e. we are trying to find an exact recreation of our input in the lat space
                    Z_opt = opt.noise_amp * z_opt + z_prev
                    G_rec = G(Z_opt.detach(), z_prev, temperature=1)
                    rec_loss = opt.alpha * F.mse_loss(G_rec, real)
                    rec_loss.backward(retain_graph=False) # TODO: Check for unexpected argument retain_graph=True
                    rec_loss = rec_loss.detach()
                else: # We are not trying to find an exact recreation
                    rec_loss = torch.zeros([])
                    Z_opt = z_opt
                optimizerG.step()
            # More Logging:
            if step % 10 == 0:
                wandb.log({f"noise_amplitude@{current_scale}": opt.noise_amp,
                           f"rec_loss@{current_scale}": rec_loss.item(),
                           f"G_grad@{current_scale}": diff_g},
                          step=step, sync=False, commit=True)
        # Rendering and logging images of levels
        if epoch % 500 == 0 or epoch == (opt.niter - 1):
            token_list = opt.token_list
            to_level = one_hot_to_blockdata_level
            try:
                # Probe for wine; a missing binary raises OSError, which the
                # except below uses to silently skip rendering on this host.
                subprocess.call(["wine", '--version'])
                real_scaled = to_level(real.detach(), token_list, opt.block2repr, opt.repr_type)
                # Minecraft World
                worldname = 'Curr_Empty_World'
                clear_empty_world(opt.output_dir, worldname) # reset tmp world
                to_render = [real_scaled, to_level(fake.detach(), token_list, opt.block2repr, opt.repr_type),
                             to_level(G(Z_opt.detach(), z_prev), token_list, opt.block2repr, opt.repr_type)]
                render_names = [f"real@{current_scale}", f"G(z)@{current_scale}", f"G(z_opt)@{current_scale}"]
                obj_pth = os.path.join(opt.out_, f"objects/{current_scale}")
                os.makedirs(obj_pth, exist_ok=True)
                for n, level in enumerate(to_render):
                    pos = n * (level.shape[0] + 5)
                    save_level_to_world(opt.output_dir, worldname, (pos, 0, 0), level, token_list, opt.props)
                    curr_coords = [[pos, pos + real_scaled.shape[0]],
                                   [0, real_scaled.shape[1]],
                                   [0, real_scaled.shape[2]]]
                    render_pth = render_minecraft(worldname, curr_coords, obj_pth, render_names[n])
                    wandb.log({render_names[n]: wandb.Object3D(open(render_pth))}, commit=False)
            except OSError:
                pass
        # Learning Rate scheduler step
        schedulerD.step()
        schedulerG.step()
    # Save networks
    if opt.use_multiple_inputs:
        z_opt = z_opt_group
    torch.save(z_opt, "%s/z_opt.pth" % opt.outf)
    save_networks(G, D, z_opt, opt)
    wandb.save(opt.outf)
    return z_opt, input_from_prev_scale, G
| 43.719745 | 119 | 0.565487 | import os
import subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.functional import interpolate
from loguru import logger
from tqdm import tqdm
import numpy as np
import wandb
from draw_concat import draw_concat
from generate_noise import generate_spatial_noise
from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world
from minecraft.level_renderer import render_minecraft
from models import calc_gradient_penalty, save_networks
from utils import interpolate3D
def update_noise_amplitude(z_prev, real, opt):
""" Update the amplitude of the noise for the current scale according to the previous noise map. """
RMSE = torch.sqrt(F.mse_loss(real, z_prev))
return opt.noise_update * RMSE
def train_single_scale(D, G, reals, generators, noise_maps, input_from_prev_scale, noise_amplitudes, opt):
""" Train one scale. D and G are the current discriminator and generator, reals are the scaled versions of the
original level, generators and noise_maps contain information from previous scales and will receive information in
this scale, input_from_previous_scale holds the noise map and images from the previous scale, noise_amplitudes hold
the amplitudes for the noise in all the scales. opt is a namespace that holds all necessary parameters. """
current_scale = len(generators)
clear_empty_world(opt.output_dir, 'Curr_Empty_World') # reset tmp world
if opt.use_multiple_inputs:
real_group = []
nzx_group = []
nzy_group = []
nz_group = []
for scale_group in reals:
real_group.append(scale_group[current_scale])
nzx_group.append(scale_group[current_scale].shape[2])
nzy_group.append(scale_group[current_scale].shape[3])
nz_group.append((scale_group[current_scale].shape[2], scale_group[current_scale].shape[3]))
curr_noises = [0 for _ in range(len(real_group))]
curr_prevs = [0 for _ in range(len(real_group))]
curr_z_prevs = [0 for _ in range(len(real_group))]
else:
real = reals[current_scale]
nz = real.shape[2:]
padsize = int(1 * opt.num_layer) # As kernel size is always 3 currently, padsize goes up by one per layer
if not opt.pad_with_noise:
# pad_noise = nn.ConstantPad3d(padsize, 0)
# pad_image = nn.ConstantPad3d(padsize, 0)
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
else:
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
# setup optimizer
optimizerD = optim.Adam(D.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(G.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[1600, 2500], gamma=opt.gamma)
schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[1600, 2500], gamma=opt.gamma)
if current_scale == 0: # Generate new noise
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
z_opt = pad_noise(z_opt)
else: # Add noise to previous output
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = torch.zeros([1, opt.nc_current, nzx, nzy]).to(opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_opt = pad_noise(z_opt)
logger.info("Training at scale {}", current_scale)
grad_d_real = []
grad_d_fake = []
grad_g = []
for p in D.parameters():
grad_d_real.append(torch.zeros(p.shape).to(opt.device))
grad_d_fake.append(torch.zeros(p.shape).to(opt.device))
for p in G.parameters():
grad_g.append(torch.zeros(p.shape).to(opt.device))
for epoch in tqdm(range(opt.niter)):
step = current_scale * opt.niter + epoch
if opt.use_multiple_inputs:
group_steps = len(real_group)
noise_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
noise_ = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
noise_ = pad_noise(noise_)
noise_group.append(noise_)
else:
group_steps = 1
noise_ = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
noise_ = pad_noise(noise_)
for curr_inp in range(group_steps):
if opt.use_multiple_inputs:
real = real_group[curr_inp]
nz = nz_group[curr_inp]
z_opt = z_opt_group[curr_inp]
noise_ = noise_group[curr_inp]
prev_scale_results = input_from_prev_scale[curr_inp]
opt.curr_inp = curr_inp
else:
prev_scale_results = input_from_prev_scale
############################
# (1) Update D network: maximize D(x) + D(G(z))
###########################
for j in range(opt.Dsteps):
# train with real
D.zero_grad()
output = D(real).to(opt.device)
errD_real = -output.mean()
errD_real.backward(retain_graph=True)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_real[i], p.grad).mean().item())
diff_d_real = np.mean(cos_sim)
grad_d_real = grads_after
# train with fake
if (j == 0) & (epoch == 0):
if current_scale == 0: # If we are in the lowest scale, noise is generated from scratch
prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
prev_scale_results = prev
prev = pad_image(prev)
z_prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_prev = pad_noise(z_prev)
opt.noise_amp = 1
else: # First step in NOT the lowest scale
# We need to adapt our inputs from the previous scale and add noise to it
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=True)
prev = pad_image(prev)
z_prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rec", pad_noise, pad_image, opt)
z_prev = interpolate3D(z_prev, real.shape[-3:], mode="bilinear", align_corners=True)
opt.noise_amp = update_noise_amplitude(z_prev, real, opt)
z_prev = pad_image(z_prev)
else: # Any other step
if opt.use_multiple_inputs:
z_prev = curr_z_prevs[curr_inp]
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=False)
prev = pad_image(prev)
# After creating our correct noise input, we feed it to the generator:
noise = opt.noise_amp * noise_ + prev
fake = G(noise.detach(), prev)
# Then run the result through the discriminator
output = D(fake.detach())
errD_fake = output.mean()
# Backpropagation
errD_fake.backward(retain_graph=False)
# Gradient Penalty
gradient_penalty = calc_gradient_penalty(D, real, fake, opt.lambda_grad, opt.device)
gradient_penalty.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_fake[i], p.grad).mean().item())
diff_d_fake = np.mean(cos_sim)
grad_d_fake = grads_after
# Logging:
if step % 10 == 0:
wandb.log({f"D(G(z))@{current_scale}": errD_fake.item(),
f"D(x)@{current_scale}": -errD_real.item(),
f"gradient_penalty@{current_scale}": gradient_penalty.item(),
f"D_real_grad@{current_scale}": diff_d_real,
f"D_fake_grad@{current_scale}": diff_d_fake,
},
step=step, sync=False)
optimizerD.step()
if opt.use_multiple_inputs:
z_opt_group[curr_inp] = z_opt
input_from_prev_scale[curr_inp] = prev_scale_results
curr_noises[curr_inp] = noise
curr_prevs[curr_inp] = prev
curr_z_prevs[curr_inp] = z_prev
############################
# (2) Update G network: maximize D(G(z))
###########################
for j in range(opt.Gsteps):
G.zero_grad()
fake = G(noise.detach(), prev.detach(), temperature=1)
output = D(fake)
errG = -output.mean()
errG.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(G.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_g[i], p.grad).mean().item())
diff_g = np.mean(cos_sim)
grad_g = grads_after
if opt.alpha != 0: # i. e. we are trying to find an exact recreation of our input in the lat space
Z_opt = opt.noise_amp * z_opt + z_prev
G_rec = G(Z_opt.detach(), z_prev, temperature=1)
rec_loss = opt.alpha * F.mse_loss(G_rec, real)
rec_loss.backward(retain_graph=False) # TODO: Check for unexpected argument retain_graph=True
rec_loss = rec_loss.detach()
else: # We are not trying to find an exact recreation
rec_loss = torch.zeros([])
Z_opt = z_opt
optimizerG.step()
# More Logging:
if step % 10 == 0:
wandb.log({f"noise_amplitude@{current_scale}": opt.noise_amp,
f"rec_loss@{current_scale}": rec_loss.item(),
f"G_grad@{current_scale}": diff_g},
step=step, sync=False, commit=True)
# Rendering and logging images of levels
if epoch % 500 == 0 or epoch == (opt.niter - 1):
token_list = opt.token_list
to_level = one_hot_to_blockdata_level
try:
subprocess.call(["wine", '--version'])
real_scaled = to_level(real.detach(), token_list, opt.block2repr, opt.repr_type)
# Minecraft World
worldname = 'Curr_Empty_World'
clear_empty_world(opt.output_dir, worldname) # reset tmp world
to_render = [real_scaled, to_level(fake.detach(), token_list, opt.block2repr, opt.repr_type),
to_level(G(Z_opt.detach(), z_prev), token_list, opt.block2repr, opt.repr_type)]
render_names = [f"real@{current_scale}", f"G(z)@{current_scale}", f"G(z_opt)@{current_scale}"]
obj_pth = os.path.join(opt.out_, f"objects/{current_scale}")
os.makedirs(obj_pth, exist_ok=True)
for n, level in enumerate(to_render):
pos = n * (level.shape[0] + 5)
save_level_to_world(opt.output_dir, worldname, (pos, 0, 0), level, token_list, opt.props)
curr_coords = [[pos, pos + real_scaled.shape[0]],
[0, real_scaled.shape[1]],
[0, real_scaled.shape[2]]]
render_pth = render_minecraft(worldname, curr_coords, obj_pth, render_names[n])
wandb.log({render_names[n]: wandb.Object3D(open(render_pth))}, commit=False)
except OSError:
pass
# Learning Rate scheduler step
schedulerD.step()
schedulerG.step()
# Save networks
if opt.use_multiple_inputs:
z_opt = z_opt_group
torch.save(z_opt, "%s/z_opt.pth" % opt.outf)
save_networks(G, D, z_opt, opt)
wandb.save(opt.outf)
return z_opt, input_from_prev_scale, G
| 0 | 0 | 0 |
208ae917bbef354a4b5265c6bf1e185fab333cfc | 1,985 | py | Python | appimagelint/checks/desktop_files.py | srevinsaju/appimagelint | 51b4e5543fb4a13d1e6e71b45ca46e8060daa7fe | [
"MIT"
] | 24 | 2019-04-08T22:04:31.000Z | 2022-01-28T19:09:34.000Z | appimagelint/checks/desktop_files.py | srevinsaju/appimagelint | 51b4e5543fb4a13d1e6e71b45ca46e8060daa7fe | [
"MIT"
] | 24 | 2019-04-13T02:08:27.000Z | 2022-03-18T20:18:07.000Z | appimagelint/checks/desktop_files.py | srevinsaju/appimagelint | 51b4e5543fb4a13d1e6e71b45ca46e8060daa7fe | [
"MIT"
] | 6 | 2019-06-10T12:59:51.000Z | 2021-05-28T14:42:10.000Z | import shutil
import subprocess
from pathlib import Path
from appimagelint.models import TestResult
from ..models import AppImage
from . import CheckBase
| 34.824561 | 154 | 0.633249 | import shutil
import subprocess
from pathlib import Path
from appimagelint.models import TestResult
from ..models import AppImage
from . import CheckBase
class DesktopFilesCheck(CheckBase):
    """Check that the AppImage ships exactly one desktop file in its root and
    that every desktop file it contains passes desktop-file-validate."""
    def __init__(self, appimage: AppImage):
        super().__init__(appimage)
    @staticmethod
    def name():
        # Human-readable name shown in reports.
        return "Desktop files existence and validity"
    @staticmethod
    def id():
        # Stable machine identifier for this check.
        return "desktop_files"
    def run(self):
        """Yield TestResult objects for the root-desktop-file and validity checks."""
        logger = self.get_logger()
        with self._appimage.mount() as mountpoint:
            # find desktop files in AppDir root
            root_desktop_files = set(map(str, Path(mountpoint).glob("*.desktop")))
            # search entire AppDir for desktop files
            all_desktop_files = set(map(str, Path(mountpoint).rglob("*.desktop")))
        # NOTE(review): validation below runs after the mount context has exited;
        # if mount() unmounts on exit, these paths may no longer exist — confirm.
        logger.info("Checking desktop files in root directory")
        exactly_one_file_in_root = len(root_desktop_files) == 1
        yield TestResult(exactly_one_file_in_root, "desktop_files_check.exactly_one_in_root", "Exactly one desktop file in AppDir root")
        dfv_cmd_name = "desktop-file-validate"
        dfv_cmd_path = shutil.which(dfv_cmd_name)
        validation_results = {}
        if not dfv_cmd_path:
            # Tool not installed: skip per-file validation entirely.
            logger.error("could not find {}, skipping desktop file checks".format(dfv_cmd_name))
        else:
            for desktop_file in all_desktop_files:
                logger.info("Checking desktop file {} with {}".format(desktop_file, dfv_cmd_name))
                success = True
                try:
                    subprocess.check_call([dfv_cmd_path, desktop_file])
                except subprocess.SubprocessError:
                    success = False
                validation_results[desktop_file] = success
        # NOTE: all() over an empty dict is True, so a missing validator tool
        # (or zero desktop files) reports this check as passing.
        yield TestResult(all(validation_results.values()), "desktop_files_check.all_desktop_files_valid", "All desktop files in AppDir are valid")
5d89df9aec90bf9a7fbbad8609de99387bb74a7a | 6,460 | py | Python | analysis_vis/scripts/PromisPlotMetrics.py | arubenstein/deep_seq | 96c2bc131dc3bd3afb05486bfbc6f7297c57e604 | [
"BSD-2-Clause"
] | null | null | null | analysis_vis/scripts/PromisPlotMetrics.py | arubenstein/deep_seq | 96c2bc131dc3bd3afb05486bfbc6f7297c57e604 | [
"BSD-2-Clause"
] | null | null | null | analysis_vis/scripts/PromisPlotMetrics.py | arubenstein/deep_seq | 96c2bc131dc3bd3afb05486bfbc6f7297c57e604 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""Plot all graph metrics as histograms"""
import itertools
import sys
import operator
import numpy as np
import argparse
from general_seq import conv
from general_seq import seq_IO
from plot import conv as pconv
from plot import hist
from collections import Counter
import matplotlib.pyplot as plt
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument ('--list_nodes', '-d', nargs=2, action='append', help="text file which contains sequences and the label you want to use for the set")
parser.add_argument ('--output_prefix', help='output file prefix')
parser.add_argument ('--metric', default="metrics", help='name of metric to plot. To plot all metrics, input metrics')
args = parser.parse_args()
main(args.list_nodes, args.output_prefix, args.metric)
| 44.551724 | 257 | 0.680805 | #!/usr/bin/env python
"""Plot all graph metrics as histograms"""
import itertools
import sys
import operator
import numpy as np
import argparse
from general_seq import conv
from general_seq import seq_IO
from plot import conv as pconv
from plot import hist
from collections import Counter
import matplotlib.pyplot as plt
def get_data_from_dict( sequence_dict, label ):
    """Collect the value stored under *label* from every per-sequence metric dict."""
    collected = []
    for inner in sequence_dict.values():
        collected.append(inner[label])
    return collected
def average_metrics( dict_metrics, metric_labels ):
    """Average each labelled metric per sequence.

    *dict_metrics* maps sequence -> list of metric dicts; the result maps
    sequence -> {label: mean value}. Sequences whose list is empty keep an
    empty inner dict.
    """
    result = {}
    for seq, metric_dicts in dict_metrics.items():
        result[seq] = {}
        for label in metric_labels:
            values = [entry[label] for entry in metric_dicts]
            if values:
                result[seq][label] = sum(values) / float(len(values))
    return result
def list_metrics( cleaved_seq_dict, sequences, labels ):
    """For every sequence, gather its metric dict from each dataset in
    *cleaved_seq_dict* (skipping datasets that lack it), then return the
    per-sequence averages via average_metrics."""
    per_seq = {}
    for seq in sequences:
        found = []
        for dataset in cleaved_seq_dict.values():
            entry = dataset.get(seq)
            if entry is not None:
                found.append(entry)
        per_seq[seq] = found
    return average_metrics(per_seq, labels)
def average_fraction_neighbors_cleaved( cleaved_seq_dict, middle_seq_dict, uncleaved_seq_dict, list_cleaved_seqs ):
    """For each sequence in *list_cleaved_seqs*, compute the fraction of cleaved
    neighbors within every labelled dataset, then return the per-sequence
    averages as a flat list of floats."""
    metrics_dict = { s : [] for s in list_cleaved_seqs }
    for label in cleaved_seq_dict.keys():
        # conv.fraction_neighbors_cleaved returns {sequence: fraction} for this dataset.
        seqs_fracs = conv.fraction_neighbors_cleaved(cleaved_seq_dict[label].keys(), middle_seq_dict[label].keys(), uncleaved_seq_dict[label].keys(), list_cleaved_seqs, test_existence=True)
        for seq, frac in seqs_fracs.items():
            metrics_dict[seq].append({"fraction_cleaved" : frac })
    # Average across datasets, then flatten to a plain list of averaged fractions.
    am = average_metrics( metrics_dict, ["fraction_cleaved"])
    return [ v for s, l in am.items() for k, v in l.items() ]
def main(list_nodes, output_prefix, metric):
    """Read each sequence file, split entries by cleavage type, and plot the
    requested metric(s) as histograms grouped by how many datasets cleave each
    sequence; also emit a standalone colorbar figure."""
    cleaved_seq = {}
    uncleaved_seq = {}
    middle_seq = {}
    for nodes, label in list_nodes:
        sequences = seq_IO.read_sequences(nodes, additional_params=True, header=True)
        cleaved_seq[label] = { key : val for key, val in sequences.items() if val["type"] == "CLEAVED" }
        middle_seq[label] = { key : val for key, val in sequences.items() if val["type"] == "MIDDLE" }
        uncleaved_seq[label] = { key : val for key, val in sequences.items() if val["type"] == "UNCLEAVED" }
    if metric == "metrics":
        labels_non_plot = ["label", "fitness", "type", "canonical"]
        # NOTE(review): uses the "DEMEE" entry of the *last* file read as the
        # template for available metric keys — presumably all files share keys; confirm.
        orig_labels_to_plot = sorted([ key for key in sequences["DEMEE"].keys() if key not in labels_non_plot ])
        labels_to_plot = sorted(orig_labels_to_plot)
    else:
        orig_labels_to_plot = [metric]
        labels_to_plot = [metric]
    n_to_plot = len(labels_to_plot)
    fig, axarr = pconv.create_ax(n_to_plot, 1, shx=False, shy=False)
    nbins = 10
    # Count in how many datasets each sequence appears as cleaved.
    list_seqs = [ k for d in cleaved_seq.values() for k in d.keys() ]
    count_seqs = Counter(list_seqs)
    #seqs_5_l = [ s for s in list_seqs if count_seqs[s] == 5 ]
    seqs_4_l = [ s for s in list_seqs if count_seqs[s] == 4 ]
    seqs_3_l = [ s for s in list_seqs if count_seqs[s] == 3 ]
    seqs_2_l = [ s for s in list_seqs if count_seqs[s] == 2 ]
    seqs_1_l = [ s for s in list_seqs if count_seqs[s] == 1 ]
    if metric != "Fraction_Cleaved":
        #seqs_5 = list_metrics( cleaved_seq, seqs_5_l, orig_labels_to_plot)
        seqs_4 = list_metrics( cleaved_seq, seqs_4_l, orig_labels_to_plot)
        seqs_3 = list_metrics( cleaved_seq, seqs_3_l, orig_labels_to_plot)
        seqs_2 = list_metrics( cleaved_seq, seqs_2_l, orig_labels_to_plot)
        seqs_1 = list_metrics( cleaved_seq, seqs_1_l, orig_labels_to_plot)
    for ind, key in enumerate(labels_to_plot):
        if key == "pageranks":
            log = True
        else:
            log = False
        if key == "Fraction_Cleaved":
            # NOTE(review): arguments are (cleaved, uncleaved, middle) but the callee's
            # signature is (cleaved, middle, uncleaved, ...) — confirm the swap is intended.
            data = [ #average_fraction_neighbors_cleaved(cleaved_seq, uncleaved_seq, middle_seq, seqs_5_l),
                 average_fraction_neighbors_cleaved(cleaved_seq, uncleaved_seq, middle_seq, seqs_4_l),
                 average_fraction_neighbors_cleaved(cleaved_seq, uncleaved_seq, middle_seq, seqs_3_l),
                 average_fraction_neighbors_cleaved(cleaved_seq, uncleaved_seq, middle_seq, seqs_2_l),
                 average_fraction_neighbors_cleaved(cleaved_seq, uncleaved_seq, middle_seq, seqs_1_l)]
            normed=True
        else:
            # NOTE(review): this branch orders data [1,2,3,4] while the branch above
            # orders [4,3,2,1], yet both share the 5-entry label list below — the
            # label-to-series mapping looks inconsistent; verify against the figures.
            data = [ #get_data_from_dict(seqs_5, key),
                get_data_from_dict(seqs_1, key), get_data_from_dict(seqs_2, key), get_data_from_dict(seqs_3, key), get_data_from_dict(seqs_4, key) ]
            normed=True
        hist.draw_actual_plot(axarr[0,ind], data, "", key.capitalize(), colors = [ tuple(c) for c in plt.cm.Blues(np.linspace(0.2, 1, 4)).tolist()], log=log, normed=normed, label=["Cl. by 5", "Cl. by 4", "Cl. by 3", "Cl. by 2", "Cl. by 1"], nbins=nbins)
        axarr[0,ind].ticklabel_format(axis='x', style='sci', scilimits=(-2,2))
        #pconv.add_legend(axarr[0,ind], location="upper right")
    pconv.save_fig(fig, output_prefix, metric, n_to_plot*3, 3, tight=True, size=9)
    fig_bar, axarr_bar = pconv.create_ax(1, 1, shx=False, shy=False)
    gradient = np.linspace(1, 0.2, 256)
    #gradient = np.hstack((gradient, gradient))
    # NOTE(review): np.array(zip(...)) is a Python-2 idiom; under Python 3 it
    # yields a 0-d object array — would need np.array(list(zip(...))). Confirm
    # which interpreter this script targets.
    gradient = np.array(zip(gradient,gradient))
    axarr_bar[0,0].imshow(gradient, aspect='auto', cmap=plt.get_cmap('Blues'))
    #axarr_bar[0,0].set_axis_off()
    plt.tick_params(
    axis='both', # changes apply to the x-axis
    which='both', # both major and minor ticks are affected
    bottom='off', # ticks along the bottom edge are off
    top='off', # ticks along the top edge are off
    labelbottom='off', # labels along the bottom edge are off
    left='off', # ticks along the bottom edge are off
    right='off', # ticks along the top edge are off
    labelright='off') # labels along the bottom edge are off
    pconv.save_fig(fig_bar, output_prefix, "colorbar", 0.3, 3, tight=True)
# CLI entry point: parse the (nodes file, label) pairs, output prefix, and
# metric name, then hand off to main().
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument ('--list_nodes', '-d', nargs=2, action='append', help="text file which contains sequences and the label you want to use for the set")
    parser.add_argument ('--output_prefix', help='output file prefix')
    parser.add_argument ('--metric', default="metrics", help='name of metric to plot. To plot all metrics, input metrics')
    args = parser.parse_args()
    main(args.list_nodes, args.output_prefix, args.metric)
f5e52d8f8be3ea6b39880e3f6eb0aa7a22e1d02d | 5,393 | py | Python | qtapp/widgets/C_QNavigator.py | philipdavis82/ModernQt | 2708f4227e4d9a6e9f7d3a0987a79a4d34e45b6a | [
"MIT"
] | 1 | 2021-08-11T01:58:27.000Z | 2021-08-11T01:58:27.000Z | qtapp/widgets/C_QNavigator.py | philipdavis82/ModernQt | 2708f4227e4d9a6e9f7d3a0987a79a4d34e45b6a | [
"MIT"
] | null | null | null | qtapp/widgets/C_QNavigator.py | philipdavis82/ModernQt | 2708f4227e4d9a6e9f7d3a0987a79a4d34e45b6a | [
"MIT"
] | null | null | null | # Left side navigator
import __global__,os
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets
NAVIGATION_STYLE_SHEET="""
QWidget
{
color: #1f1f1f;
background-color: #1f1f1f;
}
QPushButton
{
font-size: 32px;
border: none;
padding: 0px;
font-size: 32px;
padding-left: 0px;
padding-right: 0px;
background-color: #2f2f2f;
}
QPushButton:hover
{
background-color: #5f5f5f;
}
QLabel
{
background-color: transparent;
}
"""
NAVIGATION_STYLE_SHEET_ACTIVATED="""
QPushButton
{
font-size: 32px;
border: none;
padding: 0px;
font-size: 32px;
padding-left: 0px;
padding-right: 0px;
background-color: #5f5f5f;
}
QPushButton:hover
{
background-color: #5f5f5f;
}
QLabel
{
background-color: transparent;
}
"""
# clicked = QtCore.pyqtSignal()
# def paintEvent(self,event):
# painter = QtGui.QPainter(self)
# width = self.width()
# height = self.height()
# if self.__highlight: painter.fillRect(0,0,width,height,self.__highlightbrush)
# else : painter.fillRect(0,0,width,height,self.__brush)
# Internal Functions | 30.297753 | 132 | 0.656036 | # Left side navigator
import __global__,os
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets
NAVIGATION_STYLE_SHEET="""
QWidget
{
color: #1f1f1f;
background-color: #1f1f1f;
}
QPushButton
{
font-size: 32px;
border: none;
padding: 0px;
font-size: 32px;
padding-left: 0px;
padding-right: 0px;
background-color: #2f2f2f;
}
QPushButton:hover
{
background-color: #5f5f5f;
}
QLabel
{
background-color: transparent;
}
"""
NAVIGATION_STYLE_SHEET_ACTIVATED="""
QPushButton
{
font-size: 32px;
border: none;
padding: 0px;
font-size: 32px;
padding-left: 0px;
padding-right: 0px;
background-color: #5f5f5f;
}
QPushButton:hover
{
background-color: #5f5f5f;
}
QLabel
{
background-color: transparent;
}
"""
class _C_QNavButton(QtWidgets.QPushButton):
    """Icon-only navigation button: shows an SVG from the media directory,
    exposes activate()/deactivate() to swap stylesheets, and re-emits
    clicked on mouse press."""
    # clicked = QtCore.pyqtSignal()
    def __init__(self,parent,name:str,icon:str=None,*args,**kwargs):
        super().__init__(parent)
        self.setStyleSheet(NAVIGATION_STYLE_SHEET)
        self.__name = name
        self.setToolTip(name)
        # self.__img = img
        self.setMinimumHeight(50)
        # Visual Classes
        # NOTE(review): brushes/pens are only referenced by the commented-out
        # paintEvent below; currently unused.
        self.__brush = QtGui.QBrush(QtGui.QColor(0x2f2f2f))
        self.__highlightbrush = QtGui.QBrush(QtGui.QColor(0x5f5f5f))
        self.__outlinePen = QtGui.QPen(QtGui.QBrush(QtGui.QColor(0x2f2f2f)),5)
        self.__highlightPen = QtGui.QPen(QtGui.QBrush(QtGui.QColor(0x5f5f5f)),5)
        # Internal State Machine
        self.__highlight = False
        self.__layout = QtWidgets.QHBoxLayout(self)
        # Fall back to a placeholder icon when none is supplied.
        if icon is None: self.__icon = QtGui.QIcon( os.path.join(__global__.MEDIA_DIR,"broken.svg") )
        else           : self.__icon = QtGui.QIcon( os.path.join(__global__.MEDIA_DIR,icon) )
        self.__iconlbl = QtWidgets.QLabel(self)
        self.__iconlbl.setPixmap(self.__icon.pixmap(self.__iconlbl.size()))
        self.__layout.addWidget(self.__iconlbl)
        self.__layout.setContentsMargins(4,4,4,4)
    def activate(self):
        # Highlighted stylesheet marks this button as the current page.
        self.setStyleSheet(NAVIGATION_STYLE_SHEET_ACTIVATED)
    def deactivate(self):
        self.setStyleSheet(NAVIGATION_STYLE_SHEET)
    def resizeEvent(self,event):
        # Re-render the icon pixmap at the label's new size.
        self.__iconlbl.setPixmap(self.__icon.pixmap(self.__iconlbl.size()))
    def enterEvent(self,event):
        # NOTE(review): only flips the flag; with paintEvent commented out and no
        # update() call, this has no visible effect — hover styling comes from CSS.
        self.__highlight = True
    def leaveEvent(self,event):
        self.__highlight = False
    def mousePressEvent(self,event):
        # NOTE(review): overrides QPushButton's press handling without calling
        # super(), emitting clicked directly on press — confirm this is intended.
        self.clicked.emit()
    # def paintEvent(self,event):
    #     painter = QtGui.QPainter(self)
    #     width = self.width()
    #     height = self.height()
    #     if self.__highlight: painter.fillRect(0,0,width,height,self.__highlightbrush)
    #     else               : painter.fillRect(0,0,width,height,self.__brush)
    # Internal Functions
    def __buildButton(self):
        pass
class C_QNavigator(QtWidgets.QWidget):
    """Vertical navigation bar: a column of icon buttons, one per registered
    widget, that emits ``onPress`` with the selected entry's name.
    """
    onPress = QtCore.pyqtSignal(str)  # emitted with the button name when the selection changes
    onHover = QtCore.pyqtSignal(str)  # declared but not emitted anywhere in this class
    def __init__(self,parent,*args,**kwargs):
        super().__init__(parent)
        self.setAttribute(QtCore.Qt.WA_StyledBackground)
        self.setMaximumWidth(50)
        self.setStyleSheet(NAVIGATION_STYLE_SHEET)
        # Registries: name -> navigation button, name -> associated widget.
        self.__buttonList = {}
        self.__widgetList = {}
        self.__layout = QtWidgets.QGridLayout(self)
        self.__layout.setSpacing(0)
        self.__layout.setContentsMargins(0,0,0,0)
        # Expanding spacer parked in row 99 pushes all buttons to the top.
        self.__layout.addItem(QtWidgets.QSpacerItem(0, 99, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding),99,0,1,1)
        # Visual Classes
        self.__brush = QtGui.QBrush(QtGui.QColor(0x1f1f1f))
        self.__outlinePen = QtGui.QPen(self.__brush,5)
        # State Machine information
        self.__repaint = False
        self.__lastActive = None  # name of the currently selected button, None before first press
        # Set Style
    # def resizeEvent(self,event):
    #     self.__repaint = True
    # def paintEvent(self,event):
    #     if not self.__repaint : return
    #     self.__repaint = True
    #     painter = QtGui.QPainter(self)
    #     painter.setPen(self.__outlinePen)
    #     width = self.width()
    #     height = self.height()
    #     painter.fillRect(0,0,width,height,self.__brush)
    def addButton(self,name:str,widget):
        """Register *widget* under *name* and add a navigation button for it."""
        icon = None
        # NOTE(review): the bare except silently swallows any error raised by
        # widget.icon() -- consider narrowing to AttributeError.
        try : icon = widget.icon()
        except : print(f"Failed to call icon() method on widget {widget}")
        self.__buttonList[name] = _C_QNavButton(self,name,icon)
        self.__widgetList[name] = widget
        self.__layout.addWidget(self.__buttonList[name],len(self.__buttonList)-1,0)
        # The lambda is safe despite late binding: 'name' is a parameter,
        # bound once per addButton call.
        self.__buttonList[name].clicked.connect(lambda: self.__buttonPressed(name))
    def __buttonPressed(self,name):
        # Highlight the pressed button, un-highlight the previous one,
        # then notify listeners via onPress.
        if self.__lastActive == name: return
        self.__buttonList[name].activate()
        if not self.__lastActive is None: self.__buttonList[self.__lastActive].deactivate()
        self.__lastActive = name
        self.onPress.emit(name)
    def setWidget(self,name):
        """Programmatically select the entry registered under *name*."""
        return self.__buttonPressed(name)
class C_QNavigatorDock(QtWidgets.QDockWidget):
    """Thin dock wrapper that hosts a C_QNavigator inside a QDockWidget."""
    def __init__(self,parent,nav):
        """:param parent: owning main window.
        :param nav: the C_QNavigator instance to host.
        """
        super().__init__(parent)
        self.__Nav = nav
        self.__parent = parent
        self.setContentsMargins(0,0,0,0)
        self.setWidget(self.__Nav)
        self.setFloating(False)
self.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures) | 3,236 | 645 | 309 |
eca278c2ee6deb3d003508dc2e1fdd4ff13899ae | 1,127 | py | Python | soco/exceptions.py | Nnamdi/sonos-group-manager | 0887031551f47cd27e55a31b0293138f659e703f | [
"MIT"
] | 3 | 2015-08-24T18:34:23.000Z | 2017-03-07T11:00:22.000Z | soco/exceptions.py | Nnamdi/sonos-group-manager | 0887031551f47cd27e55a31b0293138f659e703f | [
"MIT"
] | null | null | null | soco/exceptions.py | Nnamdi/sonos-group-manager | 0887031551f47cd27e55a31b0293138f659e703f | [
"MIT"
] | 1 | 2021-07-18T03:19:07.000Z | 2021-07-18T03:19:07.000Z | # -*- coding: utf-8 -*-
""" Exceptions that are used by SoCo """
class SoCoException(Exception):
""" base exception raised by SoCo, containing the UPnP error code """
class UnknownSoCoException(SoCoException):
""" raised if reason of the error can not be extracted
The exception object will contain the raw response sent back from the
speaker """
class SoCoUPnPException(SoCoException):
""" encapsulates UPnP Fault Codes raised in response to actions sent over
the network """
class CannotCreateDIDLMetadata(SoCoException):
""" Raised if a data container class cannot create the DIDL metadata due to
missing information
"""
class UnknownXMLStructure(SoCoException):
"""Raised if XML with and unknown or unexpected structure is returned"""
| 27.487805 | 79 | 0.709849 | # -*- coding: utf-8 -*-
""" Exceptions that are used by SoCo """
class SoCoException(Exception):
""" base exception raised by SoCo, containing the UPnP error code """
class UnknownSoCoException(SoCoException):
""" raised if reason of the error can not be extracted
The exception object will contain the raw response sent back from the
speaker """
class SoCoUPnPException(SoCoException):
""" encapsulates UPnP Fault Codes raised in response to actions sent over
the network """
def __init__(self, message, error_code, error_xml, error_description=""):
super(SoCoUPnPException, self).__init__()
self.message = message
self.error_code = error_code
self.error_description = error_description
self.error_xml = error_xml
def __str__(self):
return self.message
class CannotCreateDIDLMetadata(SoCoException):
""" Raised if a data container class cannot create the DIDL metadata due to
missing information
"""
class UnknownXMLStructure(SoCoException):
"""Raised if XML with and unknown or unexpected structure is returned"""
| 281 | 0 | 54 |
a8225798514d9d01e55862c8b25f9000b322d24a | 95 | py | Python | chia/types/spend_bundle_conditions.py | nur-azhar/chia-blockchain | 890da94024b4742bbbb93e47f72113e8344a20b3 | [
"Apache-2.0"
] | 1 | 2022-03-22T18:11:52.000Z | 2022-03-22T18:11:52.000Z | chia/types/spend_bundle_conditions.py | nur-azhar/chia-blockchain | 890da94024b4742bbbb93e47f72113e8344a20b3 | [
"Apache-2.0"
] | null | null | null | chia/types/spend_bundle_conditions.py | nur-azhar/chia-blockchain | 890da94024b4742bbbb93e47f72113e8344a20b3 | [
"Apache-2.0"
] | null | null | null | from chia_rs import Spend, SpendBundleConditions
__all__ = ["Spend", "SpendBundleConditions"]
| 23.75 | 48 | 0.8 | from chia_rs import Spend, SpendBundleConditions
__all__ = ["Spend", "SpendBundleConditions"]
| 0 | 0 | 0 |
f671136c39d6d927ecbbd01627b9ff662d333587 | 3,317 | py | Python | BioSQL/DBUtils.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | 3 | 2017-10-23T21:53:57.000Z | 2019-09-23T05:14:12.000Z | BioSQL/DBUtils.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | null | null | null | BioSQL/DBUtils.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | 6 | 2020-02-26T16:34:20.000Z | 2020-03-04T15:34:00.000Z | # Copyright 2002 by Andrew Dalke. All rights reserved.
# Revisions 2007-2008 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Note that BioSQL (including the database schema and scripts) is
# available and licensed separately. Please consult www.biosql.org
_dbutils = {}
# Disabled: better safe than sorry
## def next_id(self, cursor, table):
## # XXX brain-dead! Hopefully, the database will enforce PK unicity..
## table = self.tname(table)
## sql = r"select 1+max(%s_id) from %s" % (table, table)
## cursor.execute(sql)
## rv = cursor.fetchone()
## return rv[0]
_dbutils["MySQLdb"] = Mysql_dbutils
_dbutils["psycopg"] = Psycopg_dbutils
_dbutils["psycopg2"] = Psycopg2_dbutils
class Pgdb_dbutils(Generic_dbutils):
"""Add support for pgdb in the PyGreSQL database connectivity package.
"""
_dbutils["pgdb"] = Pgdb_dbutils
| 30.154545 | 77 | 0.621043 | # Copyright 2002 by Andrew Dalke. All rights reserved.
# Revisions 2007-2008 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Note that BioSQL (including the database schema and scripts) is
# available and licensed separately. Please consult www.biosql.org
_dbutils = {}
class Generic_dbutils:
    """Driver-agnostic helper methods shared by all database adaptors."""
    def __init__(self):
        pass
    def tname(self, table):
        """Map a logical table name onto the physical table.

        'biosequence' data actually lives in the 'bioentry' table; any
        other name maps to itself.
        """
        return 'bioentry' if table == 'biosequence' else table
    # Disabled: better safe than sorry
    ## def next_id(self, cursor, table):
    ##     # XXX brain-dead! Hopefully, the database will enforce PK unicity..
    ##     table = self.tname(table)
    ##     sql = r"select 1+max(%s_id) from %s" % (table, table)
    ##     cursor.execute(sql)
    ##     rv = cursor.fetchone()
    ##     return rv[0]
    def last_id(self, cursor, table):
        """Return the current maximum primary-key value of *table*."""
        # XXX: Unsafe without transactions isolation
        name = self.tname(table)
        cursor.execute(r"select max(%s_id) from %s" % (name, name))
        return cursor.fetchone()[0]
    def autocommit(self, conn, y = 1):
        """Generic driver: no autocommit switch is available; do nothing."""
        # Let's hope it was not really needed
        pass
class Mysql_dbutils(Generic_dbutils):
    """MySQLdb-specific helpers."""
    def last_id(self, cursor, table):
        """Return the auto-increment id generated by the last INSERT."""
        try :
            #This worked on older versions of MySQL
            return cursor.insert_id()
        except AttributeError:
            #See bug 2390
            #Google suggests this is the new way,
            #same fix also suggested by Eric Gibert:
            return cursor.lastrowid
_dbutils["MySQLdb"] = Mysql_dbutils
class Psycopg_dbutils(Generic_dbutils):
    """psycopg (v1) helpers: primary keys come from per-table sequences."""
    def _fetch_scalar(self, cursor, sql):
        # Run *sql* and return the first column of the first row.
        cursor.execute(sql)
        return cursor.fetchone()[0]
    def next_id(self, cursor, table):
        """Reserve and return the next primary-key value for *table*."""
        table = self.tname(table)
        return self._fetch_scalar(cursor, r"select nextval('%s_pk_seq')" % table)
    def last_id(self, cursor, table):
        """Return the most recently assigned primary-key value for *table*."""
        table = self.tname(table)
        return self._fetch_scalar(cursor, r"select currval('%s_pk_seq')" % table)
    def autocommit(self, conn, y = True):
        """Toggle autocommit mode on the connection."""
        conn.autocommit(y)
_dbutils["psycopg"] = Psycopg_dbutils
class Psycopg2_dbutils(Psycopg_dbutils):
    """psycopg2 helpers; autocommit is emulated through isolation levels."""
    def autocommit(self, conn, y = True):
        """Enable (isolation level 0) or disable (level 1) autocommit."""
        conn.set_isolation_level(0 if y else 1)
_dbutils["psycopg2"] = Psycopg2_dbutils
class Pgdb_dbutils(Generic_dbutils):
    """Add support for pgdb in the PyGreSQL database connectivity package.
    """
    def next_id(self, cursor, table):
        """Reserve and return the next primary-key value for *table*."""
        cursor.execute(r"select nextval('%s_pk_seq')" % self.tname(table))
        return cursor.fetchone()[0]
    def last_id(self, cursor, table):
        """Return the most recently assigned primary-key value for *table*."""
        cursor.execute(r"select currval('%s_pk_seq')" % self.tname(table))
        return cursor.fetchone()[0]
    def autocommit(self, conn, y = True):
        """pgdb offers no autocommit switch; always refuse."""
        raise NotImplementedError("pgdb does not support this!")
_dbutils["pgdb"] = Pgdb_dbutils
def get_dbutils(module_name):
    """Return an instance of the dbutils helper for *module_name*.

    :param module_name: DB driver module name, e.g. "MySQLdb" or "psycopg2".
    :returns: the registered driver-specific helper, or a Generic_dbutils
        instance when no dedicated helper exists for the driver.
    """
    try:
        return _dbutils[module_name]()
    except KeyError:
        # Only an unknown driver should fall back to the generic helper;
        # the previous bare 'except:' also hid genuine errors (including
        # KeyboardInterrupt) raised while constructing the helper.
        return Generic_dbutils()
| 1,781 | 54 | 451 |
0c9f5c9ba17b1f5a062e65f32a46be2ef6093556 | 926 | py | Python | locators.py | kiryushah/vklogin | a274947c4cb1d9ba3a4585776ed2b56a1d9c078b | [
"Apache-2.0"
] | null | null | null | locators.py | kiryushah/vklogin | a274947c4cb1d9ba3a4585776ed2b56a1d9c078b | [
"Apache-2.0"
] | null | null | null | locators.py | kiryushah/vklogin | a274947c4cb1d9ba3a4585776ed2b56a1d9c078b | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.common.by import By
| 46.3 | 75 | 0.656587 | from selenium.webdriver.common.by import By
class MainPageLocators(object):
    """XPath locators for the VK login flow.

    Grouped by page state: ``*_FIRST`` for the initial index page,
    ``*_AFTER_OUT`` for the quick-login form shown after logging out, and
    ``*_AFTER_INCORRECT`` for the form shown after a failed login attempt.
    """
    # Initial index page.
    LOGIN_FIRST = (By.XPATH, "//input[@id='index_email']")
    PASS_FIRST = (By.XPATH, "//input[@id='index_pass']")
    FOREIGN_COMP_FIRST = (By.XPATH, "//div[@id='index_expire']")  # presumably the "foreign computer" checkbox -- confirm
    GO_BUTTON_FIRST = (By.XPATH, "//button[@id='index_login_button']")
    OPTION_FIRST = (By.XPATH, "//div/a[@id='top_profile_link']")
    OUT_FIRST = (By.XPATH, "//div/a[@id='top_logout_link']")
    # Quick-login form shown after logging out.
    LOGIN_AFTER_OUT = (By.XPATH, "//input[@id='quick_email']")
    PASS_AFTER_OUT = (By.XPATH, "//input[@id='quick_pass']")
    FOREIGN_COMP_AFTER_OUT = (By.XPATH, "//div[@id='quick_expire']")
    GO_BUTTON_AFTER_OUT = (By.XPATH, "//button[@id='quick_login_button']")
    # Full login form shown after an incorrect attempt.
    LOGIN_AFTER_INCORRECT = (By.XPATH, "//input[@id='email']")
    PASS_AFTER_INCORRECT = (By.XPATH, "//input[@id='pass']")
    GO_BUTTON_AFTER_INCORRECT = (By.XPATH, "//button[@id='login_button']")
| 0 | 858 | 23 |
0996f21924677109d171c9170d7a252f867da53e | 132 | py | Python | src/Config/__init__.py | gr4ph0s/c4d_corona_light_lister | 8162579f9ffbd7872bdf317de959114789da2808 | [
"MIT"
] | 3 | 2019-07-11T22:38:46.000Z | 2020-10-01T14:03:18.000Z | src/Config/__init__.py | gr4ph0s/c4d_corona_light_lister | 8162579f9ffbd7872bdf317de959114789da2808 | [
"MIT"
] | null | null | null | src/Config/__init__.py | gr4ph0s/c4d_corona_light_lister | 8162579f9ffbd7872bdf317de959114789da2808 | [
"MIT"
] | null | null | null | from .ConfigManager import ConfigManager
from .ConfigManagerCorona import ConfigManagerCorona
from .JsonFunction import JsonFunction | 44 | 52 | 0.893939 | from .ConfigManager import ConfigManager
from .ConfigManagerCorona import ConfigManagerCorona
from .JsonFunction import JsonFunction | 0 | 0 | 0 |
8619c950c392865b21c93eb16b05442fb12255e2 | 3,526 | py | Python | cloudnetpy/products/product_tools.py | MoLochmann/cloudnetpy | 328a54eecc0ce0fe5a3bb2018782c8de19ef7216 | [
"MIT"
] | 1 | 2019-12-05T19:59:24.000Z | 2019-12-05T19:59:24.000Z | cloudnetpy/products/product_tools.py | KarlJohnsonnn/cloudnetpy | eae2966a515829108899a527b8d34ddff2472124 | [
"MIT"
] | null | null | null | cloudnetpy/products/product_tools.py | KarlJohnsonnn/cloudnetpy | eae2966a515829108899a527b8d34ddff2472124 | [
"MIT"
] | null | null | null | """General helper classes and functions for all products."""
import netCDF4
import cloudnetpy.utils as utils
class CategorizeBits:
    """Class holding information about category and quality bits.
    Args:
        categorize_file (str): Categorize file name.
    Attributes:
        category_bits (dict): Dictionary containing boolean fields for `droplet`,
            `falling`, `cold`, `melting`, `aerosol`, `insect`.
        quality_bits (dict): Dictionary containing boolean fields for `radar`,
            `lidar`, `clutter`, `molecular`, `attenuated`, `corrected`.
    """
    # Bit names listed in bit-position order (index = bit number).
    category_keys = ('droplet', 'falling', 'cold', 'melting', 'aerosol',
                     'insect')
    quality_keys = ('radar', 'lidar', 'clutter', 'molecular', 'attenuated',
                    'corrected')
    def _read_bits(self, bit_type):
        """ Converts bitfield into dictionary."""
        # NOTE(review): self._categorize_file is never assigned within this
        # class as shown -- presumably a constructor sets it; confirm.
        nc = netCDF4.Dataset(self._categorize_file)
        bitfield = nc.variables[f"{bit_type}_bits"][:]
        keys = getattr(CategorizeBits, f"{bit_type}_keys")
        bits = {key: utils.isbit(bitfield, i) for i, key in enumerate(keys)}
        nc.close()
        return bits
class ProductClassification(CategorizeBits):
"""Base class for creating different classifications in the child classes
of various Cloudnet products. Child of CategorizeBits class.
Args:
categorize_file (str): Categorize file name.
Attributes:
is_rain (ndarray): 1D array denoting rainy profiles.
is_undetected_melting (ndarray): 1D array denoting profiles which should
contain melting layer but was not detected from the data.
"""
def read_nc_fields(nc_file, names):
    """Read one or more variables from a netCDF file.

    Args:
        nc_file (str): netCDF file name.
        names (str/list): Variables to be read, e.g. 'temperature' or
            ['ldr', 'lwp'].

    Returns:
        ndarray/list: A single array when one variable name was given as a
            string, otherwise a list of arrays.
    """
    if isinstance(names, str):
        names = [names]
    dataset = netCDF4.Dataset(nc_file)
    fields = []
    for name in names:
        fields.append(dataset.variables[name][:])
    dataset.close()
    return fields[0] if len(fields) == 1 else fields
def interpolate_model(cat_file, names):
    """Interpolates 2D model field into dense Cloudnet grid.

    Args:
        cat_file (str): Categorize file name.
        names (str/list): Model variable to be interpolated, e.g.
            'temperature' or ['temperature', 'pressure'].

    Returns:
        ndarray/list: Array in case of one variable passed as a string.
            List of arrays otherwise.
    """
    def _interp_field(var_name):
        # Restored helper: this copy of the function referenced
        # _interp_field without defining it, so every call raised
        # NameError. Reads the sparse model grid plus the dense target
        # grid and interpolates the field onto the latter.
        values = read_nc_fields(cat_file, ['model_time', 'model_height',
                                           var_name, 'time', 'height'])
        return utils.interpolate_2d(*values)
    names = [names] if isinstance(names, str) else names
    data = [_interp_field(name) for name in names]
    return data[0] if len(data) == 1 else data
| 34.23301 | 81 | 0.637266 | """General helper classes and functions for all products."""
import netCDF4
import cloudnetpy.utils as utils
class CategorizeBits:
    """Class holding information about category and quality bits.
    Args:
        categorize_file (str): Categorize file name.
    Attributes:
        category_bits (dict): Dictionary containing boolean fields for `droplet`,
            `falling`, `cold`, `melting`, `aerosol`, `insect`.
        quality_bits (dict): Dictionary containing boolean fields for `radar`,
            `lidar`, `clutter`, `molecular`, `attenuated`, `corrected`.
    """
    # Bit names listed in bit-position order (index = bit number).
    category_keys = ('droplet', 'falling', 'cold', 'melting', 'aerosol',
                     'insect')
    quality_keys = ('radar', 'lidar', 'clutter', 'molecular', 'attenuated',
                    'corrected')
    def __init__(self, categorize_file):
        self._categorize_file = categorize_file
        self.category_bits = self._read_bits('category')
        self.quality_bits = self._read_bits('quality')
    def _read_bits(self, bit_type):
        """Converts the packed '<bit_type>_bits' variable into a dictionary
        mapping each bit name to a boolean array.
        """
        # Context manager guarantees the file handle is closed even if the
        # variable lookup raises (the previous explicit close() leaked the
        # handle on error).
        with netCDF4.Dataset(self._categorize_file) as nc:
            bitfield = nc.variables[f"{bit_type}_bits"][:]
        keys = getattr(CategorizeBits, f"{bit_type}_keys")
        return {key: utils.isbit(bitfield, i) for i, key in enumerate(keys)}
class ProductClassification(CategorizeBits):
    """Base class for creating different classifications in the child classes
    of various Cloudnet products. Child of CategorizeBits class.
    Args:
        categorize_file (str): Categorize file name.
    Attributes:
        is_rain (ndarray): 1D array denoting rainy profiles.
        is_undetected_melting (ndarray): 1D array denoting profiles which should
            contain melting layer but was not detected from the data.
    """
    def __init__(self, categorize_file):
        # Populate the category/quality bit dictionaries first.
        super().__init__(categorize_file)
        # Per-profile flags read straight from the categorize file.
        self.is_rain = read_nc_fields(categorize_file, 'is_rain')
        self.is_undetected_melting = read_nc_fields(categorize_file,
                                                    'is_undetected_melting')
def read_nc_fields(nc_file, names):
    """Read one or more variables from a netCDF file.

    Args:
        nc_file (str): netCDF file name.
        names (str/list): Variables to be read, e.g. 'temperature' or
            ['ldr', 'lwp'].

    Returns:
        ndarray/list: A single array when one variable name was given as a
            string, otherwise a list of arrays.
    """
    if isinstance(names, str):
        names = [names]
    dataset = netCDF4.Dataset(nc_file)
    fields = []
    for name in names:
        fields.append(dataset.variables[name][:])
    dataset.close()
    return fields[0] if len(fields) == 1 else fields
def interpolate_model(cat_file, names):
    """Interpolate 2D model fields onto the dense Cloudnet grid.

    Args:
        cat_file (str): Categorize file name.
        names (str/list): Model variable(s) to interpolate, e.g.
            'temperature' or ['temperature', 'pressure'].

    Returns:
        ndarray/list: A single array when one variable name was given as a
            string, otherwise a list of arrays.
    """
    def _interp_one(var_name):
        # Sparse model grid + values, followed by the dense target grid.
        args = read_nc_fields(cat_file, ['model_time', 'model_height',
                                         var_name, 'time', 'height'])
        return utils.interpolate_2d(*args)
    if isinstance(names, str):
        names = [names]
    data = [_interp_one(name) for name in names]
    return data[0] if len(data) == 1 else data
| 641 | 0 | 79 |
7456be1dd24d12db9457d398933f4ea547e55224 | 2,380 | py | Python | iniesta/config.py | crazytruth/iniesta | 1e1cc079d04758f319c6bcee4a8a14a176e7b24e | [
"MIT"
] | 1 | 2021-03-14T08:27:43.000Z | 2021-03-14T08:27:43.000Z | iniesta/config 2.py | crazytruth/iniesta | 1e1cc079d04758f319c6bcee4a8a14a176e7b24e | [
"MIT"
] | 1 | 2020-10-08T08:14:04.000Z | 2020-10-08T08:14:04.000Z | iniesta/config.py | crazytruth/iniesta | 1e1cc079d04758f319c6bcee4a8a14a176e7b24e | [
"MIT"
] | null | null | null | from typing import Optional, Dict, List
#: The redlock caches
INIESTA_CACHES: Dict[str, dict] = {
"iniesta1": {"HOST": "localhost", "PORT": 6379, "DATABASE": 1},
"iniesta2": {"HOST": "localhost", "PORT": 6379, "DATABASE": 2},
"iniesta3": {"HOST": "localhost", "PORT": 6379, "DATABASE": 3},
}
#: The initialization type Iniesta will be initialized with.
INIESTA_INITIALIZATION_TYPE: tuple = tuple()
# ["SNS_PRODUCER", "EVENT_POLLING", "QUEUE_POLLING", "CUSTOM"]
#: The topic arn for the SNS that will receive messages.
INIESTA_SNS_PRODUCER_GLOBAL_TOPIC_ARN: str = None
#: The number of messages to receive while polling. Value between 0-10
INIESTA_SQS_RECEIVE_MESSAGE_MAX_NUMBER_OF_MESSAGES: int = 10
#: The time to wait between receiving SQS messages. A value between 0-20 (0 for short polling).
INIESTA_SQS_RECEIVE_MESSAGE_WAIT_TIME_SECONDS: int = 20
# possible filters:
# if ends with ".*" then filter is concerted to prefix
# reference: https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html
#: The filters you would like for your application's queue to filter for.
INIESTA_SQS_CONSUMER_FILTERS: List[str] = []
#: If you would like to verify the filter policies on AWS match the filter policies declared in your application.
INIESTA_ASSERT_FILTER_POLICIES: bool = True
#: The event key that will be filtered.
INIESTA_SNS_EVENT_KEY: str = "iniesta_pass"
#: The default sqs queue name
INIESTA_SQS_QUEUE_NAME: Optional[str] = None
#: The SQS queue name template, if you have a normalized queue naming scheme.
INIESTA_SQS_QUEUE_NAME_TEMPLATE: str = "iniesta-{env}-{service_name}"
#: The retry count for attempting to acquire a lock.
INIESTA_LOCK_RETRY_COUNT: int = 1
#: The lock timeout for the message. Will release after defined value.
INIESTA_LOCK_TIMEOUT: int = 10
# mainly used for tests
# INIESTA_SQS_REGION_NAME: Optional[str] = None
INIESTA_SQS_ENDPOINT_URL: Optional[str] = None
#
# INIESTA_SNS_REGION_NAME: Optional[str] = None
INIESTA_SNS_ENDPOINT_URL: Optional[str] = None
INIESTA_DRY_RUN: bool = False
#: Your AWS Access Key if it is different from other access keys.
INIESTA_AWS_ACCESS_KEY_ID = None
#: Your AWS Secret Access Key if it is different from other access keys.
INIESTA_AWS_SECRET_ACCESS_KEY = None
#: Your AWS Default Region if it is iniesta specific
INIESTA_AWS_DEFAULT_REGION: Optional[str] = None
| 36.615385 | 113 | 0.764286 | from typing import Optional, Dict, List
#: The redlock caches
INIESTA_CACHES: Dict[str, dict] = {
"iniesta1": {"HOST": "localhost", "PORT": 6379, "DATABASE": 1},
"iniesta2": {"HOST": "localhost", "PORT": 6379, "DATABASE": 2},
"iniesta3": {"HOST": "localhost", "PORT": 6379, "DATABASE": 3},
}
#: The initialization type Iniesta will be initialized with.
INIESTA_INITIALIZATION_TYPE: tuple = tuple()
# ["SNS_PRODUCER", "EVENT_POLLING", "QUEUE_POLLING", "CUSTOM"]
#: The topic arn for the SNS that will receive messages.
INIESTA_SNS_PRODUCER_GLOBAL_TOPIC_ARN: str = None
#: The number of messages to receive while polling. Value between 0-10
INIESTA_SQS_RECEIVE_MESSAGE_MAX_NUMBER_OF_MESSAGES: int = 10
#: The time to wait between receiving SQS messages. A value between 0-20 (0 for short polling).
INIESTA_SQS_RECEIVE_MESSAGE_WAIT_TIME_SECONDS: int = 20
# possible filters:
# if ends with ".*" then filter is concerted to prefix
# reference: https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html
#: The filters you would like for your application's queue to filter for.
INIESTA_SQS_CONSUMER_FILTERS: List[str] = []
#: If you would like to verify the filter policies on AWS match the filter policies declared in your application.
INIESTA_ASSERT_FILTER_POLICIES: bool = True
#: The event key that will be filtered.
INIESTA_SNS_EVENT_KEY: str = "iniesta_pass"
#: The default sqs queue name
INIESTA_SQS_QUEUE_NAME: Optional[str] = None
#: The SQS queue name template, if you have a normalized queue naming scheme.
INIESTA_SQS_QUEUE_NAME_TEMPLATE: str = "iniesta-{env}-{service_name}"
#: The retry count for attempting to acquire a lock.
INIESTA_LOCK_RETRY_COUNT: int = 1
#: The lock timeout for the message. Will release after defined value.
INIESTA_LOCK_TIMEOUT: int = 10
# mainly used for tests
# INIESTA_SQS_REGION_NAME: Optional[str] = None
INIESTA_SQS_ENDPOINT_URL: Optional[str] = None
#
# INIESTA_SNS_REGION_NAME: Optional[str] = None
INIESTA_SNS_ENDPOINT_URL: Optional[str] = None
INIESTA_DRY_RUN: bool = False
#: Your AWS Access Key if it is different from other access keys.
INIESTA_AWS_ACCESS_KEY_ID = None
#: Your AWS Secret Access Key if it is different from other access keys.
INIESTA_AWS_SECRET_ACCESS_KEY = None
#: Your AWS Default Region if it is iniesta specific
INIESTA_AWS_DEFAULT_REGION: Optional[str] = None
| 0 | 0 | 0 |
fbd7345df2d3f5d20f5e14e99c1c43086d81847e | 1,331 | py | Python | src/regex/executor.py | avli/nfa-regex | fe9a9817773bfeb36c1069b51fad52e7329361aa | [
"MIT"
] | 5 | 2019-06-24T08:13:53.000Z | 2022-01-04T12:41:08.000Z | src/regex/executor.py | avli/nfa-regex | fe9a9817773bfeb36c1069b51fad52e7329361aa | [
"MIT"
] | null | null | null | src/regex/executor.py | avli/nfa-regex | fe9a9817773bfeb36c1069b51fad52e7329361aa | [
"MIT"
] | 1 | 2021-03-10T01:41:47.000Z | 2021-03-10T01:41:47.000Z | """The main API module. """
from __future__ import absolute_import, print_function
from .compiler import State, SplitState, Match, compile
from .tokenizer import to_postfix
def match(pattern, s):
    """Apply a pattern to a string and return the result of the match.
    :param pattern: A POSIX-like regular expression.
    :type pattern: str
    :s: A string to match.
    :type s: str
    :returns: True if matches, False otherwise.
    :rtype: bool
    :raises: :py:class:`~MalformedRegex` if the regular expression is
    malformed.
    """
    # Translate the pattern to postfix notation, then compile it into the
    # state machine's start state.
    postfix = to_postfix(pattern)
    state = compile(postfix)
    # Seed the active-state set with everything reachable from the start.
    current_states = set()
    update_states(current_states, state)
    # Consume the input one character at a time, advancing the machine.
    for c in s:
        current_states = make_step(current_states, c)
    # The string matches iff the Match sentinel is among the final states.
    return Match in current_states
| 26.62 | 70 | 0.687453 | """The main API module. """
from __future__ import absolute_import, print_function
from .compiler import State, SplitState, Match, compile
from .tokenizer import to_postfix
def update_states(current_states, state):
    """Insert *state* into *current_states*, expanding SplitState branches.

    Ordinary States and the Match sentinel are added directly; a SplitState
    is followed recursively through both of its outgoing arrows. States
    already present are skipped, which also terminates cyclic structures.
    """
    if state in current_states:
        return
    if isinstance(state, State) or state is Match:
        current_states.add(state)
        return
    if isinstance(state, SplitState):
        for branch in (state.outs[0], state.outs[1]):
            update_states(current_states, branch)
def make_step(current_states, c):
    """Return the set of states reachable by consuming character *c*."""
    next_states = set()
    consuming = (s for s in current_states if s is not Match and s.c == c)
    for state in consuming:
        # Follow the single outgoing arrow of each matching state.
        update_states(next_states, state.outs[0])
    return next_states
def match(pattern, s):
    """Apply *pattern* to *s* and report whether the match succeeds.

    :param pattern: A POSIX-like regular expression.
    :type pattern: str
    :param s: A string to match.
    :type s: str
    :returns: True if matches, False otherwise.
    :rtype: bool
    :raises: :py:class:`~MalformedRegex` if the regular expression is
        malformed.
    """
    start_state = compile(to_postfix(pattern))
    current_states = set()
    update_states(current_states, start_state)
    for character in s:
        current_states = make_step(current_states, character)
    return Match in current_states
| 491 | 0 | 46 |
e4dce5d81645c31b55d04dee0baa11f52312e334 | 1,038 | py | Python | ornitho/model/right.py | dda-dev/ornitho-client-python | 94d09774026786c021f35cae8cc74b65a28075d9 | [
"MIT"
] | 3 | 2020-06-17T17:58:54.000Z | 2022-03-27T17:26:07.000Z | ornitho/model/right.py | dda-dev/ornitho-client-python | 94d09774026786c021f35cae8cc74b65a28075d9 | [
"MIT"
] | null | null | null | ornitho/model/right.py | dda-dev/ornitho-client-python | 94d09774026786c021f35cae8cc74b65a28075d9 | [
"MIT"
] | 1 | 2021-12-17T13:13:10.000Z | 2021-12-17T13:13:10.000Z | from typing import List, Union
from ornitho import APIRequester
| 28.054054 | 83 | 0.55106 | from typing import List, Union
from ornitho import APIRequester
class Right:
    """An observer right (permission) with an id, a name and a comment."""
    ENDPOINT: str = "observers/rights"
    def __init__(self, id_: int, name: str, comment: str) -> None:
        """Detail constructor.

        :param id_: ID
        :param name: Name
        :param comment: Comment
        """
        self.id_: int = id_
        self.name: str = name
        self.comment: str = comment
    def __str__(self) -> str:
        # "<id>-<name>-<comment>"
        return "-".join(str(part) for part in (self.id_, self.name, self.comment))
    @classmethod
    def retrieve_for_observer(cls, id_observer: Union[int, str]) -> List["Right"]:
        """Fetch all rights granted to the observer with *id_observer*."""
        with APIRequester() as requester:
            response, pk = requester.request_raw(
                method="get",
                url=f"{cls.ENDPOINT}/{id_observer}",
            )
            rights = []
            for raw in response["data"]["rights"]:
                rights.append(
                    cls(id_=int(raw["id"]), name=raw["name"], comment=raw["comment"])
                )
            return rights
| 491 | 458 | 23 |
db5fb949ccde43873ffc3924ac1e5571fce6f30b | 61 | py | Python | pymusiclooper/__init__.py | sysnoble/PyMusicLooper | 67e0c1d9d6766c5ca390ec0b5824b0d683e5600e | [
"MIT"
] | null | null | null | pymusiclooper/__init__.py | sysnoble/PyMusicLooper | 67e0c1d9d6766c5ca390ec0b5824b0d683e5600e | [
"MIT"
] | null | null | null | pymusiclooper/__init__.py | sysnoble/PyMusicLooper | 67e0c1d9d6766c5ca390ec0b5824b0d683e5600e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# coding=utf-8
__version__ = "3.0.0-dev"
| 12.2 | 25 | 0.655738 | #!/usr/bin/python3
# coding=utf-8
__version__ = "3.0.0-dev"
| 0 | 0 | 0 |
51d2a0340a4dea03a0f1e0756eac8043c921fc0f | 1,404 | py | Python | distask/tiggers/interval.py | tickbh/distask | 266435b1d5ba0b2b54e5f354781d48795b41bb3b | [
"MulanPSL-1.0"
] | 1 | 2021-06-19T01:09:27.000Z | 2021-06-19T01:09:27.000Z | distask/tiggers/interval.py | tickbh/distask | 266435b1d5ba0b2b54e5f354781d48795b41bb3b | [
"MulanPSL-1.0"
] | null | null | null | distask/tiggers/interval.py | tickbh/distask | 266435b1d5ba0b2b54e5f354781d48795b41bb3b | [
"MulanPSL-1.0"
] | 1 | 2021-06-26T15:04:09.000Z | 2021-06-26T15:04:09.000Z | import time
from distask import util
from distask.tiggers.base import Tigger
class IntervalTigger(Tigger):
    '''Fires repeatedly at a fixed time interval.'''
    def __getstate__(self):
        """Return state values to be pickled."""
        # NOTE(review): the parentheses do not make a tuple -- this returns
        # a plain number, and __setstate__ below mirrors that, so the pair
        # is self-consistent.
        return (self.microseconds)
    def __setstate__(self, state):
        """Restore state from the unpickled state values."""
        self.microseconds = state
| 35.1 | 95 | 0.64886 | import time
from distask import util
from distask.tiggers.base import Tigger
class IntervalTigger(Tigger):
    '''Fires repeatedly at a fixed time interval.'''
    def __init__(self, microseconds = None, seconds=None, minutes=None, hours=None, days=None):
        """Accumulate all supplied time components into a single interval.

        NOTE(review): despite the attribute name, the factors used
        (seconds * 1000, minutes * 60_000, ...) imply the unit is actually
        milliseconds -- confirm the intended unit.
        """
        self.microseconds = 0
        if microseconds: self.microseconds = microseconds
        if seconds: self.microseconds = seconds * 1000 + self.microseconds
        if minutes: self.microseconds = minutes * 60_000 + self.microseconds
        if hours: self.microseconds = hours * 3600_000 + self.microseconds
        if days: self.microseconds = days * 86400_000 + self.microseconds
        if not self.microseconds:
            # A zero/absent interval would never fire; refuse it up front.
            raise AttributeError("not vaild interval")
    def get_next_time(self, pre = None, limit=None):
        """Return a list of trigger timestamps.

        With no *pre*, or a *pre* not earlier than now, a single next
        trigger is returned; otherwise one trigger per elapsed interval
        since *pre*. *limit* is accepted but unused here.
        """
        now = util.micro_now()
        tiggers = []
        if not pre or pre >= now:
            tiggers.append(now + self.microseconds)
            return tiggers
        # Catch-up: one trigger for every full interval between pre and now.
        for t in range(int(pre), now, self.microseconds):
            tiggers.append(t + self.microseconds)
        return tiggers
    def __str__(self) -> str:
        return "interval {} microseconds".format(self.microseconds)
    def __getstate__(self):
        """Return state values to be pickled."""
        return (self.microseconds)
    def __setstate__(self, state):
        """Restore state from the unpickled state values."""
        self.microseconds = state
| 944 | 0 | 81 |
baeb6676733dcc0b2aa9f9b5fcee68ba51f62fe5 | 710 | py | Python | main_monte.py | veritaem/RL_udemy | f817be686b85406fbddd961441c4970bf8b05045 | [
"MIT"
] | null | null | null | main_monte.py | veritaem/RL_udemy | f817be686b85406fbddd961441c4970bf8b05045 | [
"MIT"
] | null | null | null | main_monte.py | veritaem/RL_udemy | f817be686b85406fbddd961441c4970bf8b05045 | [
"MIT"
] | null | null | null | import gym
from monte_carlo import Agent
if __name__=='__main__':
    # Monte-Carlo policy evaluation on Gym's Blackjack environment.
    env=gym.make("Blackjack-v0")
    agent=Agent()
    n_episodes=500000
    for i in range(n_episodes):
        if i%50000==0:
            # Progress heartbeat every 50k episodes.
            print(f'starting episode {i}')
        observation=env.reset()
        done=False
        while not done:
            #choose an action based on the policy
            action=agent.policy(observation)
            # take the action
            observation_, reward, done, info= env.step(action)
            # Record the visited state and reward for the end-of-episode update.
            agent.memory.append((observation, reward))
            observation=observation_
        agent.update_V()
    # Print value estimates for two sample state tuples -- presumably
    # (player sum, dealer card, usable ace); confirm against Blackjack-v0.
    print(agent.V[(21, 3, True)])
    print(agent.V[(4, 1, False)])
| 29.583333 | 63 | 0.574648 | import gym
from monte_carlo import Agent
if __name__=='__main__':
env=gym.make("Blackjack-v0")
agent=Agent()
n_episodes=500000
for i in range(n_episodes):
if i%50000==0:
print(f'starting episode {i}')
observation=env.reset()
done=False
while not done:
#choose an action based on the policy
action=agent.policy(observation)
# take the action
observation_, reward, done, info= env.step(action)
agent.memory.append((observation, reward))
observation=observation_
agent.update_V()
print(agent.V[(21, 3, True)])
print(agent.V[(4, 1, False)])
| 0 | 0 | 0 |
afb47de2e54683cd32ecb92965e3cbf57e48613c | 1,871 | py | Python | src/day7.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | src/day7.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | src/day7.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | #!/bin/python3
# Copyright (C) 2020 Matheus Fernandes Bigolin <mfrdrbigolin@disroot.org>
# SPDX-License-Identifier: MIT
"""Day Seven, Handy Haversacks."""
# I had to postpone this day because I was not aware of the techniques of
# graph theory to solve this problem.
from sys import argv
from re import findall
from utils import open_file, arrange, usage_and_exit, transfiged, dictf, \
merge
def edges(graph):
    """Return the edges of a <graph>."""
    # One (vertex, neighbour) pair per adjacency entry, in dict order.
    return [(vertex, neighbour)
            for vertex in graph
            for neighbour in graph[vertex]]
def solve1(bags, elem):
    """Return a set of ancestors of <elem> in the graph <bags>.

    Rewritten as an iterative reverse-reachability search: the original
    recursion rebuilt the entire edge list via ``edges(bags)`` at every
    recursion level (superlinear blowup) and never terminated on cyclic
    graphs.  The returned set is identical: every vertex that can reach
    <elem> through the containment relation.
    """
    have = set()
    frontier = {elem}
    while frontier:
        # Direct parents of the current frontier that are not seen yet.
        parents = {vertex for vertex, neighbours in bags.items()
                   if any(n in frontier for n in neighbours)} - have
        have |= parents
        frontier = parents
    return have
def solve2(bags, elem):
    """Return the cumulative weight of elements from <elem> in <bags>.

    Each neighbour of a vertex is a ``(weight, inner)`` pair; the total
    counts every contained bag, recursively.  Rewritten to walk the
    adjacency list of <elem> directly instead of rebuilding the whole
    edge list via ``edges(bags)`` at every recursion level (the original
    was accidentally superlinear).  An absent <elem> still yields 0.
    """
    count = 0
    for weight, inner in bags.get(elem, ()):
        # weight bags of `inner`, each carrying its own contents.
        count += weight * (solve2(bags, inner) + 1)
    return count
# Capture the bag name and its contents (ignoring weights).
UNWEIGHTED_REG = r"(?:^|\d+ ?)(.+?) bags?"
# Capture the bag's contents and its weights.
WEIGHTED_REG = r"(\d+) (.+?) bags?"
# Capture the bag name.
VERTEX_REG = r"^(.+?) bags"

if __name__ == "__main__":
    usage_and_exit(len(argv) != 2)
    entries = arrange(open_file(argv[1]))
    # Part one graph: plain containment edges, weights dropped.
    containment_graph = merge([dictf(findall(UNWEIGHTED_REG, entry))
                               for entry in entries])
    # Part two graph: (weight, bag) pairs attached to each vertex.
    weighted_graph = merge([dictf(findall(VERTEX_REG, entry)
                                  + transfiged(findall(WEIGHTED_REG, entry),
                                               (int, str)))
                            for entry in entries])
    print(len(solve1(containment_graph, "shiny gold")))
    print(solve2(weighted_graph, "shiny gold"))
| 24.946667 | 74 | 0.61411 | #!/bin/python3
# Copyright (C) 2020 Matheus Fernandes Bigolin <mfrdrbigolin@disroot.org>
# SPDX-License-Identifier: MIT
"""Day Seven, Handy Haversacks."""
# I had to postpone this day because I was not aware of the techniques of
# graph theory to solve this problem.
from sys import argv
from re import findall
from utils import open_file, arrange, usage_and_exit, transfiged, dictf, \
merge
def edges(graph):
"""Return the edges of a <graph>."""
edge = []
for vertex in graph:
for neighbour in graph[vertex]:
edge.append((vertex, neighbour))
return edge
def solve1(bags, elem):
"""Return a set of ancestors of <elem> in the graph <bags>."""
have = set()
for edge in edges(bags):
if edge[1] == elem:
have |= solve1(bags, edge[0]) | {edge[0]}
return have
def solve2(bags, elem):
"""Return the cumulative weight of elements from <elem> in <bags>."""
count = 0
for edge in edges(bags):
if edge[0] == elem:
count += edge[1][0] * solve2(bags, edge[1][1]) + edge[1][0]
return count
# Capture the bag name and its contents (ignoring weights).
UNWEIGHTED_REG = r"(?:^|\d+ ?)(.+?) bags?"
# Capture the bag's contents and its weights.
WEIGHTED_REG = r"(\d+) (.+?) bags?"
# Capture the bag name.
VERTEX_REG = r"^(.+?) bags"
if __name__ == "__main__":
usage_and_exit(len(argv) != 2)
arranged_data = arrange(open_file(argv[1]))
unweighted_data = merge([dictf(findall(UNWEIGHTED_REG, f))
for f in arranged_data])
weighted_data = merge([dictf(findall(VERTEX_REG, f) + transfiged
(findall(WEIGHTED_REG, f), (int, str)))
for f in arranged_data])
print(len(solve1(unweighted_data, "shiny gold")))
print(solve2(weighted_data, "shiny gold"))
| 0 | 0 | 0 |
ef613548c150be7b83da8302c638cc0223096d1f | 405 | py | Python | nerddiary/asynctools/test/test_delayedsignal.py | mishamsk/nerddiary | 2d0981c5034460f353c2994347fb95a5c94a55bd | [
"Apache-2.0"
] | null | null | null | nerddiary/asynctools/test/test_delayedsignal.py | mishamsk/nerddiary | 2d0981c5034460f353c2994347fb95a5c94a55bd | [
"Apache-2.0"
] | 5 | 2022-02-20T06:10:28.000Z | 2022-03-28T03:22:41.000Z | nerddiary/asynctools/test/test_delayedsignal.py | mishamsk/nerddiary | 2d0981c5034460f353c2994347fb95a5c94a55bd | [
"Apache-2.0"
] | null | null | null | import signal
from time import sleep
from nerddiary.asynctools.delayedsignal import DelayedKeyboardInterrupt
| 25.3125 | 82 | 0.735802 | import signal
from time import sleep
from nerddiary.asynctools.delayedsignal import DelayedKeyboardInterrupt
def run():
    """Sleep inside the delayed-interrupt guard, then report success."""
    # The guard defers SIGINT until the with-block exits, so the sleep
    # completes and "Ok" is printed before any KeyboardInterrupt fires.
    guard = DelayedKeyboardInterrupt()
    with guard:
        sleep(1)
    print("Ok")
def test_sigint_same_process(interrupt_with_sigal):
    """A SIGINT sent mid-sleep must be deferred: the child still prints
    "Ok", then dies with a KeyboardInterrupt traceback and exit code 1."""
    exitcode, out, err = interrupt_with_sigal(run, 0.5, signal.SIGINT)
    assert exitcode == 1
    assert out == "Ok\n"
    assert err.endswith("KeyboardInterrupt\n")
| 247 | 0 | 46 |
20afce4036b3521f093a9b82048f9c2a04955401 | 14,324 | py | Python | main.py | Tiamat-Tech/DETReg | cea2cd2db456e502522a04f8cc4f38326c2466de | [
"Apache-2.0"
] | 212 | 2021-06-09T10:32:25.000Z | 2022-03-29T07:48:07.000Z | main.py | Tiamat-Tech/DETReg | cea2cd2db456e502522a04f8cc4f38326c2466de | [
"Apache-2.0"
] | 34 | 2021-07-16T11:27:03.000Z | 2022-03-25T08:42:16.000Z | main.py | Tiamat-Tech/DETReg | cea2cd2db456e502522a04f8cc4f38326c2466de | [
"Apache-2.0"
] | 23 | 2021-07-28T00:15:28.000Z | 2022-03-17T11:35:08.000Z | # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
import datasets
import util.misc as utils
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch, viz
from models import build_model
from models.backbone import build_swav_backbone, build_swav_backbone_old
from util.default_args import set_model_defaults, get_args_parser
PRETRAINING_DATASETS = ['imagenet', 'imagenet100', 'coco_pretrain', 'airbus_pretrain']
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(
        'Deformable DETR training and evaluation script',
        parents=[get_args_parser()])
    cli_args = arg_parser.parse_args()
    # Derive dataset paths and per-model defaults before training.
    set_dataset_path(cli_args)
    set_model_defaults(cli_args)
    if cli_args.output_dir:
        Path(cli_args.output_dir).mkdir(parents=True, exist_ok=True)
    main(cli_args)
| 44.073846 | 150 | 0.629363 | # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
import datasets
import util.misc as utils
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch, viz
from models import build_model
from models.backbone import build_swav_backbone, build_swav_backbone_old
from util.default_args import set_model_defaults, get_args_parser
PRETRAINING_DATASETS = ['imagenet', 'imagenet100', 'coco_pretrain', 'airbus_pretrain']
def main(args):
    """Train (or evaluate/visualize) Deformable DETR according to *args*.

    Flow: distributed setup -> seeding -> model & dataset construction ->
    optimizer/scheduler -> optional checkpoint restore -> eval/viz
    shortcuts -> epoch loop with checkpointing, periodic evaluation and
    JSON logging.
    """
    utils.init_distributed_mode(args)
    print("git:\n {}\n".format(utils.get_sha()))

    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    if args.random_seed:
        args.seed = np.random.randint(0, 1000000)
    if args.resume:
        # Reuse the seed stored in the checkpoint so runs stay consistent.
        checkpoint_args = torch.load(args.resume, map_location='cpu')['args']
        args.seed = checkpoint_args.seed
        print("Loaded random seed from checkpoint:", checkpoint_args.seed)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    print(f"Using random seed: {seed}")

    # A SwAV backbone is only needed for self-supervised pretraining runs.
    swav_model = None
    if args.dataset in PRETRAINING_DATASETS:
        if args.obj_embedding_head == 'head':
            swav_model = build_swav_backbone(args, device)
        elif args.obj_embedding_head == 'intermediate':
            swav_model = build_swav_backbone_old(args, device)
    model, criterion, postprocessors = build_model(args)
    model.to(device)

    model_without_ddp = model
    n_parameters = sum(p.numel()
                       for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    dataset_train, dataset_val = get_datasets(args)
    if args.distributed:
        if args.cache_mode:
            sampler_train = samplers.NodeDistributedSampler(dataset_train)
            sampler_val = samplers.NodeDistributedSampler(
                dataset_val, shuffle=False)
        else:
            sampler_train = samplers.DistributedSampler(dataset_train)
            sampler_val = samplers.DistributedSampler(
                dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    coco_evaluator = None
    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True)

    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn, num_workers=args.num_workers,
                                   pin_memory=True)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
                                 drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers,
                                 pin_memory=True)

    # lr_backbone_names = ["backbone.0", "backbone.neck", "input_proj", "transformer.encoder"]
    def match_name_keywords(n, name_keywords):
        # True when any configured keyword is a substring of parameter name n.
        out = False
        for b in name_keywords:
            if b in n:
                out = True
                break
        return out

    for n, p in model_without_ddp.named_parameters():
        print(n)

    # Three LR groups: main weights, backbone (own LR), and the linear
    # projections (scaled LR).
    param_dicts = [
        {
            "params":
                [p for n, p in model_without_ddp.named_parameters()
                 if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
            "lr": args.lr,
        },
        {
            "params": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],
            "lr": args.lr_backbone,
        },
        {
            "params": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
            "lr": args.lr * args.lr_linear_proj_mult,
        }
    ]
    if args.sgd:
        optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
                                      weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    elif args.dataset_file == "coco" or args.dataset_file == "airbus":
        base_ds = get_coco_api_from_dataset(dataset_val)
    else:
        base_ds = dataset_val

    if args.frozen_weights is not None:
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])

    output_dir = Path(args.output_dir)

    if args.pretrain:
        print('Initialized from the pre-training model')
        checkpoint = torch.load(args.pretrain, map_location='cpu')
        state_dict = checkpoint['model']
        for k in list(state_dict.keys()):
            # remove useless class embed
            if 'class_embed' in k:
                del state_dict[k]
        msg = model_without_ddp.load_state_dict(state_dict, strict=False)
        print(msg)

    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        missing_keys, unexpected_keys = model_without_ddp.load_state_dict(
            checkpoint['model'], strict=False)
        # Profiler bookkeeping tensors are not real key mismatches.
        unexpected_keys = [k for k in unexpected_keys if not (
            k.endswith('total_params') or k.endswith('total_ops'))]
        if len(missing_keys) > 0:
            print('Missing Keys: {}'.format(missing_keys))
        if len(unexpected_keys) > 0:
            print('Unexpected Keys: {}'.format(unexpected_keys))
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            import copy
            p_groups = copy.deepcopy(optimizer.param_groups)
            optimizer.load_state_dict(checkpoint['optimizer'])
            # Keep the LRs configured for this run, not the checkpoint's.
            for pg, pg_old in zip(optimizer.param_groups, p_groups):
                pg['lr'] = pg_old['lr']
                pg['initial_lr'] = pg_old['initial_lr']
            print(optimizer.param_groups)
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            # todo: this is a hack for doing experiment that resume from checkpoint and also modify lr scheduler (e.g., decrease lr in advance).
            args.override_resumed_lr_drop = True
            if args.override_resumed_lr_drop:
                print('Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.')
                lr_scheduler.step_size = args.lr_drop
                lr_scheduler.base_lrs = list(
                    map(lambda group: group['initial_lr'], optimizer.param_groups))
            lr_scheduler.step(lr_scheduler.last_epoch)
            args.start_epoch = checkpoint['epoch'] + 1
        # check the resumed model
        if (not args.eval and not args.viz and args.dataset in ['coco', 'voc']):
            test_stats, coco_evaluator = evaluate(
                model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
            )

    if args.eval:
        test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                              data_loader_val, base_ds, device, args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
        return

    if args.viz:
        viz(model, criterion, postprocessors,
            data_loader_val, base_ds, device, args.output_dir)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, swav_model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            # extra checkpoint before LR drop and every 5 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 5 == 0:
                checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'args': args,
                }, checkpoint_path)
        # Periodic evaluation only makes sense for annotated datasets.
        if args.dataset in ['coco', 'voc'] and epoch % args.eval_every == 0:
            test_stats, coco_evaluator = evaluate(
                model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
            )
        else:
            test_stats = {}
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}

        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")

            # for evaluation logs
            if 'imagenet' not in args.dataset and coco_evaluator is not None:
                (output_dir / 'eval').mkdir(exist_ok=True)
                if "bbox" in coco_evaluator.coco_eval:
                    filenames = ['latest.pth']
                    if epoch % 50 == 0:
                        filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval["bbox"].eval,
                                   output_dir / "eval" / name)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def get_datasets(args):
    """Build the (train, val) dataset pair selected by ``args.dataset``.

    Detection datasets (coco/airbus/voc) use annotated train splits; the
    ``*_pretrain`` and imagenet variants build self-supervised training
    sets via ``datasets.selfdet``.  Imports are kept local to each branch
    so only the selected dataset's dependencies load.
    """
    if args.dataset == 'coco':
        dataset_train = build_dataset(image_set='train', args=args)
        dataset_val = build_dataset(image_set='val', args=args)
    elif args.dataset == 'coco_pretrain':
        from datasets.selfdet import build_selfdet
        # Self-supervised pretraining reads raw images from train2017.
        dataset_train = build_selfdet(
            'train', args=args, p=os.path.join(args.coco_path, 'train2017'))
        dataset_val = build_dataset(image_set='val', args=args)
    elif args.dataset == 'airbus':
        dataset_train = build_dataset(image_set='train', args=args)
        dataset_val = build_dataset(image_set='val', args=args)
    elif args.dataset == 'airbus_pretrain':
        from datasets.selfdet import build_selfdet
        dataset_train = build_selfdet(
            'train', args=args, p=os.path.join(args.airbus_path, 'train_v2'))
        dataset_val = build_dataset(image_set='val', args=args)
    elif args.dataset == 'imagenet':
        from datasets.selfdet import build_selfdet
        dataset_train = build_selfdet(
            'train', args=args, p=os.path.join(args.imagenet_path, 'train'))
        dataset_val = build_dataset(image_set='val', args=args)
    elif args.dataset == 'imagenet100':
        from datasets.selfdet import build_selfdet
        dataset_train = build_selfdet(
            'train', args=args, p=os.path.join(args.imagenet100_path, 'train'))
        dataset_val = build_dataset(image_set='val', args=args)
    elif args.dataset == 'voc':
        from datasets.torchvision_datasets.voc import VOCDetection
        from datasets.coco import make_coco_transforms
        # VOC trains on 2007+2012 trainval and evaluates on the 2007 test set.
        dataset_train = VOCDetection(args.voc_path, ["2007", "2012"], image_sets=['trainval', 'trainval'],
                                     transforms=make_coco_transforms('train'), filter_pct=args.filter_pct)
        dataset_val = VOCDetection(args.voc_path, ["2007"], image_sets=[
            'test'], transforms=make_coco_transforms('val'))
    else:
        raise ValueError(f"Wrong dataset name: {args.dataset}")
    return dataset_train, dataset_val
def set_dataset_path(args):
    """Derive each dataset's directory under ``args.data_root`` onto *args*."""
    root = args.data_root
    args.coco_path = os.path.join(root, 'MSCoco')
    args.airbus_path = os.path.join(root, 'airbus-ship-detection')
    args.imagenet_path = os.path.join(root, 'ilsvrc')
    args.imagenet100_path = os.path.join(root, 'ilsvrc100')
    args.voc_path = os.path.join(root, 'pascal')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
set_dataset_path(args)
set_model_defaults(args)
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| 12,780 | 0 | 69 |
978d471b17063ea5a74f283f2e45bbf497328a77 | 2,041 | py | Python | tests/Action/test_Conditional.py | aalireza/arep | 95f0ec6282c4f5d12462d2a64e82d6777f51bf06 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T00:15:26.000Z | 2022-01-14T00:15:26.000Z | tests/Action/test_Conditional.py | aalireza/arep | 95f0ec6282c4f5d12462d2a64e82d6777f51bf06 | [
"BSD-3-Clause"
] | null | null | null | tests/Action/test_Conditional.py | aalireza/arep | 95f0ec6282c4f5d12462d2a64e82d6777f51bf06 | [
"BSD-3-Clause"
] | null | null | null | from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
# Bind the shared formatter to this module's data-file name once, so the
# coordinate sets below only need to list (line, column) pairs.
results_formatter = partial(results_formatter, name=os.path.basename(__file__))

# Expected hit coordinates inside tests/data/Action/Conditional.py, grouped
# by which Conditional feature the statement at that position exhibits.
results_with_elif = results_formatter({
    (2, 0), (13, 0)
})
results_with_else = results_formatter({
    (2, 0), (11, 5)
})
results_is_ifexp = results_formatter({
    (11, 5)
})
results_in_comprehensions = results_formatter({
    (18, 10), (20, 11), (23, 4), (23, 16)
})
misc_results = results_formatter({
    (30, 4)
})
# Union of every known conditional location in the sample file.
all_results = (misc_results | results_with_elif | results_with_else |
               results_is_ifexp | results_in_comprehensions)
@pytest.fixture
@pytest.mark.parametrize(('elif_'), [True, False, None])
@pytest.mark.parametrize(('else_'), [True, False, None])
@pytest.mark.parametrize(('ifexp'), [True, False, None])
@pytest.mark.parametrize(('consideration'), [True, None])
| 30.462687 | 79 | 0.675649 | from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
results_with_elif = results_formatter({
(2, 0), (13, 0)
})
results_with_else = results_formatter({
(2, 0), (11, 5)
})
results_is_ifexp = results_formatter({
(11, 5)
})
results_in_comprehensions = results_formatter({
(18, 10), (20, 11), (23, 4), (23, 16)
})
misc_results = results_formatter({
(30, 4)
})
all_results = (misc_results | results_with_elif | results_with_else |
results_is_ifexp | results_in_comprehensions)
@pytest.fixture
def grepper():
    """Provide a Grepper bound to the Conditional sample data file."""
    sample_path = os.path.abspath('tests/data/Action/Conditional.py')
    return arep.Grepper(sample_path)
@pytest.mark.parametrize(('elif_'), [True, False, None])
@pytest.mark.parametrize(('else_'), [True, False, None])
@pytest.mark.parametrize(('ifexp'), [True, False, None])
@pytest.mark.parametrize(('consideration'), [True, None])
def test_Conditional(grepper, action, consideration, elif_, else_, ifexp):
    """Check every Conditional constraint combination against known results.

    Each knob is tri-state: True (feature must hold), False (must not),
    None (don't care).  The expected set is carved out of the hand-listed
    coordinate sets at module scope.
    """
    if any([consideration, elif_, else_, ifexp]):
        # Only attach the constraint when at least one knob is set.
        action.reset()
        action.Conditional.consideration = consideration
        action.Conditional.else_ = else_
        action.Conditional.elif_ = elif_
        action.Conditional.ifexp = ifexp
        grepper.constraint_list.append(action)
    obtained_results = set(grepper.all_results())
    # Start from the ifexp filter, then intersect/subtract the elif_ and
    # else_ coordinate sets to mirror the constraint semantics.
    if ifexp is None:
        target_results = all_results.copy()
    elif ifexp is True:
        target_results = results_is_ifexp.copy()
    elif ifexp is False:
        target_results = (all_results - results_is_ifexp)
    if elif_ is True:
        target_results &= results_with_elif
    elif elif_ is False:
        target_results -= results_with_elif
    if else_ is True:
        target_results &= results_with_else
    elif else_ is False:
        target_results -= results_with_else
    assert obtained_results == target_results
| 1,095 | 0 | 44 |
9b8af072244e762dbf07841299fca6ff38da8282 | 740 | py | Python | 5. Probability and Statistics/probability-fundamentals/Probability Rules-378.py | bibekuchiha/dataquest | c7d8a2966fe2eee864442a59d64309033ea9993e | [
"MIT"
] | null | null | null | 5. Probability and Statistics/probability-fundamentals/Probability Rules-378.py | bibekuchiha/dataquest | c7d8a2966fe2eee864442a59d64309033ea9993e | [
"MIT"
] | null | null | null | 5. Probability and Statistics/probability-fundamentals/Probability Rules-378.py | bibekuchiha/dataquest | c7d8a2966fe2eee864442a59d64309033ea9993e | [
"MIT"
] | null | null | null | ## 1. Sample Space ##
# Sample space of two coin tosses: four equally likely outcomes.
coin_toss_omega = {'TH','HT','HH','TT'}

## 2. Probability of Events ##
# Two fair dice: 5 of 36 outcomes sum to 6; every sum is below 15; none exceed 13.
p_sum_6 = 5/ 36
p_lower_15 = 36/ 36
p_greater_13 = 0/ 36

## 3. Certain and Impossible Events ##
# P(sum is 2 or 4) = (1 + 3)/36; a sum of 13 is impossible, so only 12 counts.
p_2_or_4 = 4/ 36
p_12_or_13 = 1/ 36

## 4. The Addition Rule ##
# These event pairs are mutually exclusive, so probabilities simply add.
p_5_or_9 = (4/36) + (4/36)
p_even_or_less_2 = (18/36) + (0/36)
p_4_or_3_multiple = 3/36 + 12/36

## 5. Venn Diagrams ##
p_c = 3/6
p_d = 3/6
# Naive addition double-counts the overlap; the formula value corrects it.
p_c_d_addition = p_c + p_d
p_c_d_formula = 4/6
print(p_c_d_addition)
print(p_c_d_formula)

## 6. Exceptions to the Addition Rule ##
# P(F or T) = P(F) + P(T) - P(F and T).
p_f_or_t = 0.26 + 0.11 - 0.03

## 7. Mutually Exclusive Events ##
# Addition rule solved for the intersection: P(H and C) = P(H) + P(C) - P(H or C).
p_h_and_c = 0.08 + 0.11 - 0.17

## 8. Set Notation ##
operation_1 = False
operation_2 = True
operation_3 = False
operation_4 = True | 14.509804 | 40 | 0.645946 | ## 1. Sample Space ##
coin_toss_omega = {'TH','HT','HH','TT'}
## 2. Probability of Events ##
p_sum_6 = 5/ 36
p_lower_15 = 36/ 36
p_greater_13 = 0/ 36
## 3. Certain and Impossible Events ##
p_2_or_4 = 4/ 36
p_12_or_13 = 1/ 36
## 4. The Addition Rule ##
p_5_or_9 = (4/36) + (4/36)
p_even_or_less_2 = (18/36) + (0/36)
p_4_or_3_multiple = 3/36 + 12/36
## 5. Venn Diagrams ##
p_c = 3/6
p_d = 3/6
p_c_d_addition = p_c + p_d
p_c_d_formula = 4/6
print(p_c_d_addition)
print(p_c_d_formula)
## 6. Exceptions to the Addition Rule ##
p_f_or_t = 0.26 + 0.11 - 0.03
## 7. Mutually Exclusive Events ##
p_h_and_c = 0.08 + 0.11 - 0.17
## 8. Set Notation ##
operation_1 = False
operation_2 = True
operation_3 = False
operation_4 = True | 0 | 0 | 0 |
420dc3cb1fda2bf37b4238763133c898f1af9b53 | 304 | py | Python | Python_OO/NumeroComParametrodeVariavel.py | Madara701/Python_OO | 8d67569a8c4771dd82f5259c2ed5e782cd4e4036 | [
"Apache-2.0"
] | null | null | null | Python_OO/NumeroComParametrodeVariavel.py | Madara701/Python_OO | 8d67569a8c4771dd82f5259c2ed5e782cd4e4036 | [
"Apache-2.0"
] | null | null | null | Python_OO/NumeroComParametrodeVariavel.py | Madara701/Python_OO | 8d67569a8c4771dd82f5259c2ed5e782cd4e4036 | [
"Apache-2.0"
] | null | null | null |
Funcao(1,2,3, 'fabio')
'''
a utilização do args serve para poder passa mais de um parametro cqunado declaramos a função ou uma classe
sempre utilizamos * ou ** para transformar em tupla ou dicionario
'''
F1(nome ='Fabio',idade=25)
| 23.384615 | 106 | 0.710526 | def Funcao(*args):
print(args)
Funcao(1,2,3, 'fabio')
'''
a utilização do args serve para poder passa mais de um parametro cqunado declaramos a função ou uma classe
sempre utilizamos * ou ** para transformar em tupla ou dicionario
'''
def F1(**Kwargs):
print(Kwargs)
F1(nome ='Fabio',idade=25)
| 27 | 0 | 44 |
fdcb49336a1fb5846df01b552975c21502fd5069 | 8,426 | py | Python | src/cowrie/output/omniscidb.py | Zeerg/cowrie | 27c5c416e52b87bfa7d4c75ec8fbbe08203f5949 | [
"BSD-3-Clause"
] | null | null | null | src/cowrie/output/omniscidb.py | Zeerg/cowrie | 27c5c416e52b87bfa7d4c75ec8fbbe08203f5949 | [
"BSD-3-Clause"
] | null | null | null | src/cowrie/output/omniscidb.py | Zeerg/cowrie | 27c5c416e52b87bfa7d4c75ec8fbbe08203f5949 | [
"BSD-3-Clause"
] | null | null | null | # A simple logger to export events to omnisci
from __future__ import absolute_import, division
import geoip2.database
import pymapd as pmd
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
OmniSciDB Output
"""
| 38.474886 | 76 | 0.559221 | # A simple logger to export events to omnisci
from __future__ import absolute_import, division
import geoip2.database
import pymapd as pmd
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class CowrieOutput():
    """Flat record of one Cowrie event, one attribute per table column."""

    def __init__(self):
        # All 21 columns of a `cowrie_sessions` row start out empty.
        for column in ("session", "event_time", "sensor_id", "ip_address",
                       "username", "password", "login_result",
                       "command_input", "command_result", "url", "outfile",
                       "shasum", "download_result", "was_upload", "src_lon",
                       "src_lat", "src_city", "src_state", "src_zip_code",
                       "src_country", "src_country_iso"):
            setattr(self, column, None)

    def make_tuple(self):
        """Return the row as a tuple in table-column order."""
        return (self.session,
                self.event_time,
                self.sensor_id,
                self.ip_address,
                self.username,
                self.password,
                self.login_result,
                self.command_input,
                self.command_result,
                self.url,
                self.outfile,
                self.shasum,
                self.download_result,
                self.was_upload,
                self.src_lon,
                self.src_lat,
                self.src_city,
                self.src_state,
                self.src_zip_code,
                self.src_country,
                self.src_country_iso)

    def set_src_geo(self, src_geo_tuple):
        """Copy the first seven GeoIP values into the src_* columns."""
        geo_columns = ("src_lon", "src_lat", "src_city", "src_state",
                       "src_zip_code", "src_country", "src_country_iso")
        for index, column in enumerate(geo_columns):
            setattr(self, column, src_geo_tuple[index])
class Output(cowrie.core.output.Output):
    """
    OmniSciDB Output

    Streams Cowrie honeypot events into an OmniSciDB ``cowrie_sessions``
    table, optionally enriching source IPs with MaxMind GeoIP2 data.
    """

    def start(self):
        """Read configuration, connect to OmniSciDB and create the table."""
        self.host = CowrieConfig().get(
            'output_omniscidb', 'host', fallback=None)
        self.port = CowrieConfig().get(
            'output_omniscidb', 'port', fallback=None)
        self.db = CowrieConfig().get(
            'output_omniscidb', 'db', fallback=None)
        self.protocol = CowrieConfig().get(
            'output_omniscidb', 'protocol', fallback=None)
        self.username = CowrieConfig().get(
            'output_omniscidb', 'username', fallback=None)
        self.password = CowrieConfig().get(
            'output_omniscidb', 'password', fallback=None)
        self.mmdb_location = CowrieConfig().get(
            'output_omniscidb', 'mmdb_location', fallback=None)
        # Bug fix: always define mmdb_geo.  write() tests `self.mmdb_geo`
        # unconditionally, which raised AttributeError whenever no
        # mmdb_location was configured.
        self.mmdb_geo = None
        try:
            self.connection = pmd.connect(user=self.username,
                                          password=self.password,
                                          host=self.host,
                                          dbname=self.db,
                                          protocol=self.protocol,
                                          port=self.port)
            self.cursor = self.connection.cursor()
        except Exception as e:
            # NOTE(review): on connect failure self.cursor stays unset, so
            # the CREATE TABLE below raises AttributeError -- unchanged
            # from the original behaviour.
            log.msg("Failed to login to OmniSciDB: got error {0}".format(e))
        if self.mmdb_location is not None:
            self.mmdb_geo = geoip2.database.Reader(self.mmdb_location)
        try:
            # Create our Tables for Cowrie
            self.cursor.execute("CREATE TABLE cowrie_sessions \
                (session TEXT ENCODING DICT(32),\
                event_time TIMESTAMP(0),\
                sensor TEXT ENCODING DICT(32),\
                ip_address TEXT ENCODING DICT(32), \
                username TEXT ENCODING DICT(32),\
                password TEXT ENCODING DICT(32),\
                login_result TEXT ENCODING DICT(32),\
                command_input TEXT ENCODING DICT(32),\
                command_result TEXT ENCODING DICT(32), \
                url TEXT ENCODING DICT(32),\
                outfile TEXT ENCODING DICT(32),\
                shasum TEXT ENCODING DICT(32),\
                download_result TEXT ENCODING DICT(32),\
                was_upload TEXT ENCODING DICT(32),\
                src_lon float,\
                src_lat float,\
                src_city TEXT ENCODING DICT(32),\
                src_state TEXT ENCODING DICT(32),\
                src_zip_code TEXT ENCODING DICT(32),\
                src_country TEXT ENCODING DICT(16),\
                src_country_iso TEXT ENCODING DICT(16))")
        except Exception as e:
            log.msg("Failed to create table got error {0}".format(e))

    def maxmind_geo_lookup(self, reader, ip):
        """Look up *ip* in the MaxMind city database.

        Returns a 7-tuple (lon, lat, city, state, zip, country, iso), or
        None when the lookup fails (e.g. address not in the database).
        """
        try:
            response = reader.city(ip)
            return (float(response.location.longitude),
                    float(response.location.latitude),
                    str(response.city.name),
                    str(response.subdivisions.most_specific.name),
                    str(response.postal.code),
                    str(response.country.name),
                    str(response.country.iso_code))
        except Exception as e:
            log.msg("Failed to lookup ip {0}".format(e))
            return None

    def load_data(self, data_dict):
        """Insert the given row tuples into the cowrie_sessions table."""
        try:
            self.connection.load_table_rowwise("cowrie_sessions", data_dict)
        except Exception as e:
            log.msg("output_omniscidb: Error %s" % (e))

    def stop(self):
        """Close the OmniSciDB connection."""
        log.msg("Closing OmniSciDB connection")
        self.connection.close()

    def write(self, entry):
        """Flatten one Cowrie event dict into a table row and insert it."""
        # Create class that holds basic data for all events
        cowrie_output = CowrieOutput()
        cowrie_output.session = entry["session"]
        cowrie_output.event_time = entry["time"]
        # NOTE(review): self.sensor is presumably provided by the
        # cowrie.core.output.Output base class -- confirm.
        cowrie_output.sensor_id = self.sensor
        # Handle the basic connection
        if entry["eventid"] == 'cowrie.session.connect':
            cowrie_output.ip_address = entry["src_ip"]
            self.load_data([cowrie_output.make_tuple()])
        # Handle the login events
        elif 'cowrie.login' in entry["eventid"]:
            cowrie_output.ip_address = entry["src_ip"]
            cowrie_output.username = entry['username']
            cowrie_output.password = entry['password']
            # If Maxmind is loaded set the geoip tuple.
            if self.mmdb_geo:
                src_geo_tuple = (
                    self.maxmind_geo_lookup(
                        self.mmdb_geo, cowrie_output.ip_address))
                if src_geo_tuple is not None:
                    cowrie_output.set_src_geo(src_geo_tuple)
            if entry["eventid"] == 'cowrie.login.success':
                cowrie_output.login_result = "1"
                self.load_data([cowrie_output.make_tuple()])
            if entry["eventid"] == 'cowrie.login.failed':
                cowrie_output.login_result = "0"
                self.load_data([cowrie_output.make_tuple()])
        # Handle the command events
        elif 'cowrie.command' in entry["eventid"]:
            cowrie_output.command_input = entry["input"]
            if entry["eventid"] == 'cowrie.command.input':
                cowrie_output.command_result = "1"
                self.load_data([cowrie_output.make_tuple()])
            if entry["eventid"] == 'cowrie.command.failed':
                cowrie_output.command_result = "0"
                self.load_data([cowrie_output.make_tuple()])
        # Handle upload and download events
        elif 'cowrie.session.file' in entry["eventid"]:
            cowrie_output.url = entry['url']
            if entry["eventid"] == 'cowrie.session.file_download':
                cowrie_output.shasum = entry['shasum']
                cowrie_output.outfile = entry['outfile']
                cowrie_output.download_result = "1"
                self.load_data([cowrie_output.make_tuple()])
            if entry["eventid"] == 'cowrie.session.file_download.failed':
                cowrie_output.download_result = "0"
                self.load_data([cowrie_output.make_tuple()])
            if entry["eventid"] == 'cowrie.session.file_upload':
                cowrie_output.was_upload = "1"
                cowrie_output.shasum = entry['shasum']
                cowrie_output.outfile = entry['outfile']
                self.load_data([cowrie_output.make_tuple()])
| 7,862 | 0 | 238 |
b2db234b1ac27dbf2278c75c63c341c886c4d51a | 738 | py | Python | scripts/fun/whatsapp_simple_stats.py | oxalorg/dotfiles | f49258abcc1dbc17fbb5fb9548863fce75779a17 | [
"MIT"
] | 13 | 2016-12-07T23:35:02.000Z | 2021-05-18T14:48:46.000Z | scripts/fun/whatsapp_simple_stats.py | oxalorg/dotfiles | f49258abcc1dbc17fbb5fb9548863fce75779a17 | [
"MIT"
] | 2 | 2017-08-15T16:29:24.000Z | 2020-04-10T06:36:19.000Z | scripts/fun/whatsapp_simple_stats.py | oxalorg/dotfiles | f49258abcc1dbc17fbb5fb9548863fce75779a17 | [
"MIT"
] | 5 | 2017-01-23T22:36:44.000Z | 2021-11-02T20:44:45.000Z | import datetime
import sys
import pprint


def count_messages_per_day(lines, date_start, date_end):
    """Count WhatsApp-export lines per day.

    Each message line in a WhatsApp text export starts with a
    ``dd/mm/yyyy`` date; any line whose first 10 characters are not a
    date inside the range (continuation lines, system lines) is ignored.

    Args:
        lines: iterable of raw export lines.
        date_start: first date of the range (inclusive).
        date_end: last date of the range (inclusive).

    Returns:
        (day_count, line_count) where day_count maps every date in the
        inclusive range (formatted "dd/mm/yyyy") to its message count and
        line_count is the total number of input lines.
    """
    day_count = {}
    day = date_start
    # Bug fix: the original advanced the date *before* recording it, so
    # date_start itself never got a key (its messages were silently
    # dropped by the KeyError handler) while date_end + 1 day did.
    while day <= date_end:
        day_count[day.strftime("%d/%m/%Y")] = 0
        day += datetime.timedelta(days=1)

    line_count = 0
    for line in lines:
        line_count += 1
        try:
            day_count[line[0:10]] += 1
        except KeyError:
            # Not a message line, or dated outside the range -- skip.
            pass
    return day_count, line_count


def main():
    """CLI entry point: ``whatsapp_simple_stats.py <export.txt>``."""
    whatsapp_file = sys.argv[1]
    with open(whatsapp_file, 'r') as fp:
        lines = fp.readlines()

    # Hard-coded analysis window of the original script.
    date_start = datetime.date(2015, 10, 5)
    date_end = datetime.date(2016, 2, 8)
    day_count, line_count = count_messages_per_day(lines, date_start, date_end)

    max_count = max(day_count.values())
    max_key = max(day_count, key=lambda k: day_count[k])
    print("Maximum messages: ", max_count, " on date: ", max_key)
    print("Total line count = ", line_count)
    print("Total days = ", len(day_count))


if __name__ == "__main__":
    # Guarding the entry point fixes the original's module-level
    # sys.argv/file I/O, which ran even on import.
    main()
| 21.085714 | 61 | 0.673442 | import datetime
import sys
import pprint
date_start = datetime.date(2015, 10, 5)
date_end = datetime.date(2016, 2, 8)
date_delta = datetime.timedelta(days=1)
whatsapp_file = sys.argv[1]
with open(whatsapp_file, 'r') as fp:
lines = fp.readlines()
day_count = {}
while date_start <= date_end:
date_start += date_delta
day_count[date_start.strftime("%d/%m/%Y")] = 0
line_count = 0
for line in lines:
line_count += 1
try:
day_count[line[0:10]] += 1
except:
pass
max_count = max(day_count.values())
max_key = max(day_count, key=lambda k: day_count[k])
print("Maximum messages: ", max_count, " on date: ", max_key)
print("Total line count = ", line_count)
print("Total days = ", len(day_count))
| 0 | 0 | 0 |
7cbad9a991b60892e16f0c92b20f36073fe23c15 | 1,300 | py | Python | lecture3/tests/question-3_4.py | ggorman/Introduction-Python-programming-2018 | 739b864c1499ccdbf9010d8fe774087a07bb09ee | [
"CC-BY-3.0"
] | 1 | 2019-01-12T12:43:24.000Z | 2019-01-12T12:43:24.000Z | lecture3/tests/question-3_4.py | ggorman/Introduction-Python-programming-2018 | 739b864c1499ccdbf9010d8fe774087a07bb09ee | [
"CC-BY-3.0"
] | null | null | null | lecture3/tests/question-3_4.py | ggorman/Introduction-Python-programming-2018 | 739b864c1499ccdbf9010d8fe774087a07bb09ee | [
"CC-BY-3.0"
] | 3 | 2019-05-16T21:08:48.000Z | 2022-02-21T06:54:57.000Z | test = {
'name': 'question 3.4',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> type(my_factorial) == types.FunctionType
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> len(param) # wrong number of argument
1
""",
'hidden': False,
'locked': False
}
],
'scored': False,
'setup': 'import types; import inspect; param = inspect.signature(my_factorial).parameters',
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> my_factorial(0)==math.factorial(0)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> my_factorial(1)==math.factorial(1)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> my_factorial(42)==math.factorial(42)
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': 'import math',
'teardown': '',
'type': 'doctest'
}
]
}
| 20.634921 | 98 | 0.372308 | test = {
'name': 'question 3.4',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> type(my_factorial) == types.FunctionType
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> len(param) # wrong number of argument
1
""",
'hidden': False,
'locked': False
}
],
'scored': False,
'setup': 'import types; import inspect; param = inspect.signature(my_factorial).parameters',
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> my_factorial(0)==math.factorial(0)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> my_factorial(1)==math.factorial(1)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> my_factorial(42)==math.factorial(42)
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': 'import math',
'teardown': '',
'type': 'doctest'
}
]
}
| 0 | 0 | 0 |
424afe2b1d98716108c7d7f1fc4d8683ffc2df68 | 187 | py | Python | poolink_backend/apps/link/urls.py | jaethewiederholen/Poolink_backend | 3a1b28856bc8916aedb7735de8b64fef3269ef1b | [
"MIT"
] | null | null | null | poolink_backend/apps/link/urls.py | jaethewiederholen/Poolink_backend | 3a1b28856bc8916aedb7735de8b64fef3269ef1b | [
"MIT"
] | null | null | null | poolink_backend/apps/link/urls.py | jaethewiederholen/Poolink_backend | 3a1b28856bc8916aedb7735de8b64fef3269ef1b | [
"MIT"
] | null | null | null | from django.urls import path
from .api.views import link_search_view, link_view
app_name = "links"
urlpatterns = [
path("", view=link_view),
path("search", link_search_view),
]
| 18.7 | 50 | 0.716578 | from django.urls import path
from .api.views import link_search_view, link_view
app_name = "links"
urlpatterns = [
path("", view=link_view),
path("search", link_search_view),
]
| 0 | 0 | 0 |
29117fef165df4facb3455ee93ba032958fd76d0 | 2,194 | py | Python | tests/test_xdg.py | abhishek-kumar-code/redfishtool_nsfcac | 928edbf2c9c3ebfd6cb4722a2a77b1e63372211c | [
"MIT"
] | 8,477 | 2016-05-19T08:57:19.000Z | 2020-08-12T11:08:08.000Z | tests/test_xdg.py | abhishek-kumar-code/redfishtool_nsfcac | 928edbf2c9c3ebfd6cb4722a2a77b1e63372211c | [
"MIT"
] | 130 | 2016-05-19T13:38:05.000Z | 2020-08-12T07:39:26.000Z | tests/test_xdg.py | abhishek-kumar-code/redfishtool_nsfcac | 928edbf2c9c3ebfd6cb4722a2a77b1e63372211c | [
"MIT"
] | 360 | 2016-05-19T14:36:20.000Z | 2020-07-30T21:55:56.000Z | import os
import stat
import sys
from .base import TempAppDirTestCase
from http_prompt import xdg
| 36.566667 | 73 | 0.612124 | import os
import stat
import sys
from .base import TempAppDirTestCase
from http_prompt import xdg
class TestXDG(TempAppDirTestCase):
def test_get_app_data_home(self):
path = xdg.get_data_dir()
expected_path = os.path.join(os.environ[self.homes['data']],
'http-prompt')
self.assertEqual(path, expected_path)
self.assertTrue(os.path.exists(path))
if sys.platform != 'win32':
# Make sure permission for the directory is 700
mask = stat.S_IMODE(os.stat(path).st_mode)
self.assertTrue(mask & stat.S_IRWXU)
self.assertFalse(mask & stat.S_IRWXG)
self.assertFalse(mask & stat.S_IRWXO)
def test_get_app_config_home(self):
path = xdg.get_config_dir()
expected_path = os.path.join(os.environ[self.homes['config']],
'http-prompt')
self.assertEqual(path, expected_path)
self.assertTrue(os.path.exists(path))
if sys.platform != 'win32':
# Make sure permission for the directory is 700
mask = stat.S_IMODE(os.stat(path).st_mode)
self.assertTrue(mask & stat.S_IRWXU)
self.assertFalse(mask & stat.S_IRWXG)
self.assertFalse(mask & stat.S_IRWXO)
def test_get_resource_data_dir(self):
path = xdg.get_data_dir('something')
expected_path = os.path.join(
os.environ[self.homes['data']], 'http-prompt', 'something')
self.assertEqual(path, expected_path)
self.assertTrue(os.path.exists(path))
# Make sure we can write a file to the directory
with open(os.path.join(path, 'test'), 'wb') as f:
f.write(b'hello')
def test_get_resource_config_dir(self):
path = xdg.get_config_dir('something')
expected_path = os.path.join(
os.environ[self.homes['config']], 'http-prompt', 'something')
self.assertEqual(path, expected_path)
self.assertTrue(os.path.exists(path))
# Make sure we can write a file to the directory
with open(os.path.join(path, 'test'), 'wb') as f:
f.write(b'hello')
| 1,950 | 13 | 131 |
e7cae6339bd56a0e07e9bbfd5eb98c5d42b8a782 | 1,426 | py | Python | Winpy/CaptchaDataset.py | Soapy-Salted-Fish-King/captcha_break | 96abe0a443db9d9bb81563f0fb894687cf88f0dd | [
"MIT"
] | 2,486 | 2017-03-29T16:48:25.000Z | 2022-03-31T17:31:25.000Z | Winpy/CaptchaDataset.py | Soapy-Salted-Fish-King/captcha_break | 96abe0a443db9d9bb81563f0fb894687cf88f0dd | [
"MIT"
] | 67 | 2017-04-01T03:30:20.000Z | 2022-03-08T14:12:03.000Z | Winpy/CaptchaDataset.py | Soapy-Salted-Fish-King/captcha_break | 96abe0a443db9d9bb81563f0fb894687cf88f0dd | [
"MIT"
] | 706 | 2017-03-30T01:22:34.000Z | 2022-03-28T16:16:27.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms.functional import to_tensor, to_pil_image
from captcha.image import ImageCaptcha
from tqdm import tqdm
import random
import numpy as np
from collections import OrderedDict
| 41.941176 | 101 | 0.701964 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms.functional import to_tensor, to_pil_image
from captcha.image import ImageCaptcha
from tqdm import tqdm
import random
import numpy as np
from collections import OrderedDict
class CaptchaDataset(Dataset):
    """Synthetic captcha dataset generated on the fly.

    Every ``__getitem__`` call draws a fresh random label string (the
    character at index 0 of ``characters`` is excluded via the ``[1:]``
    slice) and renders it with ``captcha.image.ImageCaptcha``; ``length``
    only sets the nominal epoch size.
    """

    def __init__(self, characters, length, width, height, input_length, label_length):
        super(CaptchaDataset, self).__init__()
        self.characters = characters
        self.length = length
        self.width = width
        self.height = height
        self.input_length = input_length
        self.label_length = label_length
        self.n_class = len(characters)
        self.generator = ImageCaptcha(width=width, height=height)

    def __len__(self):
        # Nominal size; samples are independent of the index.
        return self.length

    def __getitem__(self, index):
        alphabet = self.characters[1:]
        text = ''.join(random.choice(alphabet) for _ in range(self.label_length))
        image = to_tensor(self.generator.generate_image(text))
        target = torch.tensor([self.characters.find(ch) for ch in text], dtype=torch.long)
        input_length = torch.full(size=(1, ), fill_value=self.input_length, dtype=torch.long)
        target_length = torch.full(size=(1, ), fill_value=self.label_length, dtype=torch.long)
        return image, target, input_length, target_length
1c11c0c146e82c558aab7022ebf7b6bf8e6eddcc | 1,740 | py | Python | cogs/stats.py | Machotacoz/discord-bot | b7390fac8df9a0943fcab1d49bf6a54f88d287da | [
"MIT"
] | null | null | null | cogs/stats.py | Machotacoz/discord-bot | b7390fac8df9a0943fcab1d49bf6a54f88d287da | [
"MIT"
] | null | null | null | cogs/stats.py | Machotacoz/discord-bot | b7390fac8df9a0943fcab1d49bf6a54f88d287da | [
"MIT"
] | null | null | null | import discord, os
from discord.ext import commands
from utils import checks, output, parsing
from aiohttp import ClientSession
import urllib.request
import json
| 43.5 | 168 | 0.628161 | import discord, os
from discord.ext import commands
from utils import checks, output, parsing
from aiohttp import ClientSession
import urllib.request
import json
class Stats:
    """Cog exposing the ``stats`` command: NDB price info from coinlib.io."""

    def __init__(self, bot: discord.ext.commands.Bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def stats(self, ctx, amount=1):
        """
        Show stats about NDB
        """
        # Only respond in channels whitelisted for this command in config.json.
        channel_name = ctx.message.channel.name
        allowed_channels = parsing.parse_json('config.json')['command_channels'][ctx.command.name]
        if channel_name not in allowed_channels:
            return
        # Browser-like user agent; some APIs reject default client agents.
        headers = {"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36"}
        try:
            async with ClientSession() as session:
                async with session.get("https://coinlib.io/api/v1/coin?key=593ce9b1bef849c6&pref=BTC&symbol=NORT", headers=headers) as response:
                    responseRaw = await response.read()
                    priceData = json.loads(responseRaw)
                    # NOTE(review): iterating a dict yields its *keys*, so
                    # ``item['price']`` below only works if the API returns a
                    # list of objects -- verify against the live coinlib.io
                    # response shape.
                    for item in priceData:
                        embed = discord.Embed(colour=0x00FF00)
                        embed.set_author(name='NDB Information', icon_url="http://explorer.nort.network/images/logo.png")
                        embed.add_field(name="Price (BTC)", value="${}".format(item['price']))
                        embed.set_footer(text="https://wallet.crypto-bridge.org/market/BRIDGE.NORT_BRIDGE.BTC", icon_url="http://explorer.nort.network/images/logo.png")
                        await self.bot.say(embed=embed)
        except Exception:
            # Fix: was a bare ``except:``, which also swallowed SystemExit
            # and KeyboardInterrupt.  Catch only real errors; keep the
            # original user-facing message.
            await self.bot.say(":warning: Error fetching prices!")
def setup(bot):
    """discord.py extension entry point: attach the Stats cog to ``bot``."""
    bot.add_cog(Stats(bot))
| 74 | 1,457 | 46 |
9674fbb105b7e5ebdf80a78a0ed10c1b5a8f04fa | 2,327 | py | Python | Alignment/CommonAlignmentProducer/python/ALCARECOTkAlCosmicsInCollisions_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Alignment/CommonAlignmentProducer/python/ALCARECOTkAlCosmicsInCollisions_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Alignment/CommonAlignmentProducer/python/ALCARECOTkAlCosmicsInCollisions_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | # Author : Andreas Mussgiller
# Date : July 1st, 2010
# last update: $Date: 2010/03/17 18:17:34 $ by $Author: mussgill $
import FWCore.ParameterSet.Config as cms
#_________________________________HLT bits___________________________________________
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOTkAlCosmicsInCollisionsHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, ## choose logical OR between Triggerbits
eventSetupPathsKey = 'TkAlCosmicsInCollisions',
throw = False # tolerate triggers not available
)
# DCS partitions
# "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC"
# "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm"
# "BPIX","FPIX","ESp","ESm"
import DPGAnalysis.Skims.skim_detstatus_cfi
ALCARECOTkAlCosmicsInCollisionsDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(
DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'),
ApplyFilter = cms.bool(True),
AndOr = cms.bool(True),
DebugOn = cms.untracked.bool(False)
)
#_________________________ Cosmic During Collisions__________________________________
from RecoTracker.SpecialSeedGenerators.cosmicDC_cff import *
#________________________________Track selection____________________________________
# AlCaReco for track based alignment using Cosmic muons reconstructed by Cosmics during collisions
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOTkAlCosmicsInCollisions = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone(
src = 'cosmicDCTracks',
filter = True,
applyBasicCuts = True,
ptMin = 0., ##10
ptMax = 99999.,
pMin = 4., ##10
pMax = 99999.,
etaMin = -99., ##-2.4 keep also what is going through...
etaMax = 99., ## 2.4 ...both TEC with flat slope
nHitMin = 7,
nHitMin2D = 2,
chi2nMax = 999999.,
applyMultiplicityFilter = False,
applyNHighestPt = True, ## select only highest pT track
nHighestPt = 1
)
#________________________________Sequences____________________________________
seqALCARECOTkAlCosmicsInCollisions = cms.Sequence(cosmicDCTracksSeq*ALCARECOTkAlCosmicsInCollisionsHLT+ALCARECOTkAlCosmicsInCollisionsDCSFilter+ALCARECOTkAlCosmicsInCollisions)
| 41.553571 | 176 | 0.762355 | # Author : Andreas Mussgiller
# Date : July 1st, 2010
# last update: $Date: 2010/03/17 18:17:34 $ by $Author: mussgill $
import FWCore.ParameterSet.Config as cms
#_________________________________HLT bits___________________________________________
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOTkAlCosmicsInCollisionsHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, ## choose logical OR between Triggerbits
eventSetupPathsKey = 'TkAlCosmicsInCollisions',
throw = False # tolerate triggers not available
)
# DCS partitions
# "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC"
# "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm"
# "BPIX","FPIX","ESp","ESm"
import DPGAnalysis.Skims.skim_detstatus_cfi
ALCARECOTkAlCosmicsInCollisionsDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(
DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'),
ApplyFilter = cms.bool(True),
AndOr = cms.bool(True),
DebugOn = cms.untracked.bool(False)
)
#_________________________ Cosmic During Collisions__________________________________
from RecoTracker.SpecialSeedGenerators.cosmicDC_cff import *
#________________________________Track selection____________________________________
# AlCaReco for track based alignment using Cosmic muons reconstructed by Cosmics during collisions
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOTkAlCosmicsInCollisions = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone(
src = 'cosmicDCTracks',
filter = True,
applyBasicCuts = True,
ptMin = 0., ##10
ptMax = 99999.,
pMin = 4., ##10
pMax = 99999.,
etaMin = -99., ##-2.4 keep also what is going through...
etaMax = 99., ## 2.4 ...both TEC with flat slope
nHitMin = 7,
nHitMin2D = 2,
chi2nMax = 999999.,
applyMultiplicityFilter = False,
applyNHighestPt = True, ## select only highest pT track
nHighestPt = 1
)
#________________________________Sequences____________________________________
seqALCARECOTkAlCosmicsInCollisions = cms.Sequence(cosmicDCTracksSeq*ALCARECOTkAlCosmicsInCollisionsHLT+ALCARECOTkAlCosmicsInCollisionsDCSFilter+ALCARECOTkAlCosmicsInCollisions)
| 0 | 0 | 0 |
718e53e07458ffd516f7910c2e12d5132f259d55 | 8,462 | py | Python | argoverse/evaluation/eval_tracking.py | MengshiLi/argoverse-api | 52d6947ab79a3c24a2cf9747cb16284237625d47 | [
"MIT"
] | null | null | null | argoverse/evaluation/eval_tracking.py | MengshiLi/argoverse-api | 52d6947ab79a3c24a2cf9747cb16284237625d47 | [
"MIT"
] | null | null | null | argoverse/evaluation/eval_tracking.py | MengshiLi/argoverse-api | 52d6947ab79a3c24a2cf9747cb16284237625d47 | [
"MIT"
] | 1 | 2020-07-14T13:49:14.000Z | 2020-07-14T13:49:14.000Z | # <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
import argparse
import glob
import json
import logging
import os
import pathlib
from typing import Any, Dict, List, TextIO, Union
import motmetrics as mm
import numpy as np
from argoverse.evaluation.eval_utils import get_pc_inside_bbox, label_to_bbox, leave_only_roi_region
from argoverse.utils.json_utils import read_json_file
from argoverse.utils.ply_loader import load_ply
from argoverse.utils.se3 import SE3
from argoverse.utils.transform import quat2rotmat
min_point_num = 0
mh = mm.metrics.create()
logger = logging.getLogger(__name__)
_PathLike = Union[str, "os.PathLike[str]"]
def in_distance_range_pose(ego_center: np.ndarray, pose: np.ndarray, d_min: float, d_max: float) -> bool:
    """Report whether a pose lies strictly inside a planar distance band.

    Only the first two (x, y) components of each pose are compared.

    Args:
        ego_center: ego center pose (zero if bbox is in ego frame).
        pose: pose to test.
        d_min: minimum distance range
        d_max: maximum distance range

    Returns:
        True iff the x-y distance from ego_center is in the open
        interval (d_min, d_max).
    """
    planar_offset = pose[0:2] - ego_center[0:2]
    planar_dist = float(np.linalg.norm(planar_offset))
    return d_min < planar_dist < d_max
def get_distance(x1: Dict[str, Any], x2: Dict[str, Any], name: str,
                 match_threshold: float = 2.25) -> float:
    """Compute the planar (x, y) distance between two annotated objects.

    Fixes the original type hints, which declared the first two arguments
    as ``np.ndarray`` although they are dicts of arrays, and generalizes
    the previously hard-coded 2.25 matching threshold.

    Args:
        x1: first object; ``x1[name]`` is an array whose first two
            entries are the x and y coordinates.
        x2: second object, same layout as ``x1``.
        name: key of the field to compare (e.g. "centroid").
        match_threshold: distances at or beyond this value are reported
            as NaN so the caller treats the pair as non-matching.
            Defaults to 2.25, the original constant.

    Returns:
        The Euclidean x-y distance, or NaN if not below the threshold.
    """
    dist = float(np.linalg.norm(x1[name][0:2] - x2[name][0:2]))
    return dist if dist < match_threshold else float(np.nan)
def eval_tracks(
    path_tracker_output: str,
    path_dataset: _PathLike,
    d_min: float,
    d_max: float,
    out_file: TextIO,
    centroid_method: str,
) -> None:
    """Evaluate tracking output.

    Accumulates per-frame VEHICLE ground-truth/track matches into a
    ``motmetrics`` accumulator and appends one space-separated summary
    line (MOTA, MOTP, IDF1, etc.) to ``out_file``.

    Args:
        path_tracker_output: path to tracker output
        path_dataset: path to dataset
        d_min: minimum distance range
        d_max: maximum distance range
        out_file: output file object
        centroid_method: method for ground truth centroid estimation
            ("average" of in-box lidar points, or "label_center")
    """
    acc = mm.MOTAccumulator(auto_id=True)

    path_track_data = sorted(glob.glob(os.fspath(path_tracker_output) + "/*"))

    log_id = pathlib.Path(path_dataset).name
    logger.info("log_id = %s", log_id)

    city_info_fpath = f"{path_dataset}/city_info.json"
    city_info = read_json_file(city_info_fpath)
    city_name = city_info["city_name"]
    logger.info("city name = %s", city_name)

    ID_gt_all: List[str] = []

    for ind_frame in range(len(path_track_data)):
        if ind_frame % 50 == 0:
            # Fix: lazy %-style logging args instead of eager "%" formatting.
            logger.info("%d/%d", ind_frame, len(path_track_data))

        # Tracker file names end with the lidar sweep timestamp.
        timestamp_lidar = int(path_track_data[ind_frame].split("/")[-1].split("_")[-1].split(".")[0])
        path_gt = os.path.join(
            path_dataset, "per_sweep_annotations_amodal", f"tracked_object_labels_{timestamp_lidar}.json"
        )

        if not os.path.exists(path_gt):
            # Fix: the original called logger.warning("Missing ", path_gt),
            # an invalid msg/args pair that made the logging module report a
            # formatting error instead of the intended message.
            logger.warning("Missing %s", path_gt)
            continue

        gt_data = read_json_file(path_gt)

        # Ego pose for this sweep: used to map the raw cloud into the city
        # frame for the region-of-interest filter.
        pose_data = read_json_file(f"{path_dataset}/poses/city_SE3_egovehicle_{timestamp_lidar}.json")
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        ego_R = quat2rotmat(rotation)
        ego_t = translation
        egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)

        pc_raw0 = load_ply(os.path.join(path_dataset, f"lidar/PC_{timestamp_lidar}.ply"))
        pc_raw_roi = leave_only_roi_region(
            pc_raw0, egovehicle_to_city_se3, ground_removal_method="no", city_name=city_name
        )

        # Collect ground-truth VEHICLE objects inside the distance band.
        gt: Dict[str, Dict[str, Any]] = {}
        id_gts = []
        for i in range(len(gt_data)):
            if gt_data[i]["label_class"] != "VEHICLE":
                continue

            bbox, orientation = label_to_bbox(gt_data[i])
            pc_segment = get_pc_inside_bbox(pc_raw_roi, bbox)

            center = np.array([gt_data[i]["center"]["x"], gt_data[i]["center"]["y"], gt_data[i]["center"]["z"]])
            if (
                len(pc_segment) >= min_point_num
                and bbox[3] > 0
                and in_distance_range_pose(np.zeros(3), center, d_min, d_max)
            ):
                track_label_uuid = gt_data[i]["track_label_uuid"]
                gt[track_label_uuid] = {}
                if centroid_method == "average":
                    gt[track_label_uuid]["centroid"] = pc_segment.sum(axis=0) / len(pc_segment)
                elif centroid_method == "label_center":
                    gt[track_label_uuid]["centroid"] = center
                else:
                    logger.warning("Not implemented")

                gt[track_label_uuid]["bbox"] = bbox
                gt[track_label_uuid]["orientation"] = orientation

                if track_label_uuid not in ID_gt_all:
                    ID_gt_all.append(track_label_uuid)

                id_gts.append(track_label_uuid)

        # Collect tracker VEHICLE hypotheses inside the same distance band.
        tracks: Dict[str, Dict[str, Any]] = {}
        id_tracks: List[str] = []

        track_data = read_json_file(path_track_data[ind_frame])
        for track in track_data:
            key = track["track_label_uuid"]

            if track["label_class"] != "VEHICLE" or track["height"] == 0:
                continue

            center = np.array([track["center"]["x"], track["center"]["y"], track["center"]["z"]])
            if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
                tracks[key] = {}
                tracks[key]["centroid"] = center
                id_tracks.append(key)

        # Pairwise gt-vs-track distance matrix; NaN marks "no match possible".
        dists: List[List[float]] = []
        for gt_key, gt_value in gt.items():
            gt_track_data: List[float] = []
            dists.append(gt_track_data)
            for track_key, track_value in tracks.items():
                gt_track_data.append(get_distance(gt_value, track_value, "centroid"))

        acc.update(id_gts, id_tracks, dists)

    mh = mm.metrics.create()
    summary = mh.compute(
        acc,
        metrics=[
            "num_frames",
            "mota",
            "motp",
            "idf1",
            "mostly_tracked",
            "mostly_lost",
            "num_false_positives",
            "num_misses",
            "num_switches",
            "num_fragmentations",
        ],
        name="acc",
    )
    logger.info("summary = %s", summary)
    num_tracks = len(ID_gt_all)

    # One summary line per evaluated log, appended to the shared output file.
    fn = os.path.basename(path_tracker_output)
    num_frames = summary["num_frames"][0]
    mota = summary["mota"][0] * 100
    motp = summary["motp"][0]
    idf1 = summary["idf1"][0]
    most_track = summary["mostly_tracked"][0] / num_tracks
    most_lost = summary["mostly_lost"][0] / num_tracks
    num_fp = summary["num_false_positives"][0]
    num_miss = summary["num_misses"][0]
    num_switch = summary["num_switches"][0]
    num_flag = summary["num_fragmentations"][0]
    out_string = (
        f"{fn} {num_frames} {mota:.2f} {motp:.2f} {idf1:.2f} {most_track:.2f} "
        f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_flag} \n"
    )
    out_file.write(out_string)
if __name__ == "__main__":
    # CLI driver: evaluates one log's tracker output against the dataset
    # ground truth and writes a single summary line to a derived file name.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--path_tracker_output",
        type=str,
        default="../../argodataset_30Hz/test_label/028d5cb1-f74d-366c-85ad-84fde69b0fd3",
    )
    parser.add_argument(
        "--path_labels", type=str, default="../../argodataset_30Hz/labels_v32/028d5cb1-f74d-366c-85ad-84fde69b0fd3"
    )
    parser.add_argument("--path_dataset", type=str, default="../../argodataset_30Hz/cvpr_test_set")
    parser.add_argument("--centroid_method", type=str, default="average", choices=["label_center", "average"])
    parser.add_argument("--flag", type=str, default="")
    parser.add_argument("--d_min", type=float, default=0)
    parser.add_argument("--d_max", type=float, default=100, required=True)
    args = parser.parse_args()
    logger.info("args = %s", args)
    tracker_basename = os.path.basename(args.path_tracker_output)
    # Output name encodes tracker, flag, distance band, and centroid method.
    out_filename = f"{tracker_basename}_{args.flag}_{int(args.d_min)}_{int(args.d_max)}_{args.centroid_method}.txt"
    logger.info("output file name = %s", out_filename)
    with open(out_filename, "w") as out_file:
        eval_tracks(args.path_tracker_output, args.path_dataset, args.d_min, args.d_max, out_file, args.centroid_method)
| 34.259109 | 120 | 0.627629 | # <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
import argparse
import glob
import json
import logging
import os
import pathlib
from typing import Any, Dict, List, TextIO, Union
import motmetrics as mm
import numpy as np
from argoverse.evaluation.eval_utils import get_pc_inside_bbox, label_to_bbox, leave_only_roi_region
from argoverse.utils.json_utils import read_json_file
from argoverse.utils.ply_loader import load_ply
from argoverse.utils.se3 import SE3
from argoverse.utils.transform import quat2rotmat
min_point_num = 0
mh = mm.metrics.create()
logger = logging.getLogger(__name__)
_PathLike = Union[str, "os.PathLike[str]"]
def in_distance_range_pose(ego_center: np.ndarray, pose: np.ndarray, d_min: float, d_max: float) -> bool:
"""Determine if a pose is within distance range or not.
Args:
ego_center: ego center pose (zero if bbox is in ego frame).
pose: pose to test.
d_min: minimum distance range
d_max: maximum distance range
Returns:
A boolean saying if input pose is with specified distance range.
"""
dist = float(np.linalg.norm(pose[0:2] - ego_center[0:2]))
return dist > d_min and dist < d_max
def get_distance(x1: np.ndarray, x2: np.ndarray, name: str) -> float:
"""Get the distance between two poses, returns nan if distance is larger than detection threshold.
Args:
x1: first pose
x2: second pose
name: name of the field to test
Returns:
A distance value or NaN
"""
dist = float(np.linalg.norm(x1[name][0:2] - x2[name][0:2]))
return dist if dist < 2.25 else float(np.nan)
def eval_tracks(
path_tracker_output: str,
path_dataset: _PathLike,
d_min: float,
d_max: float,
out_file: TextIO,
centroid_method: str,
) -> None:
"""Evaluate tracking output.
Args:
path_tracker_output: path to tracker output
path_dataset: path to dataset
d_min: minimum distance range
d_max: maximum distance range
out_file: output file object
centroid_method: method for ground truth centroid estimation
"""
acc = mm.MOTAccumulator(auto_id=True)
path_track_data = sorted(glob.glob(os.fspath(path_tracker_output) + "/*"))
log_id = pathlib.Path(path_dataset).name
logger.info("log_id = %s", log_id)
city_info_fpath = f"{path_dataset}/city_info.json"
city_info = read_json_file(city_info_fpath)
city_name = city_info["city_name"]
logger.info("city name = %s", city_name)
ID_gt_all: List[str] = []
for ind_frame in range(len(path_track_data)):
if ind_frame % 50 == 0:
logger.info("%d/%d" % (ind_frame, len(path_track_data)))
timestamp_lidar = int(path_track_data[ind_frame].split("/")[-1].split("_")[-1].split(".")[0])
path_gt = os.path.join(
path_dataset, "per_sweep_annotations_amodal", f"tracked_object_labels_{timestamp_lidar}.json"
)
if not os.path.exists(path_gt):
logger.warning("Missing ", path_gt)
continue
gt_data = read_json_file(path_gt)
pose_data = read_json_file(f"{path_dataset}/poses/city_SE3_egovehicle_{timestamp_lidar}.json")
rotation = np.array(pose_data["rotation"])
translation = np.array(pose_data["translation"])
ego_R = quat2rotmat(rotation)
ego_t = translation
egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)
pc_raw0 = load_ply(os.path.join(path_dataset, f"lidar/PC_{timestamp_lidar}.ply"))
pc_raw_roi = leave_only_roi_region(
pc_raw0, egovehicle_to_city_se3, ground_removal_method="no", city_name=city_name
)
gt: Dict[str, Dict[str, Any]] = {}
id_gts = []
for i in range(len(gt_data)):
if gt_data[i]["label_class"] != "VEHICLE":
continue
bbox, orientation = label_to_bbox(gt_data[i])
pc_segment = get_pc_inside_bbox(pc_raw_roi, bbox)
center = np.array([gt_data[i]["center"]["x"], gt_data[i]["center"]["y"], gt_data[i]["center"]["z"]])
if (
len(pc_segment) >= min_point_num
and bbox[3] > 0
and in_distance_range_pose(np.zeros(3), center, d_min, d_max)
):
track_label_uuid = gt_data[i]["track_label_uuid"]
gt[track_label_uuid] = {}
if centroid_method == "average":
gt[track_label_uuid]["centroid"] = pc_segment.sum(axis=0) / len(pc_segment)
elif centroid_method == "label_center":
gt[track_label_uuid]["centroid"] = center
else:
logger.warning("Not implemented")
gt[track_label_uuid]["bbox"] = bbox
gt[track_label_uuid]["orientation"] = orientation
if track_label_uuid not in ID_gt_all:
ID_gt_all.append(track_label_uuid)
id_gts.append(track_label_uuid)
tracks: Dict[str, Dict[str, Any]] = {}
id_tracks: List[str] = []
track_data = read_json_file(path_track_data[ind_frame])
for track in track_data:
key = track["track_label_uuid"]
if track["label_class"] != "VEHICLE" or track["height"] == 0:
continue
center = np.array([track["center"]["x"], track["center"]["y"], track["center"]["z"]])
if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
tracks[key] = {}
tracks[key]["centroid"] = center
id_tracks.append(key)
dists: List[List[float]] = []
for gt_key, gt_value in gt.items():
gt_track_data: List[float] = []
dists.append(gt_track_data)
for track_key, track_value in tracks.items():
gt_track_data.append(get_distance(gt_value, track_value, "centroid"))
acc.update(id_gts, id_tracks, dists)
mh = mm.metrics.create()
summary = mh.compute(
acc,
metrics=[
"num_frames",
"mota",
"motp",
"idf1",
"mostly_tracked",
"mostly_lost",
"num_false_positives",
"num_misses",
"num_switches",
"num_fragmentations",
],
name="acc",
)
logger.info("summary = %s", summary)
num_tracks = len(ID_gt_all)
fn = os.path.basename(path_tracker_output)
num_frames = summary["num_frames"][0]
mota = summary["mota"][0] * 100
motp = summary["motp"][0]
idf1 = summary["idf1"][0]
most_track = summary["mostly_tracked"][0] / num_tracks
most_lost = summary["mostly_lost"][0] / num_tracks
num_fp = summary["num_false_positives"][0]
num_miss = summary["num_misses"][0]
num_switch = summary["num_switches"][0]
num_flag = summary["num_fragmentations"][0]
out_string = (
f"{fn} {num_frames} {mota:.2f} {motp:.2f} {idf1:.2f} {most_track:.2f} "
f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_flag} \n"
)
out_file.write(out_string)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--path_tracker_output",
type=str,
default="../../argodataset_30Hz/test_label/028d5cb1-f74d-366c-85ad-84fde69b0fd3",
)
parser.add_argument(
"--path_labels", type=str, default="../../argodataset_30Hz/labels_v32/028d5cb1-f74d-366c-85ad-84fde69b0fd3"
)
parser.add_argument("--path_dataset", type=str, default="../../argodataset_30Hz/cvpr_test_set")
parser.add_argument("--centroid_method", type=str, default="average", choices=["label_center", "average"])
parser.add_argument("--flag", type=str, default="")
parser.add_argument("--d_min", type=float, default=0)
parser.add_argument("--d_max", type=float, default=100, required=True)
args = parser.parse_args()
logger.info("args = %s", args)
tracker_basename = os.path.basename(args.path_tracker_output)
out_filename = f"{tracker_basename}_{args.flag}_{int(args.d_min)}_{int(args.d_max)}_{args.centroid_method}.txt"
logger.info("output file name = %s", out_filename)
with open(out_filename, "w") as out_file:
eval_tracks(args.path_tracker_output, args.path_dataset, args.d_min, args.d_max, out_file, args.centroid_method)
| 0 | 0 | 0 |
99f394f8217046df0046db4afd0ebf860c353a3a | 441 | py | Python | targets/test_router/rec_pkt.py | liusheng198933/behavioral-model | c713879458e4d97266ba36ea1c18475490e8382d | [
"Apache-2.0"
] | null | null | null | targets/test_router/rec_pkt.py | liusheng198933/behavioral-model | c713879458e4d97266ba36ea1c18475490e8382d | [
"Apache-2.0"
] | null | null | null | targets/test_router/rec_pkt.py | liusheng198933/behavioral-model | c713879458e4d97266ba36ea1c18475490e8382d | [
"Apache-2.0"
] | null | null | null | from scapy.all import sniff, sendp
import struct
import sys
if __name__ == '__main__':
main()
| 20.045455 | 46 | 0.605442 | from scapy.all import sniff, sendp
import struct
import sys
def handle_pkt(pkt):
pkt = str(pkt)
preamble = pkt[:8]
preamble_exp = "\x00" * 8
if preamble != preamble_exp: return
pktTMP = struct.unpack("!I", pkt[8:12])[0]
msg = pkt[66:len(pkt)]
print pktTMP
print msg
sys.stdout.flush()
def main():
    """Sniff packets on eth0 and hand each captured packet to handle_pkt."""
    sniff(iface="eth0", prn=lambda captured: handle_pkt(captured))
# Run the sniffer when executed as a script.
if __name__ == '__main__':
    main()
| 296 | 0 | 46 |
69204edc1381caa9d326ae4e310094b1c4177b34 | 1,181 | py | Python | esmvalcore/preprocessor/_derive/xco2.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | [
"Apache-2.0"
] | 26 | 2019-06-07T07:50:07.000Z | 2022-03-22T21:04:01.000Z | esmvalcore/preprocessor/_derive/xco2.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | [
"Apache-2.0"
] | 1,370 | 2019-06-06T09:03:07.000Z | 2022-03-31T04:37:20.000Z | esmvalcore/preprocessor/_derive/xco2.py | zklaus/ESMValCore | 5656fb8b546eeb4d750a424de7ed56a237edfabb | [
"Apache-2.0"
] | 26 | 2019-07-03T13:08:48.000Z | 2022-03-02T16:08:47.000Z | """Derivation of variable ``xco2``."""
from iris import Constraint
from ._baseclass import DerivedVariableBase
from ._shared import column_average
class DerivedVariable(DerivedVariableBase):
    """Derivation of variable ``xco2``."""
    @staticmethod
    def required(project):
        """Declare the variables needed for derivation."""
        required = [
            {'short_name': 'co2'},
            {'short_name': 'hus'},
            {'short_name': 'zg'},
            {'short_name': 'ps'},
        ]
        return required
    @staticmethod
    def calculate(cubes):
        """Calculate the column-averaged atmospheric CO2 [1e-6].
        Extracts the CO2 mole fraction, specific humidity, geopotential
        height and surface pressure cubes, then computes the column
        average of CO2 and converts it to a dimensionless mole fraction.
        Fix: removed a leftover debug ``print(co2_cube)`` that dumped the
        full cube description to stdout on every derivation.
        """
        co2_cube = cubes.extract_cube(
            Constraint(name='mole_fraction_of_carbon_dioxide_in_air'))
        hus_cube = cubes.extract_cube(Constraint(name='specific_humidity'))
        zg_cube = cubes.extract_cube(Constraint(name='geopotential_height'))
        ps_cube = cubes.extract_cube(Constraint(name='surface_air_pressure'))
        # Column-averaged CO2
        xco2_cube = column_average(co2_cube, hus_cube, zg_cube, ps_cube)
        xco2_cube.convert_units('1')
        return xco2_cube
| 31.078947 | 77 | 0.645216 | """Derivation of variable ``xco2``."""
from iris import Constraint
from ._baseclass import DerivedVariableBase
from ._shared import column_average
class DerivedVariable(DerivedVariableBase):
    """Derivation of variable ``xco2``."""
    @staticmethod
    def required(project):
        """Declare the variables needed for derivation."""
        required = [
            {'short_name': 'co2'},
            {'short_name': 'hus'},
            {'short_name': 'zg'},
            {'short_name': 'ps'},
        ]
        return required
    @staticmethod
    def calculate(cubes):
        """Calculate the column-averaged atmospheric CO2 [1e-6].
        Extracts the CO2 mole fraction, specific humidity, geopotential
        height and surface pressure cubes, then computes the column
        average of CO2 and converts it to a dimensionless mole fraction.
        Fix: removed a leftover debug ``print(co2_cube)`` that dumped the
        full cube description to stdout on every derivation.
        """
        co2_cube = cubes.extract_cube(
            Constraint(name='mole_fraction_of_carbon_dioxide_in_air'))
        hus_cube = cubes.extract_cube(Constraint(name='specific_humidity'))
        zg_cube = cubes.extract_cube(Constraint(name='geopotential_height'))
        ps_cube = cubes.extract_cube(Constraint(name='surface_air_pressure'))
        # Column-averaged CO2
        xco2_cube = column_average(co2_cube, hus_cube, zg_cube, ps_cube)
        xco2_cube.convert_units('1')
        return xco2_cube
| 0 | 0 | 0 |
56d16a3329cb464251701a96d5087b3a275d954a | 264 | py | Python | art/_yaml.py | kosma/art | 2947e8f170139b8c4d2378a1a3e86c13548a57d1 | [
"WTFPL"
] | 9 | 2017-07-30T21:45:24.000Z | 2020-04-01T16:31:12.000Z | art/_yaml.py | ellisto/gitlab-art | 2947e8f170139b8c4d2378a1a3e86c13548a57d1 | [
"WTFPL"
] | 15 | 2020-04-07T08:57:06.000Z | 2020-07-02T17:50:44.000Z | art/_yaml.py | kosma/art | 2947e8f170139b8c4d2378a1a3e86c13548a57d1 | [
"WTFPL"
] | 3 | 2018-01-08T17:20:57.000Z | 2020-03-31T00:07:00.000Z | # -*- coding: utf-8 -*-
import yaml
| 18.857143 | 68 | 0.640152 | # -*- coding: utf-8 -*-
import yaml
def load(path):
    """Read *path* and return its parsed YAML content."""
    with open(path, 'r') as fh:
        return yaml.safe_load(stream=fh)
def save(path, obj):
    """Serialize *obj* as block-style YAML and write it to *path*."""
    with open(path, 'w') as fh:
        yaml.safe_dump(obj, stream=fh, default_flow_style=False)
| 179 | 0 | 46 |
cbbeeaeaf2df763f524eea85585bb316de69a79d | 3,331 | py | Python | examples/blacklist.py | Leechael/flask-jwt-extended | aff5175ab65d4fa12b58f2b46c83c1cbfb13cca6 | [
"MIT"
] | null | null | null | examples/blacklist.py | Leechael/flask-jwt-extended | aff5175ab65d4fa12b58f2b46c83c1cbfb13cca6 | [
"MIT"
] | 1 | 2021-03-25T22:35:35.000Z | 2021-03-25T22:35:35.000Z | examples/blacklist.py | caser789/flask-jwt | 4b06a20799f5bee237763add3af5b80c79ac684f | [
"MIT"
] | 1 | 2021-05-13T18:15:55.000Z | 2021-05-13T18:15:55.000Z | from datetime import timedelta
import simplekv
import simplekv.memory
from flask import Flask, request, jsonify
from flask_jwt_extended import JWTManager, jwt_required, \
get_jwt_identity, revoke_token, unrevoke_token, \
get_stored_tokens, get_all_stored_tokens, create_access_token, \
create_refresh_token, jwt_refresh_token_required, get_stored_token
# Setup flask
app = Flask(__name__)
app.secret_key = 'super-secret'
# Configure access token expires time
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=5)
# Enable and configure the JWT blacklist / token revoke. We are using an in
# memory store for this example. In production, you should use something
# persistant (such as redis, memcached, sqlalchemy). See here for options:
# http://pythonhosted.org/simplekv/
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_STORE'] = simplekv.memory.DictStore()
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = 'refresh'
jwt = JWTManager(app)
@app.route('/login', methods=['POST'])
@app.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
# Endpoint for listing tokens that have the same identity as you
@app.route('/auth/tokens', methods=['GET'])
@jwt_required
# Endpoint for listing all tokens. In your app, you should either not expose
# this endpoint, or put some addition security on top of it so only trusted users,
# (administrators, etc) can access it
@app.route('/auth/all-tokens')
# Endpoint for allowing users to revoke their tokens
@app.route('/auth/tokens/revoke/<string:jti>', methods=['PUT'])
@jwt_required
@app.route('/auth/tokens/unrevoke/<string:jti>', methods=['PUT'])
@jwt_required
@app.route('/protected', methods=['GET'])
@jwt_required
if __name__ == '__main__':
app.run()
| 31.424528 | 82 | 0.710597 | from datetime import timedelta
import simplekv
import simplekv.memory
from flask import Flask, request, jsonify
from flask_jwt_extended import JWTManager, jwt_required, \
get_jwt_identity, revoke_token, unrevoke_token, \
get_stored_tokens, get_all_stored_tokens, create_access_token, \
create_refresh_token, jwt_refresh_token_required, get_stored_token
# Set up the Flask application (demo secret key -- not for production use).
app = Flask(__name__)
app.secret_key = 'super-secret'
# Access tokens expire five minutes after they are issued.
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=5)
# Enable and configure the JWT blacklist / token revoke. We are using an in
# memory store for this example. In production, you should use something
# persistent (such as redis, memcached, sqlalchemy). See here for options:
# http://pythonhosted.org/simplekv/
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_STORE'] = simplekv.memory.DictStore()
# Only refresh tokens are checked against the blacklist store.
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = 'refresh'
jwt = JWTManager(app)
@app.route('/login', methods=['POST'])
def login():
    """Issue an access/refresh token pair for valid credentials.
    Bug fix: the original condition used ``and``, which rejected a login
    only when BOTH username and password were wrong -- e.g. a correct
    username with a wrong password was accepted. Reject when EITHER
    credential is wrong.
    """
    username = request.json.get('username', None)
    password = request.json.get('password', None)
    if username != 'test' or password != 'test':
        return jsonify({"msg": "Bad username or password"}), 401
    ret = {
        'access_token': create_access_token(identity=username),
        'refresh_token': create_refresh_token(identity=username)
    }
    return jsonify(ret), 200
@app.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
    """Mint a fresh access token for the identity in the refresh token."""
    identity = get_jwt_identity()
    payload = {'access_token': create_access_token(identity=identity)}
    return jsonify(payload), 200
# Endpoint for listing tokens that have the same identity as you
@app.route('/auth/tokens', methods=['GET'])
@jwt_required
def list_identity_tokens():
    """Return every stored token belonging to the calling identity."""
    tokens = get_stored_tokens(get_jwt_identity())
    return jsonify(tokens), 200
# Endpoint for listing all tokens. In your app, you should either not expose
# this endpoint, or put some addition security on top of it so only trusted users,
# (administrators, etc) can access it
@app.route('/auth/all-tokens')
def list_all_tokens():
    """Return every token in the store, regardless of identity."""
    everything = get_all_stored_tokens()
    return jsonify(everything), 200
# Endpoint for allowing users to revoke their tokens
@app.route('/auth/tokens/revoke/<string:jti>', methods=['PUT'])
@jwt_required
def change_jwt_revoke_state(jti):
    """Revoke one of the caller's own tokens, identified by its jti."""
    caller = get_jwt_identity()
    try:
        stored = get_stored_token(jti)
        # Treat someone else's token exactly like a missing one.
        if stored['token']['identity'] != caller:
            raise KeyError
        revoke_token(jti)
    except KeyError:
        return jsonify({'msg': 'Token not found'}), 404
    return jsonify({"msg": "Token successfully revoked"}), 200
@app.route('/auth/tokens/unrevoke/<string:jti>', methods=['PUT'])
@jwt_required
def change_jwt_unrevoke_state(jti):
    """Un-revoke one of the caller's own tokens, identified by its jti."""
    caller = get_jwt_identity()
    try:
        stored = get_stored_token(jti)
        # Treat someone else's token exactly like a missing one.
        if stored['token']['identity'] != caller:
            raise KeyError
        unrevoke_token(jti)
    except KeyError:
        return jsonify({'msg': 'Token not found'}), 404
    return jsonify({"msg": "Token successfully unrevoked"}), 200
@app.route('/protected', methods=['GET'])
@jwt_required
def protected():
    """Simple endpoint that requires a valid access token."""
    greeting = {'hello': 'world'}
    return jsonify(greeting)
# Start the Flask development server when run as a script.
if __name__ == '__main__':
    app.run()
| 1,413 | 0 | 154 |
7d6f0cea619c754982d40159fba61994c36df25d | 45 | py | Python | new.py | debriv/pweb1 | 43087f3d36ed821e35b26f176380de8c0fe3c517 | [
"Apache-2.0"
] | null | null | null | new.py | debriv/pweb1 | 43087f3d36ed821e35b26f176380de8c0fe3c517 | [
"Apache-2.0"
] | null | null | null | new.py | debriv/pweb1 | 43087f3d36ed821e35b26f176380de8c0fe3c517 | [
"Apache-2.0"
] | null | null | null | n = input( "Name: ")
print(f"My name is {n}") | 22.5 | 24 | 0.555556 | n = input( "Name: ")
print(f"My name is {n}") | 0 | 0 | 0 |
cf7c86a00dd6af622a50f8e0c3b25e86ae1a5b41 | 14,837 | py | Python | tests/input/field_types/test_field_type_string.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
] | null | null | null | tests/input/field_types/test_field_type_string.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
] | null | null | null | tests/input/field_types/test_field_type_string.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
] | null | null | null | """Testing TcEx Input module field types."""
# standard library
from typing import TYPE_CHECKING, Dict, List, Optional, Union
# third-party
import pytest
from pydantic import BaseModel, validator
# first-party
from tcex.input.field_types import String, always_array, conditional_required, string
from tcex.pleb.scoped_property import scoped_property
from tests.input.field_types.utils import InputTest
if TYPE_CHECKING:
# first-party
from tests.mock_app import MockApp
# pylint: disable=no-self-argument, no-self-use
class TestInputsFieldTypes(InputTest):
    """Test TcEx String Field Model Tests."""
    def setup_method(self):
        """Configure setup before all tests."""
        scoped_property._reset()
    @pytest.mark.parametrize(
        'input_value,expected,optional,fail_test',
        [
            #
            # Pass Testing
            #
            # required, normal input
            ('string', 'string', False, False),
            # required, empty input
            ('', '', False, False),
            # optional, empty input
            ('', '', True, False),
            # optional, null input
            (None, None, True, False),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, False, True),
        ],
    )
    def test_field_model_string_input(
        self,
        input_value: str,
        expected: str,
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test String field type.
        Playbook Data Type: String
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: String
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Optional[String]
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type='String',
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        (
            'input_value,expected,allow_empty,conditional_required_rules,'
            'max_length,min_length,regex,optional,fail_test'
        ),
        [
            #
            # Pass Testing
            #
            # required, normal input
            ('string', 'string', True, None, None, None, None, False, False),
            # required, empty input
            ('', '', True, None, None, None, None, False, False),
            # optional, empty input
            ('', '', True, None, None, None, None, True, False),
            # optional, null input
            (None, None, True, None, None, None, None, True, False),
            # required, normal input, max_length=10
            ('string', 'string', True, None, 10, None, None, False, False),
            # optional, normal input, max_length=10
            ('string', 'string', True, None, 10, None, None, True, False),
            # required, normal input, min_length=2
            ('string', 'string', True, None, None, 2, None, False, False),
            # optional, normal input, min_length=2
            ('string', 'string', True, None, None, 2, None, True, False),
            # required, normal input, regex=string
            ('string', 'string', True, None, None, None, r'^string$', True, False),
            # optional, null input, conditional_required=True
            (
                None,
                None,
                True,
                [{'field': 'conditional', 'op': 'eq', 'value': 'optional'}],
                None,
                None,
                None,
                True,
                False,
            ),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, True, None, None, None, None, False, True),
            # required, empty input, allow_empty=False
            ('', None, False, None, None, None, None, False, True),
            # required, empty input, conditional_required=True
            (
                '',
                'string',
                True,
                [{'field': 'conditional', 'op': 'eq', 'value': 'required'}],
                None,
                None,
                None,
                False,
                True,
            ),
            # required, null input, conditional_required=True
            (
                None,
                'string',
                True,
                [{'field': 'conditional', 'op': 'eq', 'value': 'required'}],
                None,
                None,
                None,
                False,
                True,
            ),
            # required, normal input, max_length=2
            ('string', 'string', True, None, 2, None, None, False, True),
            # optional, normal input, max_length=2
            ('string', 'string', True, None, 2, None, None, True, True),
            # required, normal input, min_length=10
            ('string', 'string', True, None, None, 10, None, False, True),
            # optional, normal input, min_length=10
            ('string', 'string', True, None, None, 10, None, True, True),
            # required, normal input, regex=string
            ('string', 'string', True, None, None, None, r'^string-extra$', True, True),
        ],
    )
    def test_field_model_string_custom_input(
        self,
        input_value: str,
        expected: str,
        allow_empty: bool,
        conditional_required_rules: Optional[List[Dict[str, str]]],
        max_length: int,
        min_length: int,
        regex: Optional[str],
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test String field type with custom constraints.
        Playbook Data Type: String
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                conditional: str = 'required'
                my_data: string(
                    allow_empty=allow_empty,
                    max_length=max_length,
                    min_length=min_length,
                    regex=regex,
                )
                _conditional_required = validator(
                    'my_data', allow_reuse=True, always=True, pre=True
                )(conditional_required(rules=conditional_required_rules))
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                conditional: str = 'required'
                my_data: Optional[
                    string(
                        allow_empty=allow_empty,
                        max_length=max_length,
                        min_length=min_length,
                        regex=regex,
                    )
                ]
                _conditional_required = validator(
                    'my_data', allow_reuse=True, always=True, pre=True
                )(conditional_required(rules=conditional_required_rules))
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type='String',
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        'input_value,expected,optional,fail_test',
        [
            #
            # Pass Testing
            #
            # required, normal input
            (['string'], ['string'], False, False),
            # required, empty input
            ([], [], False, False),
            # optional, empty input
            ([], [], True, False),
            # optional, null input
            (None, None, True, False),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, False, True),
        ],
    )
    def test_field_model_string_array_input(
        self,
        input_value: str,
        expected: str,
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test StringArray field type.
        Playbook Data Type: StringArray
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: List[String]
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Optional[List[String]]
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type='StringArray',
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        'input_value,expected,input_type,optional,fail_test',
        [
            #
            # Pass Testing
            #
            # required, string input
            ('string', ['string'], 'String', False, False),
            # required, array input
            (['string'], ['string'], 'StringArray', False, False),
            # required, empty string input
            ('', [], 'String', False, False),
            # required, empty array input
            ([], [], 'StringArray', False, False),
            # optional, empty string input
            ('', [], 'String', True, False),
            # optional, empty array input
            ([], [], 'StringArray', True, False),
            # optional, null input
            (None, [], 'String', True, False),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, 'String', False, True),
        ],
    )
    def test_field_model_string_union_input(
        self,
        input_value: str,
        expected: str,
        input_type: str,
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test String/StringArray union field type (normalized to array).
        Playbook Data Type: String or StringArray
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Union[String, List[String]]
                _always_array = validator('my_data', allow_reuse=True)(always_array())
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Optional[Union[String, List[String]]]
                _always_array = validator('my_data', allow_reuse=True)(always_array())
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type=input_type,
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        ('nested_reference,nested_value,value,expected_value'),
        [
            (
                '#App:1234:my_ref!String',
                'nested string',
                'string with nested string: #App:1234:my_ref!String',
                'string with nested string: nested string',
            ),
            (
                '#App:1234:my_ref!StringArray',
                ['nested string'],
                'string with nested value: #App:1234:my_ref!StringArray',
                'string with nested value: ["nested string"]',
            ),
            (
                '#App:1234:my_ref!Binary',
                b'nested string',
                'string with nested string: #App:1234:my_ref!Binary',
                'string with nested string: <binary>',
            ),
            (
                '#App:1234:my_ref!BinaryArray',
                [b'nested string'],
                'string with nested string: #App:1234:my_ref!BinaryArray',
                'string with nested string: <binary>',
            ),
            (
                '#App:1234:my_ref!KeyValue',
                {'key': 'key', 'value': 'value', 'type': 'any'},
                'string with nested string: #App:1234:my_ref!KeyValue',
                'string with nested string: {"key": "key", "value": "value", "type": "any"}',
            ),
            (
                '#App:1234:my_ref!KeyValueArray',
                [{'key': 'key', 'value': 'value', 'type': 'any'}],
                'string with nested string: #App:1234:my_ref!KeyValueArray',
                'string with nested string: [{"key": "key", "value": "value", "type": "any"}]',
            ),
            (
                '#App:1234:my_ref!TCEntity',
                {'id': '1', 'value': '1.1.1.1', 'type': 'Address'},
                'string with nested string: #App:1234:my_ref!TCEntity',
                'string with nested string: {"id": "1", "value": "1.1.1.1", "type": "Address"}',
            ),
            (
                '#App:1234:my_ref!TCEntityArray',
                [{'id': '1', 'value': '1.1.1.1', 'type': 'Address'}],
                'string with nested string: #App:1234:my_ref!TCEntityArray',
                'string with nested string: [{"id": "1", "value": "1.1.1.1", "type": "Address"}]',
            ),
            (
                '#App:1234:my_ref!String',
                None,
                'string with nested string: #App:1234:my_ref!String',
                'string with nested string: <null>',
            ),
        ],
    )
    def test_field_type_string_with_nested_reference(
        self,
        nested_reference,
        nested_value,
        value,
        expected_value,
        playbook_app: 'MockApp',
    ):
        """Test String field type with nested reference.
        Args:
            nested_reference: nested variable reference found within string
            nested_value: the value that nested_reference should resolve to
            value: the String value exactly as passed in from the UI
            expected_value: The String value as passed in from the UI after nested reference
                is resolved
            playbook_app (fixture): An instance of MockApp.
        """
        class PytestModel(BaseModel):
            """Test Model for Inputs"""
            my_data: String
        config_data = {'my_data': '#App:1234:my_data!String'}
        app = playbook_app(config_data=config_data)
        tcex = app.tcex
        self._stage_key_value('my_ref', nested_reference, nested_value, tcex)
        self._stage_key_value(
            'my_data',
            '#App:1234:my_data!String',
            value,
            tcex,
        )
        tcex.inputs.add_model(PytestModel)
        assert tcex.inputs.model.my_data == expected_value
| 32.395197 | 98 | 0.497405 | """Testing TcEx Input module field types."""
# standard library
from typing import TYPE_CHECKING, Dict, List, Optional, Union
# third-party
import pytest
from pydantic import BaseModel, validator
# first-party
from tcex.input.field_types import String, always_array, conditional_required, string
from tcex.pleb.scoped_property import scoped_property
from tests.input.field_types.utils import InputTest
if TYPE_CHECKING:
# first-party
from tests.mock_app import MockApp
# pylint: disable=no-self-argument, no-self-use
class TestInputsFieldTypes(InputTest):
    """Test TcEx String Field Model Tests."""
    def setup_method(self):
        """Configure setup before all tests."""
        scoped_property._reset()
    @pytest.mark.parametrize(
        'input_value,expected,optional,fail_test',
        [
            #
            # Pass Testing
            #
            # required, normal input
            ('string', 'string', False, False),
            # required, empty input
            ('', '', False, False),
            # optional, empty input
            ('', '', True, False),
            # optional, null input
            (None, None, True, False),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, False, True),
        ],
    )
    def test_field_model_string_input(
        self,
        input_value: str,
        expected: str,
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test String field type.
        Playbook Data Type: String
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: String
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Optional[String]
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type='String',
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        (
            'input_value,expected,allow_empty,conditional_required_rules,'
            'max_length,min_length,regex,optional,fail_test'
        ),
        [
            #
            # Pass Testing
            #
            # required, normal input
            ('string', 'string', True, None, None, None, None, False, False),
            # required, empty input
            ('', '', True, None, None, None, None, False, False),
            # optional, empty input
            ('', '', True, None, None, None, None, True, False),
            # optional, null input
            (None, None, True, None, None, None, None, True, False),
            # required, normal input, max_length=10
            ('string', 'string', True, None, 10, None, None, False, False),
            # optional, normal input, max_length=10
            ('string', 'string', True, None, 10, None, None, True, False),
            # required, normal input, min_length=2
            ('string', 'string', True, None, None, 2, None, False, False),
            # optional, normal input, min_length=2
            ('string', 'string', True, None, None, 2, None, True, False),
            # required, normal input, regex=string
            ('string', 'string', True, None, None, None, r'^string$', True, False),
            # optional, null input, conditional_required=True
            (
                None,
                None,
                True,
                [{'field': 'conditional', 'op': 'eq', 'value': 'optional'}],
                None,
                None,
                None,
                True,
                False,
            ),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, True, None, None, None, None, False, True),
            # required, empty input, allow_empty=False
            ('', None, False, None, None, None, None, False, True),
            # required, empty input, conditional_required=True
            (
                '',
                'string',
                True,
                [{'field': 'conditional', 'op': 'eq', 'value': 'required'}],
                None,
                None,
                None,
                False,
                True,
            ),
            # required, null input, conditional_required=True
            (
                None,
                'string',
                True,
                [{'field': 'conditional', 'op': 'eq', 'value': 'required'}],
                None,
                None,
                None,
                False,
                True,
            ),
            # required, normal input, max_length=2
            ('string', 'string', True, None, 2, None, None, False, True),
            # optional, normal input, max_length=2
            ('string', 'string', True, None, 2, None, None, True, True),
            # required, normal input, min_length=10
            ('string', 'string', True, None, None, 10, None, False, True),
            # optional, normal input, min_length=10
            ('string', 'string', True, None, None, 10, None, True, True),
            # required, normal input, regex=string
            ('string', 'string', True, None, None, None, r'^string-extra$', True, True),
        ],
    )
    def test_field_model_string_custom_input(
        self,
        input_value: str,
        expected: str,
        allow_empty: bool,
        conditional_required_rules: Optional[List[Dict[str, str]]],
        max_length: int,
        min_length: int,
        regex: Optional[str],
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test String field type with custom constraints.
        Playbook Data Type: String
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                conditional: str = 'required'
                my_data: string(
                    allow_empty=allow_empty,
                    max_length=max_length,
                    min_length=min_length,
                    regex=regex,
                )
                _conditional_required = validator(
                    'my_data', allow_reuse=True, always=True, pre=True
                )(conditional_required(rules=conditional_required_rules))
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                conditional: str = 'required'
                my_data: Optional[
                    string(
                        allow_empty=allow_empty,
                        max_length=max_length,
                        min_length=min_length,
                        regex=regex,
                    )
                ]
                _conditional_required = validator(
                    'my_data', allow_reuse=True, always=True, pre=True
                )(conditional_required(rules=conditional_required_rules))
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type='String',
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        'input_value,expected,optional,fail_test',
        [
            #
            # Pass Testing
            #
            # required, normal input
            (['string'], ['string'], False, False),
            # required, empty input
            ([], [], False, False),
            # optional, empty input
            ([], [], True, False),
            # optional, null input
            (None, None, True, False),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, False, True),
        ],
    )
    def test_field_model_string_array_input(
        self,
        input_value: str,
        expected: str,
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test StringArray field type.
        Playbook Data Type: StringArray
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: List[String]
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Optional[List[String]]
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type='StringArray',
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        'input_value,expected,input_type,optional,fail_test',
        [
            #
            # Pass Testing
            #
            # required, string input
            ('string', ['string'], 'String', False, False),
            # required, array input
            (['string'], ['string'], 'StringArray', False, False),
            # required, empty string input
            ('', [], 'String', False, False),
            # required, empty array input
            ([], [], 'StringArray', False, False),
            # optional, empty string input
            ('', [], 'String', True, False),
            # optional, empty array input
            ([], [], 'StringArray', True, False),
            # optional, null input
            (None, [], 'String', True, False),
            #
            # Fail Testing
            #
            # required, null input
            (None, None, 'String', False, True),
        ],
    )
    def test_field_model_string_union_input(
        self,
        input_value: str,
        expected: str,
        input_type: str,
        optional: bool,
        fail_test: bool,
        playbook_app: 'MockApp',
    ):
        """Test String/StringArray union field type (normalized to array).
        Playbook Data Type: String or StringArray
        Validation: Not null
        """
        if optional is False:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Union[String, List[String]]
                _always_array = validator('my_data', allow_reuse=True)(always_array())
        else:
            class PytestModel(BaseModel):
                """Test Model for Inputs"""
                my_data: Optional[Union[String, List[String]]]
                _always_array = validator('my_data', allow_reuse=True)(always_array())
        self._type_validation(
            PytestModel,
            input_name='my_data',
            input_value=input_value,
            input_type=input_type,
            expected=expected,
            fail_test=fail_test,
            playbook_app=playbook_app,
        )
    @pytest.mark.parametrize(
        ('nested_reference,nested_value,value,expected_value'),
        [
            (
                '#App:1234:my_ref!String',
                'nested string',
                'string with nested string: #App:1234:my_ref!String',
                'string with nested string: nested string',
            ),
            (
                '#App:1234:my_ref!StringArray',
                ['nested string'],
                'string with nested value: #App:1234:my_ref!StringArray',
                'string with nested value: ["nested string"]',
            ),
            (
                '#App:1234:my_ref!Binary',
                b'nested string',
                'string with nested string: #App:1234:my_ref!Binary',
                'string with nested string: <binary>',
            ),
            (
                '#App:1234:my_ref!BinaryArray',
                [b'nested string'],
                'string with nested string: #App:1234:my_ref!BinaryArray',
                'string with nested string: <binary>',
            ),
            (
                '#App:1234:my_ref!KeyValue',
                {'key': 'key', 'value': 'value', 'type': 'any'},
                'string with nested string: #App:1234:my_ref!KeyValue',
                'string with nested string: {"key": "key", "value": "value", "type": "any"}',
            ),
            (
                '#App:1234:my_ref!KeyValueArray',
                [{'key': 'key', 'value': 'value', 'type': 'any'}],
                'string with nested string: #App:1234:my_ref!KeyValueArray',
                'string with nested string: [{"key": "key", "value": "value", "type": "any"}]',
            ),
            (
                '#App:1234:my_ref!TCEntity',
                {'id': '1', 'value': '1.1.1.1', 'type': 'Address'},
                'string with nested string: #App:1234:my_ref!TCEntity',
                'string with nested string: {"id": "1", "value": "1.1.1.1", "type": "Address"}',
            ),
            (
                '#App:1234:my_ref!TCEntityArray',
                [{'id': '1', 'value': '1.1.1.1', 'type': 'Address'}],
                'string with nested string: #App:1234:my_ref!TCEntityArray',
                'string with nested string: [{"id": "1", "value": "1.1.1.1", "type": "Address"}]',
            ),
            (
                '#App:1234:my_ref!String',
                None,
                'string with nested string: #App:1234:my_ref!String',
                'string with nested string: <null>',
            ),
        ],
    )
    def test_field_type_string_with_nested_reference(
        self,
        nested_reference,
        nested_value,
        value,
        expected_value,
        playbook_app: 'MockApp',
    ):
        """Test String field type with nested reference.
        Args:
            nested_reference: nested variable reference found within string
            nested_value: the value that nested_reference should resolve to
            value: the String value exactly as passed in from the UI
            expected_value: The String value as passed in from the UI after nested reference
                is resolved
            playbook_app (fixture): An instance of MockApp.
        """
        class PytestModel(BaseModel):
            """Test Model for Inputs"""
            my_data: String
        config_data = {'my_data': '#App:1234:my_data!String'}
        app = playbook_app(config_data=config_data)
        tcex = app.tcex
        self._stage_key_value('my_ref', nested_reference, nested_value, tcex)
        self._stage_key_value(
            'my_data',
            '#App:1234:my_data!String',
            value,
            tcex,
        )
        tcex.inputs.add_model(PytestModel)
        assert tcex.inputs.model.my_data == expected_value
| 0 | 0 | 0 |
121c48fedb552f8d62a31c4d14eba09328adfaf4 | 342 | py | Python | app/helpers.py | spark8103/dlop2 | 7f35ccb603af97c2d344a9db86f5fa33a8f73c8f | [
"Apache-2.0"
] | null | null | null | app/helpers.py | spark8103/dlop2 | 7f35ccb603af97c2d344a9db86f5fa33a8f73c8f | [
"Apache-2.0"
] | 1 | 2017-07-22T21:22:24.000Z | 2017-07-22T21:22:24.000Z | app/helpers.py | spark8103/dlop2 | 7f35ccb603af97c2d344a9db86f5fa33a8f73c8f | [
"Apache-2.0"
] | null | null | null | import time
import base64
import hashlib
import functools
from flask import g, request, session, current_app
from flask import flash, url_for, redirect, abort
from flask.ext.babel import lazy_gettext as _
from .models import Asset_model
| 21.375 | 50 | 0.751462 | import time
import base64
import hashlib
import functools
from flask import g, request, session, current_app
from flask import flash, url_for, redirect, abort
from flask.ext.babel import lazy_gettext as _
from .models import Asset_model
def force_int(value, default=1):
    """Coerce *value* to int, returning *default* when conversion fails.
    Fix: the original bare ``except:`` swallowed every exception,
    including KeyboardInterrupt/SystemExit; only conversion errors
    (bad type or unparseable string) should fall back to *default*.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return default
| 81 | 0 | 23 |
a3cff8a63dce741ea09295246519053f01984fb9 | 1,309 | py | Python | testweet/listFiltereddirectMessages.py | fuzzyhandle/pihangout | 9382a9bde92d753d52584d99e2e52987d504da54 | [
"MIT"
] | null | null | null | testweet/listFiltereddirectMessages.py | fuzzyhandle/pihangout | 9382a9bde92d753d52584d99e2e52987d504da54 | [
"MIT"
] | null | null | null | testweet/listFiltereddirectMessages.py | fuzzyhandle/pihangout | 9382a9bde92d753d52584d99e2e52987d504da54 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, print_function
from datetime import datetime
import json
import urllib
import time
from pprint import pprint
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from itertools import ifilter
if __name__ == '__main__':
starttime = datetime.now()
api = getTwitterAPIHandle()
myid = api.me().id
#Get Default number of recent tweets
dm = api.direct_messages()
#print (dm)
filters= (filter_for_author,)
for twt in nFilter(filters, dm):
print (twt.text.encode('utf-8'))
print (twt.created_at) | 24.240741 | 82 | 0.722689 | from __future__ import absolute_import, print_function
from datetime import datetime
import json
import urllib
import time
from pprint import pprint
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from itertools import ifilter
def getTwitterAPIHandle():
    """Return a tweepy API handle authenticated from api_secret_token.json.

    The JSON file must hold consumer_key/consumer_secret and
    access_token/access_token_secret.
    """
    with open('api_secret_token.json') as token_file:
        creds = json.load(token_file)
    handler = OAuthHandler(creds['consumer_key'], creds['consumer_secret'])
    handler.set_access_token(creds['access_token'], creds['access_token_secret'])
    return API(handler)
def filter_has_my_mention(twt):
    """Return True when the tweet mentions the account id held in ``myid``.

    NOTE(review): depends on the module-global ``myid`` assigned in the
    ``__main__`` block — confirm it is set before this predicate is used.
    """
    return any(mention['id'] == myid for mention in twt.entities['user_mentions'])
def filter_for_author(twt):
    """Keep only direct messages sent by the hard-coded screen name."""
    return 'hrishikesh_date' == twt.sender_screen_name
def nFilter(filters, tuples):
    """Lazily chain every predicate in *filters* over *tuples*.

    Each predicate wraps the previous iterator with Python 2's
    ``itertools.ifilter``, so nothing is evaluated until iteration.
    """
    filtered = tuples
    for predicate in filters:
        filtered = ifilter(predicate, filtered)
    return filtered
if __name__ == '__main__':
starttime = datetime.now()
api = getTwitterAPIHandle()
myid = api.me().id
#Get Default number of recent tweets
dm = api.direct_messages()
#print (dm)
filters= (filter_for_author,)
for twt in nFilter(filters, dm):
print (twt.text.encode('utf-8'))
print (twt.created_at) | 565 | 0 | 104 |
ec1e47db5f2ec0dc6381106a7822899e4b8a3ca3 | 1,033 | py | Python | lib/model/hough_voting/modules/hough_voting.py | mrlooi/Detectron.pytorch | e1bde064b763de1428c7494b86a365baff4fa1d4 | [
"MIT"
] | null | null | null | lib/model/hough_voting/modules/hough_voting.py | mrlooi/Detectron.pytorch | e1bde064b763de1428c7494b86a365baff4fa1d4 | [
"MIT"
] | null | null | null | lib/model/hough_voting/modules/hough_voting.py | mrlooi/Detectron.pytorch | e1bde064b763de1428c7494b86a365baff4fa1d4 | [
"MIT"
] | null | null | null | from torch.nn.modules.module import Module
from ..functions.hough_voting import HoughVotingFunction
# from functions.hough_voting import HoughVotingFunction
| 49.190476 | 148 | 0.762827 | from torch.nn.modules.module import Module
from ..functions.hough_voting import HoughVotingFunction
# from functions.hough_voting import HoughVotingFunction
class HoughVoting(Module):
def __init__(self, num_classes, threshold_vote, threshold_percentage, label_threshold=500, inlier_threshold=0.9, skip_pixels=1, is_train=False):
super(HoughVoting, self).__init__()
self.num_classes = num_classes
self.label_threshold = int(label_threshold)
self.inlier_threshold = float(inlier_threshold)
self.threshold_vote = float(threshold_vote)
self.threshold_percentage = float(threshold_percentage)
self.skip_pixels = int(skip_pixels)
self.is_train = is_train
def forward(self, label_2d, vertex_pred, extents, poses, meta_data):
return HoughVotingFunction(self.num_classes, self.threshold_vote, self.threshold_percentage,
self.label_threshold, self.inlier_threshold, self.skip_pixels, self.is_train)(label_2d, vertex_pred, extents, poses, meta_data)
| 795 | 5 | 76 |
c6353098091af6c161147e9ff1d4891704d585f8 | 3,118 | py | Python | trading_strategy_test.py | alfredholmes/shortterm_rnn | fe79423db80400a1d663af6f35f46b3802cbdb30 | [
"MIT"
] | null | null | null | trading_strategy_test.py | alfredholmes/shortterm_rnn | fe79423db80400a1d663af6f35f46b3802cbdb30 | [
"MIT"
] | null | null | null | trading_strategy_test.py | alfredholmes/shortterm_rnn | fe79423db80400a1d663af6f35f46b3802cbdb30 | [
"MIT"
] | null | null | null | import rnn
import csv, random
from functions import *
#get the price and volume data from file
if __name__ == '__main__':
main()
| 35.033708 | 176 | 0.614817 | import rnn
import csv, random
from functions import *
#get the price and volume data from file
def main():
    """Backtest a naive RNN-forecast trading strategy on hourly BTC data.

    Loads hourly price/volume data, estimates the forecaster's one-step
    error distribution on a held-out tail, then walks forward one day at a
    time: 20 Monte-Carlo 24-hour rollouts vote on whether the price will
    rise, and the majority vote flips the portfolio between USD and BTC.

    Behaviour-preserving clean-ups over the previous revision:
    locals no longer shadow the builtins ``min``/``max``/``input``/``output``,
    and the dead locals ``total_days``, ``currency``, ``min_max`` are gone.
    """
    scale = 3
    inputs = 168  # hours of history fed to the forecaster (one week)
    forcaster = rnn.TimeSeriesForcaster('forcaster', inputs, 2, 2, 0.001)
    forcaster.restore()
    daily = get_price_volume_data('hourly_btc.csv', 1, 2)
    # Split the series into `inputs`-long windows plus the next observation.
    chunks = []
    targets = []
    for start in range(len(daily) - inputs):
        chunks.append(daily[start:start + inputs])
        targets.append(daily[start + inputs])
    # Scale each window (and its target) independently to the model's range.
    for idx in range(len(chunks)):
        chunks[idx], lo, hi = scale_input(chunks[idx], scale)
        targets[idx] = scale_output([targets[idx]], lo, hi, scale)[0]
    model_input = parse_input(chunks)
    model_output = parse_output(targets)
    train_cutoff = int(len(model_input) * 8 / 10)
    # Estimate one-step prediction error statistics on the held-out tail.
    print('Calculating errors')
    price_error_paramaters = forcaster.get_error_for_metric(0, model_input[train_cutoff:], model_output[train_cutoff:])
    volume_error_paramaters = forcaster.get_error_for_metric(1, model_input[train_cutoff:], model_output[train_cutoff:])
    price_error_paramaters = {'mean': price_error_paramaters[0], 'variance': price_error_paramaters[1] - price_error_paramaters[1]**2}
    volume_error_paramaters = {'mean': volume_error_paramaters[0], 'variance': volume_error_paramaters[1] - volume_error_paramaters[1]**2}
    USD = 1000
    BTC = 0
    hold_btc = USD / daily[-train_cutoff]['price']  # buy-and-hold baseline
    # Walk forward one day (24 h) at a time through the backtest region.
    for i in range(-train_cutoff, -inputs, 24):
        print(i)
        price_volume = daily[i:i + inputs]
        start_price = price_volume[0]['price']
        higher = 0
        # 20 stochastic rollouts; each extends the window by 24 predicted
        # steps perturbed with the estimated error distribution.
        # NOTE(review): price_volume is not reset between the 20 rollouts,
        # so later rollouts start from earlier rollouts' drifted state —
        # preserved as-is from the original implementation.
        for _ in range(20):
            for _ in range(24):
                scaled, lo, hi = scale_input(price_volume, scale)
                scaled = parse_input([scaled])
                prediction = forcaster.predict(scaled)[0]
                price = unscale([prediction[0] + random.gauss(-price_error_paramaters['mean'], price_error_paramaters['variance']**0.5)], lo['price'], hi['price'], scale)[0]
                volume = unscale([prediction[1] + random.gauss(-volume_error_paramaters['mean'], volume_error_paramaters['variance']**0.5)], lo['volume'], hi['volume'], scale)[0]
                price_volume.append({'price': price, 'volume': volume})
                price_volume = price_volume[1:]
            if price_volume[-1]['price'] > start_price:
                higher += 1
        # Majority vote across rollouts decides the position.
        if higher > 10 and BTC == 0:
            BTC = USD / start_price
            USD = 0
        elif higher < 10 and USD == 0:
            USD = BTC * start_price
            BTC = 0
        total = USD + BTC * daily[i]['price']
        print('Total Value: ' + str(total) + 'Hold btc value: ' + str(hold_btc * daily[i]['price']))
if __name__ == '__main__':
main()
| 2,958 | 0 | 23 |
0e517074ab184506d992b2d3ed97bbce9eab6677 | 4,053 | py | Python | machine_learning/torch_anomaly_detection/src/data/celeba.py | iimuz/til | b100438e8ce2f369331b3be215a4b9cdce9ffda5 | [
"MIT"
] | 4 | 2020-07-25T01:20:08.000Z | 2020-10-03T12:58:15.000Z | machine_learning/torch_anomaly_detection/src/data/celeba.py | iimuz/til | b100438e8ce2f369331b3be215a4b9cdce9ffda5 | [
"MIT"
] | 29 | 2019-09-30T08:04:14.000Z | 2022-03-12T13:51:08.000Z | machine_learning/torch_anomaly_detection/src/data/celeba.py | iimuz/til | b100438e8ce2f369331b3be215a4b9cdce9ffda5 | [
"MIT"
] | 1 | 2020-08-14T05:15:51.000Z | 2020-08-14T05:15:51.000Z | """CelebA Dataset.
Notes:
- `http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html`
"""
# default packages
import logging
import pathlib
import shutil
import typing as t
import zipfile
# third party packages
import pandas as pd
import requests
import tqdm as tqdm_std
# my packages
import src.data.dataset as dataset
import src.data.utils as ut
# logger
_logger = logging.getLogger(__name__)
def _download(filepath: pathlib.Path, chunksize: int = 32768) -> None:
"""Download CelebA Dataset.
Args:
filepath (pathlib.Path): ダウンロードしたファイルを置くファイルパス.
chunksize (int, optional): ダウンロードのチャンクサイズ. Defaults to 32768.
Notes:
- reference:
`https://gist.github.com/charlesreid1/4f3d676b33b95fce83af08e4ec261822`
"""
URL = "https://docs.google.com/uc?export=download"
ID = "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
with requests.Session() as session:
params: t.Dict[str, t.Any] = dict(id=ID)
response = session.get(URL, params=params, stream=True)
params["confirm"] = _get_confirm_token(response)
response = session.get(URL, params=params, stream=True)
_save_response_content(response, filepath, chunksize)
def _get_confirm_token(response: requests.Response) -> t.Optional[str]:
"""トークンを生成する.
Args:
response (requests.Response): 取得する先のレスポンス.
Returns:
t.Optional[str]: トークン.
"""
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def _save_response_content(
response: requests.Response, filepath: pathlib.Path, chunksize: int = 32768,
) -> None:
"""レスポンス内容をファイルとして保存する.
Args:
response (requests.Response): レスポンス.
filepath (pathlib.Path): 保存先のファイルパス.
chunksize (int, optional): ダウンロードするチャンクサイズ. Defaults to 32768.
"""
with open(str(filepath), "wb") as f:
for chunk in tqdm_std.tqdm(response.iter_content(chunksize)):
if chunk:
f.write(chunk)
def main() -> None:
"""Celeba データセットをダウンロードし、学習及びテスト用のファイルリストを生成する."""
celeba = Celeba()
celeba.save()
if __name__ == "__main__":
try:
ut.init_root_logger()
main()
except Exception as e:
_logger.exception(e)
| 29.158273 | 83 | 0.630891 | """CelebA Dataset.
Notes:
- `http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html`
"""
# default packages
import logging
import pathlib
import shutil
import typing as t
import zipfile
# third party packages
import pandas as pd
import requests
import tqdm as tqdm_std
# my packages
import src.data.dataset as dataset
import src.data.utils as ut
# logger
_logger = logging.getLogger(__name__)
class Celeba(dataset.Dataset):
    """CelebA image dataset: downloads, extracts, and indexes the images.

    Relies on the dataset.Dataset base class for ``self.path`` and for the
    save()/load() drivers — presumably save() calls save_dataset() and
    load() calls load_dataset(); TODO confirm against src.data.dataset.
    """
    def __init__(self) -> None:
        super().__init__()
        # Paths under the dataset root: the raw archive, the extracted
        # image directory, and the train/valid file-list CSVs.
        self.archive_file = self.path.joinpath("img_align_celeba.zip")
        self.datadir = self.path.joinpath("img_align_celeba")
        self.train_list = self.path.joinpath("train.csv")
        self.valid_list = self.path.joinpath("valid.csv")
    def save_dataset(self, reprocess: bool) -> None:
        """Ensure the archive is downloaded/extracted and the file lists exist.

        Args:
            reprocess: when True, wipe the dataset directory and start over.
        """
        if reprocess:
            _logger.info("=== reporcess mode. delete existing data.")
            shutil.rmtree(self.path)
        self.path.mkdir(exist_ok=True)
        # Download and unzip only when the extracted directory is missing.
        if not self.datadir.exists():
            if not self.archive_file.exists():
                _logger.info("=== download zip file.")
                _download(self.archive_file)
            _logger.info("=== unzip.")
            with zipfile.ZipFile(str(self.archive_file)) as z:
                z.extractall(str(self.path))
        # Build deterministic (sorted) train/valid splits: first 80% train.
        if not self.train_list.exists() and not self.valid_list.exists():
            _logger.info("=== create train and valid file list.")
            filelist = sorted(
                [p.relative_to(self.path) for p in self.path.glob("**/*.jpg")]
            )
            train_ratio = 0.8
            train_num = int(len(filelist) * train_ratio)
            if not self.train_list.exists():
                train_list = pd.DataFrame({"filepath": filelist[:train_num]})
                train_list.to_csv(self.train_list, index=False)
            if not self.valid_list.exists():
                valid_list = pd.DataFrame({"filepath": filelist[train_num:]})
                valid_list.to_csv(self.valid_list, index=False)
    def load_dataset(self) -> None:
        """Load the train/valid file lists into DataFrames (self.train/self.valid)."""
        self.train = pd.read_csv(self.train_list)
        self.valid = pd.read_csv(self.valid_list)
def _download(filepath: pathlib.Path, chunksize: int = 32768) -> None:
    """Download the CelebA archive from Google Drive into *filepath*.

    Performs the two-step Google Drive dance: the first request yields a
    confirmation token, the second (with the token) streams the file.

    Args:
        filepath: destination path for the downloaded archive.
        chunksize: streaming chunk size in bytes.
    """
    download_url = "https://docs.google.com/uc?export=download"
    file_id = "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
    with requests.Session() as session:
        query: t.Dict[str, t.Any] = dict(id=file_id)
        first = session.get(download_url, params=query, stream=True)
        query["confirm"] = _get_confirm_token(first)
        confirmed = session.get(download_url, params=query, stream=True)
        _save_response_content(confirmed, filepath, chunksize)
def _get_confirm_token(response: requests.Response) -> t.Optional[str]:
"""トークンを生成する.
Args:
response (requests.Response): 取得する先のレスポンス.
Returns:
t.Optional[str]: トークン.
"""
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def _save_response_content(
    response: requests.Response, filepath: pathlib.Path, chunksize: int = 32768,
) -> None:
    """Stream the response body to *filepath*, showing a progress bar.

    Args:
        response: streaming HTTP response to read.
        filepath: destination file (opened in binary write mode).
        chunksize: bytes fetched per iteration.
    """
    with open(str(filepath), "wb") as out:
        for chunk in tqdm_std.tqdm(response.iter_content(chunksize)):
            # Skip keep-alive chunks, which arrive empty.
            if not chunk:
                continue
            out.write(chunk)
def main() -> None:
    """Download the CelebA dataset and write the train/valid file lists."""
    Celeba().save()
if __name__ == "__main__":
    # Script entry point: configure logging, run, and log any failure with
    # a full traceback instead of crashing with a bare stack dump.
    try:
        ut.init_root_logger()
        main()
    except Exception as e:
        _logger.exception(e)
| 1,650 | 9 | 103 |
5632ee42701d1fcbabc80e7abde275ccc5f6d2d1 | 206 | py | Python | Desafios/desafio-03.py | marielitonmb/Curso-Python3 | 26215c47c4d1eadf940b8024305b7e9ff600883b | [
"MIT"
] | null | null | null | Desafios/desafio-03.py | marielitonmb/Curso-Python3 | 26215c47c4d1eadf940b8024305b7e9ff600883b | [
"MIT"
] | null | null | null | Desafios/desafio-03.py | marielitonmb/Curso-Python3 | 26215c47c4d1eadf940b8024305b7e9ff600883b | [
"MIT"
] | null | null | null | # Aula 6 - Desafio 3: Somando dois numeros
num1 = int(input('1º numero: '))
num2 = int(input('2º numero: '))
soma = (num1 + num2)
print(f'A soma entre {num1} e {num2} eh igual a \033[7;33m{soma}\033[m')
| 22.888889 | 72 | 0.635922 | # Aula 6 - Desafio 3: Somando dois numeros
# Read two integers from the user and print their sum.
num1 = int(input('1º numero: '))
num2 = int(input('2º numero: '))
soma = (num1 + num2)
# \033[7;33m ... \033[m wraps the result in reverse-video yellow ANSI codes.
print(f'A soma entre {num1} e {num2} eh igual a \033[7;33m{soma}\033[m')
| 0 | 0 | 0 |
b6029e5c3de03586995e695f33d08198c6b3bcec | 4,639 | py | Python | equilibrium-propagation/numpy_one_layer.py | jiangdaniel/dl-papers | ca85708b5629dc1ba22ec1dfc023d3a6267b0d34 | [
"MIT"
] | 1 | 2019-03-26T12:19:59.000Z | 2019-03-26T12:19:59.000Z | equilibrium-propagation/numpy_one_layer.py | jiangdaniel/ml-implementations | ca85708b5629dc1ba22ec1dfc023d3a6267b0d34 | [
"MIT"
] | 5 | 2018-11-26T05:48:52.000Z | 2018-11-26T05:50:45.000Z | equilibrium-propagation/numpy_one_layer.py | jiangdaniel/ml-implementations | ca85708b5629dc1ba22ec1dfc023d3a6267b0d34 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import torch as th
import torchvision
from tqdm import tqdm
if __name__ == "__main__":
    # CLI: number of training epochs, minibatch size, and a flag that
    # switches the dataset from MNIST to FashionMNIST.
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=1000)
    parser.add_argument("--batch-size", type=int, default=20)
    parser.add_argument("--fashion", action="store_true", default=False)
    args = parser.parse_args()
    main(args)
| 36.527559 | 117 | 0.538047 | #!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import torch as th
import torchvision
from tqdm import tqdm
def main(args):
    """Train a 784-500-10 network with two-phase contrastive updates on MNIST.

    Per the repository layout this implements equilibrium propagation:
    each batch first relaxes freely to a fixed point, then is weakly
    clamped toward the one-hot targets, and the weights move along the
    (clamped - free) correlation difference. Prints per-epoch averages.

    Args:
        args: argparse namespace with ``epochs``, ``batch_size``, ``fashion``.
    """
    trainloader, testloader = get_loaders(args.batch_size, args.fashion)
    # NOTE(review): testloader is fetched but never used in this function.
    # Dynamics / learning-rate hyper-parameters.
    epsilon = 0.5
    beta = 1.0
    alpha1 = 0.1
    alpha2 = 0.05
    # Glorot-style uniform initialisation for both layers (biases included).
    a = np.sqrt(2.0 / (784 + 500))
    W1 = np.random.uniform(-a, a, (784, 500))
    b1 = np.random.uniform(-a, a, 500)
    a = np.sqrt(2.0 / (500 + 10))
    W2 = np.random.uniform(-a, a, (500, 10))
    b2 = np.random.uniform(-a, a, 10)
    # Persistent (hidden, output) state per batch index, reused across epochs.
    states = [(np.random.uniform(0, 1., (args.batch_size, 500)), \
            np.random.uniform(0, 1., (args.batch_size, 10))) for _ in range(len(trainloader))]
    for epoch in range(args.epochs):
        running_loss = running_energy = running_true_positive = 0.
        for i, (x, labels) in enumerate(tqdm(trainloader, desc=f"Epoch {epoch}")):
            x, labels = x.view(-1, 784).numpy(), labels.numpy()
            h, y = states[i]
            # Free phase
            # 20 relaxation steps of the state dynamics with no target term.
            for j in range(20):
                dh = d_rho(h) * (x @ W1 + y @ W2.T + b1) - h
                dy = d_rho(y) * (h @ W2 + b2) - y
                h = rho(h + epsilon * dh)
                y = rho(y + epsilon * dy)
                '''
                energy = (np.square(h).sum() + np.square(y).sum() \
                    - (W1 * (x.T @ h)).sum() - (W2 * (h.T @ y)).sum()) / 2 \
                    - (h @ b1).sum() - (y @ b2).sum())
                print(np.round(energy, 4), np.round(np.linalg.norm(dh), 4))
                '''
            h_free, y_free = np.copy(h), np.copy(y)
            states[i] = h_free, y_free
            # One-hot targets for this batch.
            t = np.zeros((x.shape[0], 10))
            t[np.arange(t.shape[0]), labels] = 1
            # Weakly clamped phase
            # 4 steps with the output nudged toward t by beta * (t - y).
            for j in range(4):
                dh = d_rho(h) * (x @ W1 + y @ W2.T + b1) - h
                dy = d_rho(y) * (h @ W2 + b2) - y + beta * (t - y)
                h = rho(h + epsilon * dh)
                y = rho(y + epsilon * dy)
                '''
                energy = (np.square(h).sum() + np.square(y).sum() \
                    - (W1 * (x.T @ h)).sum() - (W2 * (h.T @ y)).sum()) / 2 \
                    - (h @ b1).sum() - (y @ b2).sum()
                print(np.round(energy, 4), np.round(np.linalg.norm(dh), 4))
                '''
            h_clamped = np.copy(h)
            y_clamped = np.copy(y)
            # Contrastive weight update: (clamped - free) correlations.
            W1 += alpha1 / beta * (rho(x.T) @ rho(h_clamped) - rho(x.T) @ rho(h_free)) / args.batch_size
            W2 += alpha2 / beta * (rho(h_clamped.T) @ rho(y_clamped) - rho(h_free.T) @ rho(y_free)) / args.batch_size
            b1 += alpha1 / beta * (rho(h_clamped) - rho(h_free)).mean(0)
            b2 += alpha2 / beta * (rho(y_clamped) - rho(y_free)).mean(0)
            # Track free-phase energy, squared error, and top-1 hits.
            running_energy += (np.square(h_free).sum() + np.square(y_free).sum() \
                - (W1 * (x.T @ h_free)).sum() - (W2 * (h_free.T @ y_free)).sum()) / 2 \
                - (h_free @ b1).sum() - (y_free @ b2).sum()
            running_loss += np.square(t - y_free).sum()
            running_true_positive += np.count_nonzero(np.argmax(y_free, 1) == labels)
        energy_avg = running_energy / (len(trainloader) * args.batch_size)
        accuracy_avg = running_true_positive / (len(trainloader) * args.batch_size)
        loss_avg = running_loss / (len(trainloader) * args.batch_size)
        print(f"Energy: {energy_avg}, Accuracy: {accuracy_avg}, Loss: {loss_avg}")
def rho(x):
    """Hard-sigmoid activation: clamp *x* elementwise to [0, 1].

    ``np.clip`` already returns a freshly allocated array when ``out`` is
    not given, so the previous ``np.copy`` wrapper was redundant work;
    dropping it preserves the returned values exactly.
    """
    return np.clip(x, 0., 1.)
def d_rho(x):
    """Derivative mask of the hard-sigmoid: True exactly where 0 <= x <= 1."""
    return (0. <= x) & (x <= 1.)
def get_loaders(batch_size, fashion=False):
    """Return (train, test) DataLoaders over MNIST or FashionMNIST.

    Args:
        batch_size: minibatch size for both loaders.
        fashion: when True, use FashionMNIST instead of MNIST.
    """
    dataset_cls = (torchvision.datasets.FashionMNIST if fashion
                   else torchvision.datasets.MNIST)
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor(),])
    def build(train, shuffle):
        # One DataLoader over ./data with two worker processes.
        ds = dataset_cls(root="./data", train=train, download=True,
                         transform=transform)
        return th.utils.data.DataLoader(ds, batch_size=batch_size,
                                        shuffle=shuffle, num_workers=2)
    # Training data is shuffled; test data keeps its natural order.
    return build(True, True), build(False, False)
if __name__ == "__main__":
    # Command-line entry point: parse hyper-parameters, then train.
    cli = argparse.ArgumentParser()
    cli.add_argument("--epochs", type=int, default=1000)
    cli.add_argument("--batch-size", type=int, default=20)
    cli.add_argument("--fashion", action="store_true", default=False)
    main(cli.parse_args())
| 4,006 | 0 | 92 |
00dab43f15c3b886433951b60f7cd8a4dda65920 | 995 | py | Python | tests/test_stdout_stderr.py | MisanthropicBit/colorise | c7a7e3d4b224e80f39761edfc10e5676b610ba41 | [
"BSD-3-Clause"
] | 2 | 2016-02-07T19:58:46.000Z | 2022-03-28T12:26:57.000Z | tests/test_stdout_stderr.py | MisanthropicBit/colorise | c7a7e3d4b224e80f39761edfc10e5676b610ba41 | [
"BSD-3-Clause"
] | 5 | 2018-05-25T04:36:11.000Z | 2021-01-18T19:08:04.000Z | tests/test_stdout_stderr.py | MisanthropicBit/colorise | c7a7e3d4b224e80f39761edfc10e5676b610ba41 | [
"BSD-3-Clause"
] | 2 | 2018-03-04T21:57:03.000Z | 2022-03-28T12:25:54.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test writing to stdout and stderr."""
import os
import sys
import pytest
import colorise
@pytest.mark.skip_on_windows
| 26.184211 | 77 | 0.635176 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test writing to stdout and stderr."""
import os
import sys
import pytest
import colorise
@pytest.mark.skip_on_windows
def test_stdout_stderr(redirect):
with redirect('stdout') as stdout:
colorise.cprint('Hello', fg='red', file=sys.stderr)
assert stdout.value == ''
with redirect('stderr') as stderr:
colorise.cprint('Hello', fg='red', file=sys.stdout)
assert stderr.value == ''
with redirect('stdout') as stdout:
with redirect('stderr') as stderr:
colorise.cprint('Hello', fg='red', file=sys.stdout)
colorise.cprint('World', fg='blue', file=sys.stderr)
assert stdout.value == '\x1b[0m\x1b[31mHello\x1b[0m' + os.linesep
assert stderr.value == '\x1b[0m\x1b[34mWorld\x1b[0m' + os.linesep
def test_stdin():
with pytest.raises(AttributeError, match=r"no attribute 'flush'"):
colorise.cprint('Hello', fg='red', file=sys.stdin)
| 776 | 0 | 45 |
4ac117a2732d98a9b33699c194bf06fe8aa8fc28 | 14,896 | py | Python | common/trainers/bert_trainer.py | marjanhs/procon20 | c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b | [
"MIT"
] | 5 | 2020-07-12T08:27:47.000Z | 2021-10-16T11:40:48.000Z | common/trainers/bert_trainer.py | marjanhs/procon20 | c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b | [
"MIT"
] | null | null | null | common/trainers/bert_trainer.py | marjanhs/procon20 | c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b | [
"MIT"
] | 1 | 2021-04-12T09:54:37.000Z | 2021-04-12T09:54:37.000Z | import datetime
import os
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from tqdm import trange
from common.evaluators.bert_evaluator import BertEvaluator
from datasets.bert_processors.abstract_processor import convert_examples_to_features
from datasets.bert_processors.abstract_processor import convert_examples_to_hierarchical_features
from utils.optimization import warmup_linear
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
from pathlib import Path
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
plt.savefig('grads.png')
| 46.55 | 188 | 0.637755 | import datetime
import os
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from tqdm import trange
from common.evaluators.bert_evaluator import BertEvaluator
from datasets.bert_processors.abstract_processor import convert_examples_to_features
from datasets.bert_processors.abstract_processor import convert_examples_to_hierarchical_features
from utils.optimization import warmup_linear
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
from pathlib import Path
def plot_grad_flow(named_parameters):
    '''Plot mean and max gradient magnitudes per layer and save to grads.png.

    Useful for spotting vanishing/exploding gradients: call after
    loss.backward() with model.named_parameters(). Bias parameters and
    frozen parameters are skipped.'''
    layers, ave_grads, max_grads = [], [], []
    for name, param in named_parameters:
        if not param.requires_grad or "bias" in name:
            continue
        layers.append(name)
        ave_grads.append(param.grad.abs().mean())
        max_grads.append(param.grad.abs().max())
    positions = np.arange(len(max_grads))
    plt.bar(positions, max_grads, alpha=0.1, lw=1, color="c")
    plt.bar(positions, ave_grads, alpha=0.1, lw=1, color="b")
    plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    plt.xlim(left=0, right=len(ave_grads))
    plt.ylim(bottom=-0.001, top=0.02)  # zoom in on the lower gradient regions
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    plt.legend([Line2D([0], [0], color="c", lw=4),
                Line2D([0], [0], color="b", lw=4),
                Line2D([0], [0], color="k", lw=4)],
               ['max-gradient', 'mean-gradient', 'zero-gradient'])
    plt.savefig('grads.png')
class BertTrainer(object):
    def __init__(self, model, optimizer, processor, args):
        """Set up training state, tokenizer, snapshot path, and log templates.

        Args:
            model: the BERT classifier being trained.
            optimizer: optimizer instance (an fp16 wrapper is also accepted,
                see the args.fp16 branches in train_epoch).
            processor: dataset processor providing get_train_examples() and NAME.
            args: configuration namespace (paths, batch size, epochs, ...).
        """
        self.args = args
        self.model = model
        self.optimizer = optimizer
        self.processor = processor
        self.train_examples = self.processor.get_train_examples(args.data_dir, args.train_name)
        self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
        # Snapshots are timestamped so repeated runs never overwrite each other.
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        self.snapshot_path = os.path.join(self.args.save_path, self.processor.NAME, '%s.pt' % timestamp)
        self.num_train_optimization_steps = int(
            len(self.train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            # NOTE(review): reads args.num_train_optimization_steps rather than
            # the value computed just above — confirm this is intended.
            self.num_train_optimization_steps = args.num_train_optimization_steps // torch.distributed.get_world_size()
        # Header and row template for the tqdm validation log lines.
        self.log_header = 'Epoch Iteration Progress Dev/Acc. Dev/Pr. Dev/Re. Dev/F1 Dev/Loss Dev/F1ma, Dev/HLoss, Dev/Jacc, Train/Loss'
        self.log_template = ' '.join('{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'.split(','))
        # Running counters and early-stopping bookkeeping.
        self.iterations, self.nb_tr_steps, self.tr_loss = 0, 0, 0
        self.best_dev_measure, self.unimproved_iters = 0, 0
        self.early_stop = False
def get_order(self, name):
groups = {'bert.embeddings':0, 'bert.pooler':12, 'classifier':13} #classifier
for i in range(12):
groups['bert.encoder.layer.'+str(i)] = i+1
x=[v for k, v in groups.items() if name.startswith(k)][0]
return x
    def train_layer_qroup(self, dataloader, to_freeze_layer, model_path):
        """Train one epoch with groups before *to_freeze_layer* frozen, then log,
        snapshot the model, and reset every optimizer group's LR to 2e-5.

        Args:
            dataloader: training batches passed through to train_epoch().
            to_freeze_layer: layer-group name handed to self.freez() each step.
            model_path: directory (pathlib.Path-like) for the '<layer>.pt' snapshot.
        """
        self.train_epoch(dataloader, freez_layer=to_freeze_layer)
        dev_evaluator = BertEvaluator(self.model, self.processor, self.args, split='dev')
        dev_acc, dev_precision, dev_recall, dev_f1, dev_loss, dev_f1_macro, dev_hamming_loss, dev_jaccard_score, dev_predicted_labels, dev_target_labels = \
            dev_evaluator.get_scores()[0]
        # Print validation results
        tqdm.write(self.log_header)
        tqdm.write(self.log_template.format(1, self.iterations, 1, self.args.epochs,
                                            dev_acc, dev_precision, dev_recall, dev_f1, dev_loss, dev_f1_macro,
                                            dev_hamming_loss, dev_jaccard_score))
        torch.save(self.model, model_path / f'{to_freeze_layer}.pt')
        # update learning rate
        for groups in self.optimizer.param_groups:
            # NOTE(review): `lr` is computed but never used; every group is
            # unconditionally reset to 2e-5 — confirm that is intended.
            lr = groups['lr'] if 'lr' in groups else self.args.lr
            groups['lr'] = 2e-5
def freez(self, layer):
'''
layer and its subsequent layers will be unfreezd, the layers befor 'layer' will be freezed!
:param layer:
:return:
'''
if layer:
order = self.get_order(layer)
for n, p in self.model.named_parameters():
if self.get_order(n)< order:
p.requires_grad = False
else:
p.requires_grad = True
def unfreez_all(self):
for n, p in self.model.named_parameters():
p.requires_grad = True
    def train_epoch(self, train_dataloader, freez_layer=None):
        """Run one optimisation pass over *train_dataloader*.

        Args:
            train_dataloader: batches of (input_ids, input_mask, segment_ids,
                label_ids) tensors.
            freez_layer: optional layer-group name; when given, self.freez()
                is re-applied every step so only that group and later ones train.

        Returns:
            Mean loss per step over the epoch.
        """
        loss_epoch = 0
        for step, batch in enumerate(tqdm(train_dataloader, desc="Training")):
            self.model.train()
            if freez_layer: self.freez(freez_layer)
            batch = tuple(t.to(self.args.device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch
            logits = self.model(input_ids, segment_ids, input_mask)
            loss_extra = 0
            # The model may also return the two [SEP] embeddings; if so, add a
            # cosine-similarity term: labels [0,1] pull the embeddings together
            # (1 - cos), labels [1,0] push similarity down to <= 0 (max(0, cos)).
            if isinstance(logits, tuple):
                logits, (first_SEP, second_SEP) = logits
                cos_simi = F.cosine_similarity(first_SEP, second_SEP)
                for i in range(len(label_ids)):
                    # NOTE(review): .cuda() here assumes CUDA is available — confirm.
                    if torch.eq(label_ids[i], torch.Tensor([0,1]).long().cuda()).all():
                        loss_extra += 1 - cos_simi[i]
                    elif torch.eq(label_ids[i], torch.Tensor([1, 0]).long().cuda()).all():
                        loss_extra += max(0, cos_simi[i])
                    else:
                        print('Invalid label value ERROR', label_ids[i])
                        exit(1)
            if self.args.is_multilabel:
                loss = F.binary_cross_entropy_with_logits(logits, label_ids.float())
            else:
                loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1))
            #print( 'loss extra: ', loss_extra)
            loss += loss_extra
            if self.args.n_gpu > 1:
                loss = loss.mean()
            # Scale down so accumulated gradients average over the micro-batches.
            if self.args.gradient_accumulation_steps > 1:
                loss = loss / self.args.gradient_accumulation_steps
            if self.args.fp16:
                # NOTE(review): optimizer.backward() implies an apex-style
                # fp16 optimizer wrapper — confirm against the caller.
                self.optimizer.backward(loss)
            else:
                loss.backward()
            self.tr_loss += loss.item()
            loss_epoch += loss.item()
            self.nb_tr_steps += 1
            # Step the optimizer only every gradient_accumulation_steps batches.
            if (step + 1) % self.args.gradient_accumulation_steps == 0:
                if self.args.fp16:
                    # Manual linear-warmup schedule when fp16 is in use.
                    lr_this_step = self.args.learning_rate * warmup_linear(self.iterations / self.num_train_optimization_steps, self.args.warmup_proportion)
                    for param_group in self.optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                self.optimizer.step()
                self.optimizer.zero_grad()
                self.iterations += 1
        #print('train loss', np.mean(tr_loss))
        #print('avg grads', np.mean(grads))
        # NOTE(review): raises NameError/ZeroDivision-like failure on an empty
        # dataloader since `step` would be undefined — confirm inputs are non-empty.
        return loss_epoch / (step+1)
    def train(self):
        """Full training loop: featurize examples, build the DataLoader, and
        train for args.epochs with early stopping.

        The best model (by dev F1 when args.early_on_f1, otherwise dev
        accuracy) is saved to self.snapshot_path; self.early_stop is set
        once args.patience epochs pass without improvement.
        """
        # Convert raw examples to (token ids, mask, segment ids) features.
        if self.args.is_hierarchical:
            train_features = convert_examples_to_hierarchical_features(
                self.train_examples, self.args.max_seq_length, self.tokenizer)
        else:
            train_features = convert_examples_to_features(
                self.train_examples, self.args.max_seq_length, self.tokenizer)
        unpadded_input_ids = [f.input_ids for f in train_features]
        unpadded_input_mask = [f.input_mask for f in train_features]
        unpadded_segment_ids = [f.segment_ids for f in train_features]
        # Hierarchical inputs are padded to a fixed number of rows per document.
        if self.args.is_hierarchical:
            pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
            pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
            pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
        print("Number of examples: ", len(self.train_examples))
        print("Batch size:", self.args.batch_size)
        print("Num of steps:", self.num_train_optimization_steps)
        padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
        padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
        padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
        label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, label_ids)
        # Distributed runs shard the data; single-process runs just shuffle.
        if self.args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=self.args.batch_size)
        for epoch in trange(int(self.args.epochs), desc="Epoch"):
            loss_epoch = self.train_epoch(train_dataloader)
            dev_evaluator = BertEvaluator(self.model, self.processor, self.args, split='dev')
            dev_acc, dev_precision, dev_recall, dev_f1, dev_loss, dev_f1_macro, dev_hamming_loss, dev_jaccard_score, dev_predicted_labels, dev_target_labels = dev_evaluator.get_scores()[0]
            # Print validation results
            tqdm.write(self.log_header)
            tqdm.write(self.log_template.format(epoch + 1, self.iterations, epoch + 1, self.args.epochs,
                                                dev_acc, dev_precision, dev_recall, dev_f1, dev_loss, dev_f1_macro, dev_hamming_loss, dev_jaccard_score, loss_epoch))
            # Pick the early-stopping measure.
            if self.args.early_on_f1:
                # NOTE(review): recall == 1 forces the measure to 0 — presumably
                # to treat a predict-everything-positive model as no signal.
                if dev_recall != 1:
                    dev_measure = dev_f1
                else:
                    dev_measure = 0
                measure_name = 'F1'
            else:
                dev_measure = dev_acc
                measure_name = 'Balanced Acc'
            # Update validation results
            if dev_measure > self.best_dev_measure:
                self.unimproved_iters = 0
                self.best_dev_measure = dev_measure
                torch.save(self.model, self.snapshot_path)
            else:
                self.unimproved_iters += 1
                if self.unimproved_iters >= self.args.patience:
                    self.early_stop = True
                    print("Early Stopping. Epoch: {}, Best {}: {}".format(epoch, measure_name, self.best_dev_measure))
                    break
def train_gradually(self):
    """Fine-tune the model in stages ("gradual unfreezing").

    First trains only the classifier head via ``train_layer_qroup`` (the
    per-layer-group schedule for pooler/encoder layers is currently
    commented out), then unfreezes everything and runs up to
    ``args.epochs`` full epochs with early stopping on dev F1.
    Saves the best model to ``self.snapshot_path``.

    NOTE(review): semantics of ``train_layer_qroup``/``unfreez_all`` are
    defined elsewhere in this class -- assumed to freeze/unfreeze
    parameter groups; confirm against their definitions.
    """
    # Featurize: hierarchical mode splits documents into sentence blocks.
    if self.args.is_hierarchical:
        train_features = convert_examples_to_hierarchical_features(
            self.train_examples, self.args.max_seq_length, self.tokenizer)
    else:
        train_features = convert_examples_to_features(
            self.train_examples, self.args.max_seq_length, self.tokenizer)
    unpadded_input_ids = [f.input_ids for f in train_features]
    unpadded_input_mask = [f.input_mask for f in train_features]
    unpadded_segment_ids = [f.segment_ids for f in train_features]
    # Hierarchical inputs are ragged per-document; pad to max_doc_length.
    if self.args.is_hierarchical:
        pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
        pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
        pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
    print("Number of examples: ", len(self.train_examples))
    print("Batch size:", self.args.batch_size)
    print("Num of steps:", self.num_train_optimization_steps)
    padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
    padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
    padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
    label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
    train_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, label_ids)
    # local_rank == -1 means single-process training; otherwise distributed.
    if self.args.local_rank == -1:
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = DistributedSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=self.args.batch_size)
    # train gradually
    # Model directory = snapshot path without its final filename component.
    model_path = self.snapshot_path.split('/')[0:-1]
    model_path = Path('/'.join(model_path))
    # freeze all layers except classifier
    self.train_layer_qroup(train_dataloader,to_freeze_layer='classifier', model_path=model_path)
    # freeze all layers expect pooler and its subsequents
    '''self.train_layer_qroup(train_dataloader, to_freeze_layer='bert.pooler', model_path=model_path)
    for i in range(11,-1, -1):
        self.train_layer_qroup(train_dataloader, to_freeze_layer='bert.encoder.layer.'+str(i), model_path=model_path)'''
    self.unfreez_all()
    # Full fine-tuning with early stopping on dev F1.
    for epoch in trange(int(self.args.epochs), desc="Epoch"):
        self.train_epoch(train_dataloader)
        dev_evaluator = BertEvaluator(self.model, self.processor, self.args, split='dev')
        dev_acc, dev_precision, dev_recall, dev_f1, dev_loss, dev_f1_macro, dev_hamming_loss, dev_jaccard_score, dev_predicted_labels, dev_target_labels = dev_evaluator.get_scores()[0]
        # Print validation results
        tqdm.write(self.log_header)
        tqdm.write(self.log_template.format(epoch + 1, self.iterations, epoch + 1, self.args.epochs,
                                            dev_acc, dev_precision, dev_recall, dev_f1, dev_loss, dev_f1_macro, dev_hamming_loss, dev_jaccard_score))
        # Update validation results
        if dev_f1 > self.best_dev_f1:
            self.unimproved_iters = 0
            self.best_dev_f1 = dev_f1
            torch.save(self.model, self.snapshot_path)
        else:
            # Patience counter: stop after args.patience epochs without improvement.
            self.unimproved_iters += 1
            if self.unimproved_iters >= self.args.patience:
                self.early_stop = True
                tqdm.write("Early Stopping. Epoch: {}, Best Dev F1: {}".format(epoch, self.best_dev_f1))
                break
| 12,079 | 655 | 23 |
e3e23e7da12594d400ab1a9586a233be2c2e5204 | 10,834 | py | Python | check_elasticstack.py | icefish-creativ/check_elastic-stack | 7aad8dcbcff0291d8cb6bb64b2bab72feb9cd214 | [
"Apache-2.0"
] | null | null | null | check_elasticstack.py | icefish-creativ/check_elastic-stack | 7aad8dcbcff0291d8cb6bb64b2bab72feb9cd214 | [
"Apache-2.0"
] | null | null | null | check_elasticstack.py | icefish-creativ/check_elastic-stack | 7aad8dcbcff0291d8cb6bb64b2bab72feb9cd214 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# noris network AG 2020
# Tim Zöllner
__date__ = '2020-06-22'
__version__ = '0.4.2'
#from docopt import docopt
import argparse
import sys
import ssl
import json
import requests
from requests.auth import HTTPBasicAuth
from datetime import datetime
# check elasticsearch module
try:
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError, \
TransportError, \
ConnectionTimeout, \
NotFoundError, \
RequestError
except ImportError as missing:
print (
'Error - could not import all required Python modules\n"%s"'
% missing + '\nDependency installation with pip:\n'
'"# pip install docopt elasticsearch"'
'or use your prefered package manage, i.e. APT or YUM.\n Example: yum install python-docopt python-elasticsearch')
sys.exit(2)
#ssl._create_default_https_context = ssl._create_unverified_context
if __name__ == '__main__':
    # Nagios plugin entry point: parse the CLI, run exactly one check and
    # exit through check_status() with the matching Nagios return code.
    args = parser_command_line()
    if args.subparser_name == 'cluster':
        API_CLUSTER_HEALTH = 'https://{}:9200/_cluster/health'.format(
            args.client_node
        )
        if args.cluster_health:
            result = getAPI(API_CLUSTER_HEALTH)
            check_cluster_health(
                result['status'],
                args.perf_data,
                args.only_graph,
            )
    if args.subparser_name == 'node':
        API_NODES_STATS = 'https://{}:9200/_nodes/{}/stats'.format(
            args.client_node,
            args.node_name,
        )
        if args.heap_used_percent:
            result = getAPI(API_NODES_STATS)
            # Bug fix: dict.values() is a non-indexable view on Python 3, so
            # the old ``.values()[0]`` only worked on Python 2. list() makes
            # it portable; the "nodes" mapping holds a single entry here
            # because the stats URL targets one node.
            node = list(result["nodes"].values())[0]
            check_heap_used_percent(
                node['jvm']['mem']['heap_used_percent'],
                args.perf_data,
                args.only_graph,
            )
        if args.documents_count:
            result = getAPI(API_NODES_STATS)
            node = list(result["nodes"].values())[0]
            check_documents_count(
                node['indices']['docs']['count'],
                args.perf_data,
                args.only_graph,
            )
        if args.ratio_search_query_time:
            result = getAPI(API_NODES_STATS)
            node = list(result["nodes"].values())[0]
            query_time_in_millis = float(
                node['indices']['search']['query_time_in_millis']
            )
            query_total = float(
                node['indices']['search']['query_total']
            )
            # Average time spent per search query, in milliseconds.
            ratio = round(query_time_in_millis / query_total, 2)
            check_ratio_search_query_time(
                ratio,
                args.perf_data,
                args.only_graph,
            )
    if args.subparser_name == 'indices':
        # NOTE(review): this client object is never used below -- kept for
        # backward compatibility, candidate for removal.
        es = Elasticsearch(host=args.client_node)
        if args.last_entry:
            API_ALIASES = 'https://{}:9200/{}/_alias'
            if args.index:
                pattern = args.index
            elif args.prefix:
                pattern = args.prefix + "*"
            else:
                print("Invalid index name or prefix")
                sys.exit(1)
            # The alias endpoint returns every matching index; take the
            # lexicographically last one, i.e. the newest for
            # date-stamped index names.
            index = get_indices(
                API_ALIASES.format(
                    args.client_node,
                    pattern,
                )
            )[-1]
            last_timestamp = get_last_timestamp(
                index=index,
            )
            # Bug fix: timedelta.seconds ignores whole days (wraps at 24 h);
            # total_seconds() gives the true age of the last entry.
            timedelta = int((datetime.utcnow() - last_timestamp).total_seconds())
            check_last_entry(
                timedelta,
                args.perf_data,
                args.only_graph,
            )
| 25.313084 | 122 | 0.555012 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# noris network AG 2020
# Tim Zöllner
__date__ = '2020-06-22'
__version__ = '0.4.2'
#from docopt import docopt
import argparse
import sys
import ssl
import json
import requests
from requests.auth import HTTPBasicAuth
from datetime import datetime
# check elasticsearch module
try:
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError, \
TransportError, \
ConnectionTimeout, \
NotFoundError, \
RequestError
except ImportError as missing:
print (
'Error - could not import all required Python modules\n"%s"'
% missing + '\nDependency installation with pip:\n'
'"# pip install docopt elasticsearch"'
'or use your prefered package manage, i.e. APT or YUM.\n Example: yum install python-docopt python-elasticsearch')
sys.exit(2)
#ssl._create_default_https_context = ssl._create_unverified_context
def getAPI(url):
    """Fetch *url* with HTTP basic auth and return the JSON-decoded body.

    Relies on the module-level ``args`` namespace for the credentials
    (``es_user``/``es_password``) and the CA bundle path (``cert_path``).
    On any request or decoding failure it prints a Nagios CRITICAL line
    and exits with status 2, so it never returns in the error case.
    """
    try:
        req = requests.get(
            url,
            auth=HTTPBasicAuth(args.es_user, args.es_password),
            verify=args.cert_path,
        )
        return json.loads(req.text)
    except Exception as err:
        # A bare ``except:`` also swallowed SystemExit/KeyboardInterrupt and
        # hid the failure reason; catch Exception and report the cause while
        # keeping the Nagios "CRITICAL - " prefix and exit code 2.
        print("CRITICAL - Unable to get API url '{}': {}".format(url, err))
        sys.exit(2)
def is_number(x):
    """Return True when *x* is a numeric value (int, float or complex).

    Note that bool passes too, since bool is a subclass of int.
    """
    numeric_types = (int, float, complex)
    return isinstance(x, numeric_types)
def check_status(
    value,
    message,
    only_graph=False,
    critical=None,
    warning=None,
    ok=None,
):
    """Print *message* with a Nagios prefix and exit with the matching code.

    Exit codes follow the Nagios plugin convention:
    0 OK, 1 WARNING, 2 CRITICAL, 3 UNKNOWN.

    Two threshold modes:

    * numeric -- when value, critical and warning are all numbers,
      ``value >= critical`` is CRITICAL and ``value >= warning`` WARNING;
    * membership -- otherwise value is tested with ``in`` against the
      critical / warning / ok containers (used for cluster colours).

    NOTE(review): in the membership branch a ``None`` threshold makes
    ``value in critical`` raise TypeError -- callers must supply all three
    containers whenever value is non-numeric.
    """
    if only_graph:
        # Graph-only mode: emit the message (incl. perfdata) and always exit OK.
        print("{}".format(message))
        sys.exit(0)
    if (is_number(value) and is_number(critical) and is_number(warning)):
        if value >= critical:
            print("CRITICAL - {}".format(message))
            sys.exit(2)
        elif value >= warning:
            print("WARNING - {}".format(message))
            sys.exit(1)
        else:
            print("OK - {}".format(message))
            sys.exit(0)
    else:
        if value in critical:
            print("CRITICAL - {}".format(message))
            sys.exit(2)
        elif value in warning:
            print("WARNING - {}".format(message))
            sys.exit(1)
        elif value in ok:
            print("OK - {}".format(message))
            sys.exit(0)
        else:
            print("UNKNOWN - Unexpected value: {}".format(value))
            sys.exit(3)
def parser_command_line():
    """Build the CLI and return the parsed arguments namespace.

    Layout: common connection/output options on the root parser, plus one
    subcommand per check group (``cluster``, ``node``, ``indices``); the
    chosen group is exposed as ``args.subparser_name``.
    """
    parser = argparse.ArgumentParser(
        description='Elasticsearch Nagios checks'
    )
    subparsers = parser.add_subparsers(
        help='All Elasticsearch checks groups',
        dest='subparser_name',
    )
    # Common args
    parser.add_argument(
        '-n',
        '--node-name',
        default='_local',
        help='Node name in the Cluster',
        dest='node_name',
    )
    parser.add_argument(
        '-C',
        '--cert-path',
        default='_local',
        help='Path to Certificate',
        dest='cert_path',
    )
    parser.add_argument(
        '-c',
        '--client-node',
        default='localhost',
        help='Client node name (FQDN) for HTTP communication',
        dest='client_node',
    )
    parser.add_argument(
        '-D',
        '--perf-data',
        action='store_true',
        help='Enable Nagios performance data (Valid for all checks groups)',
        dest='perf_data',
    )
    parser.add_argument(
        '-G',
        '--only-graph',
        action='store_true',
        help='Enable Nagios to print only message',
        dest='only_graph',
    )
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version='%(prog)s {}'.format(__version__)
    )
    parser.add_argument(
        '-u',
        '--user',
        help='User who has access to elasticsearch',
        dest= 'es_user'
    )
    parser.add_argument(
        '-p',
        '--password',
        help='Password for User who has access to elasticsearch',
        dest= 'es_password'
    )
    # Cluster Checks
    cluster = subparsers.add_parser(
        'cluster',
        help='All Cluster checks',
    )
    cluster.add_argument(
        '--cluster-health',
        action='store_true',
        help='Check the Cluster health (green, yellow, red)',
    )
    # Node Checks
    node = subparsers.add_parser(
        'node',
        help='All Node checks',
    )
    node.add_argument(
        '--heap-used-percent',
        action='store_true',
        help='Check the Heap used percent',
    )
    node.add_argument(
        '--documents-count',
        action='store_true',
        help='Documents on node',
    )
    node.add_argument(
        '--ratio-search-query-time',
        action='store_true',
        help='Ratio search query_time_in_millis/query_total',
    )
    # Indices Checks
    indices = subparsers.add_parser(
        'indices',
        help='All indices checks',
    )
    indices.add_argument(
        '--index',
        default=None,
        help='Index name',
    )
    indices.add_argument(
        '--prefix',
        default=None,
        help='Include only indices beginning with prefix',
    )
    indices.add_argument(
        '--doc-type',
        default=None,
        help='Include only documents with doc-type',
    )
    indices.add_argument(
        '--last-entry',
        action='store_true',
        help='Check last entry in the index. Only for timestamp in UTC',
    )
    return parser.parse_args()
def check_cluster_health(
    result,
    perf_data=None,
    only_graph=False
):
    """Translate the cluster health colour into a Nagios status.

    green -> OK, yellow -> WARNING, red -> CRITICAL; exits via
    check_status().
    """
    message = 'The cluster health status is {}'.format(result)
    if perf_data:
        # Numeric encoding of the colour for Nagios performance graphs.
        status_codes = {
            'green': 2,
            'yellow': 1,
            'red': 0,
        }
        message += " | cluster_status={}".format(status_codes[result])
    check_status(
        result,
        message,
        only_graph,
        critical='red',
        warning='yellow',
        ok='green',
    )
def check_heap_used_percent(
    result,
    perf_data=None,
    only_graph=False,
    critical=None,
    warning=None,
):
    """Alert on JVM heap usage in percent (defaults: warn 75, crit 90).

    Exits via check_status(); falsy thresholds fall back to the defaults.
    """
    critical = 90 if not critical else critical
    warning = 75 if not warning else warning
    message = 'The Heap used percent is {}%'.format(result)
    if perf_data:
        message = "{} | heap_used_percent={}".format(message, result)
    check_status(result, message, only_graph, critical, warning)
def check_documents_count(
    result,
    perf_data=None,
    only_graph=False,
    critical=None,
    warning=None,
):
    """Report the node's document count through check_status().

    NOTE(review): the fallback thresholds are both 0 and check_status()
    treats ``value >= critical`` as CRITICAL, so with defaults any
    non-negative count alarms -- confirm callers pass explicit thresholds.
    """
    critical = critical if critical else 0
    warning = warning if warning else 0
    message = 'The documents count is {}'.format(result)
    if perf_data:
        message = "{} | documents_count={}".format(message, result)
    check_status(result, message, only_graph, critical, warning)
def check_ratio_search_query_time(
    result,
    perf_data=None,
    only_graph=False,
    critical=None,
    warning=None,
):
    """Report the search-time ratio (query_time_in_millis / query_total).

    Exits via check_status(); falsy thresholds fall back to 0, mirroring
    check_documents_count().
    """
    critical = critical if critical else 0
    warning = warning if warning else 0
    message = 'The ratio query_time_in_millis/query_total is {}'.format(result)
    if perf_data:
        message = "{} | ratio_search_query_time={}".format(message, result)
    check_status(result, message, only_graph, critical, warning)
def check_last_entry(
    result,
    perf_data=None,
    only_graph=False,
    critical=None,
    warning=None,
):
    """Alert on the age in seconds of the newest indexed document.

    Defaults: warn at 60 s, critical at 120 s; exits via check_status().
    """
    critical = 120 if not critical else critical
    warning = 60 if not warning else warning
    message = 'Last entry {} seconds ago'.format(result)
    if perf_data:
        message = "{} | seconds={}".format(message, result)
    check_status(result, message, only_graph, critical, warning)
def get_indices(url):
    """Return the index names behind *url*'s alias endpoint, sorted.

    getAPI() returns a dict keyed by index name; iterating the dict
    yields exactly those keys.
    """
    return sorted(getAPI(url))
def get_last_timestamp(index):
    """Return the ``@timestamp`` of the newest document in *index*.

    Queries the cluster named in the module-level ``args`` for the single
    most recent document and parses its timestamp as a naive datetime,
    discarding fractional seconds. Raises IndexError/KeyError if the
    index has no hits -- presumably acceptable for this plugin; confirm.
    """
    # Bug fix: the URL previously interpolated ``args.index`` instead of the
    # *index* argument. Callers resolve the concrete index (e.g. from a
    # --prefix pattern) and pass it here, and args.index may be None in that
    # case, which produced a request for an index literally named "None".
    api_last_entry = (
        'https://{}:9200/{}/_search'
        '?sort=@timestamp:desc&size=1&_source=@timestamp'
    ).format(args.client_node, index)
    resultquery = getAPI(api_last_entry)
    # The stray debug print of the whole response was removed: Nagios
    # plugins must emit only the status line on stdout.
    return datetime.strptime(
        resultquery['hits']['hits'][0]['_source']['@timestamp'].split(".")[0],
        "%Y-%m-%dT%H:%M:%S"
    )
if __name__ == '__main__':
    # Nagios plugin entry point: parse the CLI, run exactly one check and
    # exit through check_status() with the matching Nagios return code.
    args = parser_command_line()
    if args.subparser_name == 'cluster':
        API_CLUSTER_HEALTH = 'https://{}:9200/_cluster/health'.format(
            args.client_node
        )
        if args.cluster_health:
            result = getAPI(API_CLUSTER_HEALTH)
            check_cluster_health(
                result['status'],
                args.perf_data,
                args.only_graph,
            )
    if args.subparser_name == 'node':
        API_NODES_STATS = 'https://{}:9200/_nodes/{}/stats'.format(
            args.client_node,
            args.node_name,
        )
        if args.heap_used_percent:
            result = getAPI(API_NODES_STATS)
            # Bug fix: dict.values() is a non-indexable view on Python 3, so
            # the old ``.values()[0]`` only worked on Python 2. list() makes
            # it portable; the "nodes" mapping holds a single entry here
            # because the stats URL targets one node.
            node = list(result["nodes"].values())[0]
            check_heap_used_percent(
                node['jvm']['mem']['heap_used_percent'],
                args.perf_data,
                args.only_graph,
            )
        if args.documents_count:
            result = getAPI(API_NODES_STATS)
            node = list(result["nodes"].values())[0]
            check_documents_count(
                node['indices']['docs']['count'],
                args.perf_data,
                args.only_graph,
            )
        if args.ratio_search_query_time:
            result = getAPI(API_NODES_STATS)
            node = list(result["nodes"].values())[0]
            query_time_in_millis = float(
                node['indices']['search']['query_time_in_millis']
            )
            query_total = float(
                node['indices']['search']['query_total']
            )
            # Average time spent per search query, in milliseconds.
            ratio = round(query_time_in_millis / query_total, 2)
            check_ratio_search_query_time(
                ratio,
                args.perf_data,
                args.only_graph,
            )
    if args.subparser_name == 'indices':
        # NOTE(review): this client object is never used below -- kept for
        # backward compatibility, candidate for removal.
        es = Elasticsearch(host=args.client_node)
        if args.last_entry:
            API_ALIASES = 'https://{}:9200/{}/_alias'
            if args.index:
                pattern = args.index
            elif args.prefix:
                pattern = args.prefix + "*"
            else:
                print("Invalid index name or prefix")
                sys.exit(1)
            # The alias endpoint returns every matching index; take the
            # lexicographically last one, i.e. the newest for
            # date-stamped index names.
            index = get_indices(
                API_ALIASES.format(
                    args.client_node,
                    pattern,
                )
            )[-1]
            last_timestamp = get_last_timestamp(
                index=index,
            )
            # Bug fix: timedelta.seconds ignores whole days (wraps at 24 h);
            # total_seconds() gives the true age of the last entry.
            timedelta = int((datetime.utcnow() - last_timestamp).total_seconds())
            check_last_entry(
                timedelta,
                args.perf_data,
                args.only_graph,
            )
| 6,909 | 0 | 253 |
4c3b6b4167ba28cc061c2c1060cef6caaa82766e | 8,930 | py | Python | sed2tau/plot_data.py | masato1122/md2tau | cd9998f1da13887cbc39e70224ffed5a86fc8c11 | [
"MIT"
] | 3 | 2021-12-19T02:24:15.000Z | 2022-01-09T23:40:49.000Z | sed2tau/plot_data.py | masato1122/md2tau | cd9998f1da13887cbc39e70224ffed5a86fc8c11 | [
"MIT"
] | null | null | null | sed2tau/plot_data.py | masato1122/md2tau | cd9998f1da13887cbc39e70224ffed5a86fc8c11 | [
"MIT"
] | 3 | 2021-12-18T08:02:15.000Z | 2021-12-27T22:55:25.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
#--- for matplotlib
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from matplotlib import cm
#import seaborn as sns
#-------------------------
NUM_MULTIPLE = 5
NUM_SECTION = 500
#-------------------------
#--- plot data
#--- plot data with Lorentzian function
| 29.183007 | 98 | 0.53617 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
#--- for matplotlib
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from matplotlib import cm
#import seaborn as sns
def bubble_sort_down_ID(array):
    """Return indices that order *array* in descending value order.

    Replaces an O(n^2) hand-rolled bubble sort with Python's O(n log n)
    Timsort. Stability is preserved: the original bubble sort only swapped
    on a strict ">", so ties kept their original relative order, exactly
    as ``sorted(..., reverse=True)`` guarantees.

    Returns a NumPy int array, matching the old ``np.arange``-based result.
    """
    order = sorted(range(len(array)), key=lambda i: array[i], reverse=True)
    return np.array(order, dtype=int)
def bubble_sort_down(xdat, ydat):
    """Return (x, y) as float arrays re-ordered so y is descending.

    Stable for ties, like the original. Fixes the pointless inner
    ``for j in range(2)`` loop (which assigned the same pair twice) and
    avoids building an intermediate (n, 2) matrix: we sort index
    positions by y and gather both columns from them.
    """
    order = sorted(range(len(xdat)), key=lambda i: ydat[i], reverse=True)
    # float() mirrors the old behaviour of filling float64 np.zeros arrays.
    xnew = np.array([float(xdat[i]) for i in order])
    ynew = np.array([float(ydat[i]) for i in order])
    return xnew, ynew
#-------------------------
NUM_MULTIPLE = 5
NUM_SECTION = 500
#-------------------------
def cal_ID_col(ID_sort, Ncol):
    """Assign a colour bucket (0..Ncol) to each datum.

    Depends on the module-level NUM_SECTION constant: every ``Neach``
    consecutive indices share a bucket, clamped to 0 at the bottom.

    NOTE(review): the bucket is computed from the data index ``num``
    rather than its rank in ID_sort; since ID_sort is a permutation, the
    sort order only affects iteration order -- possibly ``ii`` was meant.
    """
    ndat = len(ID_sort)
    Neach = int(ndat / NUM_SECTION)
    ID_col = [0] * ndat
    for num in ID_sort:
        bucket = Ncol - int(num / Neach)
        if bucket < 0.:
            bucket = 0.
        ID_col[num] = bucket
    return ID_col
def cal_ID_col2(ndat, Ncol):
    """Assign a colour bucket (0..Ncol) to positions 0..ndat-1.

    Position ``ii`` gets ``Ncol - ii // Neach`` clamped at 0, where Neach
    comes from the module-level NUM_SECTION constant. Returns a NumPy
    int array.
    """
    Neach = int(ndat / NUM_SECTION)
    ID_col = np.arange(ndat)
    for ii in range(ndat):
        bucket = Ncol - int(ii / Neach)
        ID_col[ii] = bucket if bucket >= 0. else 0.
    return ID_col
def cal_ID_plot(ndat, nskip):
    """Select indices to plot: the first 10% densely, then every nskip-th.

    Returns a NumPy int array of the kept indices in ascending order.

    Bug fix: callers compute ``nskip = int(len(data) / nplot)``, which is
    0 for data sets smaller than nplot and made ``i % nskip`` raise
    ZeroDivisionError (the callers even carry a commented-out guard for
    this). A non-positive nskip now falls back to 1, i.e. keep everything.
    """
    if nskip < 1:
        nskip = 1
    dense_limit = ndat * 0.1
    keep = [i for i in range(ndat) if i <= dense_limit or i % nskip == 0]
    return np.array(keep, dtype=int)
#--- plot data
def Plot_Data_Peaks(FNAME, xdat, ydat, L_PEAK, W_PEAK, I_PEAK):
    """Render the SED spectrum as a multi-panel log plot with peak markers.

    The 0..55 THz axis is split over NUM_MULTIPLE stacked panels; data are
    colour-coded by magnitude (top (100/NUM_SECTION)% per colour section)
    and each detected peak gets a labelled vertical line. Saves a PNG to
    *FNAME* and returns 0.

    Parameters (assumed from usage -- confirm against callers):
    FNAME output path; xdat/ydat frequency [THz] and SED values;
    L_PEAK labels, W_PEAK frequencies and I_PEAK data indices of peaks.

    NOTE(review): Python 2 file (print statement below); xwidth uses
    integer division, intentional only under Python 2 semantics.
    """
    ndat = len(xdat)
    npeak = len(L_PEAK)
    ymin = np.amin(ydat) * 0.1
    ymax = np.amax(ydat) * 2.0
    #---------
    Ndiv = NUM_MULTIPLE
    #---------
    xmax = 55.
    #--- size & aspect ratio
    ratio = 0.2 * float(Ndiv)  # NOTE(review): unused
    xwidth = 40/Ndiv + 2
    ywidth = xwidth * 0.2 * Ndiv
    fig = plt.figure(figsize=(xwidth, ywidth))
    #-- sort ydata for color
    Ncol = 11
    PER_SEC = 100./float(NUM_SECTION) #--- (100/Nsec)% data is in a section.
    Neach = int(ndat / NUM_SECTION)  # NOTE(review): unused here
    #--- ver.1
    #ID_sort = bubble_sort_down_ID(ydat)
    #ID_col = cal_ID_col(ID_sort, Ncol)
    #--- ver.2
    xsort, ysort = bubble_sort_down(xdat, ydat)
    ID_col = cal_ID_col2(len(xsort), Ncol)
    #-- skip data
    nplot = 2000; nskip = int(len(xsort) / nplot)
    ID_plot = cal_ID_plot(len(xsort), nskip)
    '''
    if nskip == 0:
        nskip = 1
    if nplot > len(xsort):
        nplot = len(xsort)
    ID_plot = np.zeros(nplot, dtype=int)
    for i in range(nplot):
        ID_plot[i] = i * nskip
    '''
    plt.rcParams["font.size"] = 10
    #--- multiple plot
    for isec in range(Ndiv):
        plt.subplot(Ndiv,1,isec+1)
        # Panel frequency window, with small 0.2 THz overlap at both edges.
        x0 = xmax * float(isec)/ float(Ndiv) - 0.2
        x1 = xmax * float(isec+1)/ float(Ndiv) + 0.2
        if isec == Ndiv - 1:
            x1 = 52.
        ax = fig.add_subplot(Ndiv,1,isec+1)
        ax.grid(color='grey', axis='both', linestyle='solid', linewidth=0.4, which='major')
        ax.grid(color='grey', axis='x', linestyle='dashed', linewidth=0.2, which='minor')
        plt.gca().xaxis.set_major_locator(tick.MultipleLocator(0.5))
        plt.gca().xaxis.set_minor_locator(tick.MultipleLocator(0.1))
        plt.tick_params(axis='both', which='both', direction='in')
        plt.xlim([x0, x1])
        #-- ylabel
        plt.ylim([ymin, ymax])
        plt.yscale("log")
        plt.ylabel('SED (-)')
        #-- 2. raw dat
        cm = plt.cm.get_cmap('rainbow', Ncol)
        #sc = plt.scatter(xdat[ID_sort2], ydat[ID_sort2], c=ID_col2, s=5, cmap=cm)
        sc = plt.scatter(xsort[ID_plot], ysort[ID_plot], c=ID_col[ID_plot], s=5, cmap=cm)
        if isec == Ndiv-1:
            # Colour bar and x label only on the bottom panel.
            cbar = plt.colorbar(sc)
            #cbar.set_label(size=14)
            cbar.ax.set_ylabel('%4.2f%% data \nin a section'%(PER_SEC), fontsize=10)
            plt.xlabel('Frequency (THz)')
        #--- 3. peaks
        pcount = 0
        for ip in range(npeak):
            # Only draw peaks that fall inside this panel's window.
            if W_PEAK[ip] <= x0 or x1 <= W_PEAK[ip]:
                continue
            c = plt.cm.Vega10(float(ip) / float(npeak))
            plt.axvline(W_PEAK[ip], linewidth=1.0, color=c, label=I_PEAK[ip])
            plt.text(W_PEAK[ip], ydat[I_PEAK[ip]], L_PEAK[ip], ha = 'left', va = 'bottom', size=6)
            pcount += 1
        if pcount != 0:
            plt.legend(ncol=18, loc='lower left', handlelength=0.5, fontsize=10)
    plt.savefig(FNAME, format = 'png', dpi=200)
    # Busy-wait until the PNG appears on disk before reporting it.
    while os.path.exists(FNAME) == False:
        pass
    print "Output:", FNAME
    return 0
#--- plot data with Lorentzian function
def cal_lorentzian(x, center, sigma, amp):
    """Evaluate a Lorentzian of area *amp*, centre *center*, HWHM *sigma*.

    Accepts scalars or NumPy arrays for *x*.
    """
    height = amp / sigma / np.pi
    detuning = (x - center) / sigma
    return height / (1. + np.power(detuning, 2))
def Plot_Data_Lorentzian(FNAME, xdat, ydat, L_PEAK, W_PEAK, I_PEAK, S_LOR, A_LOR):
    """Render the SED spectrum with fitted Lorentzian curves per peak.

    Like Plot_Data_Peaks, but instead of vertical markers each peak gets
    its fitted Lorentzian drawn over +/-0.3 THz and annotated with the
    phonon lifetime tau = 1/(2*sigma) in picoseconds. Saves a PNG to
    *FNAME* and returns 0.

    Parameters (assumed from usage -- confirm against callers):
    S_LOR Lorentzian HWHMs, A_LOR amplitudes per peak; the rest as in
    Plot_Data_Peaks.

    NOTE(review): Python 2 file (print statement below).
    """
    ndat = len(xdat)
    npeak = len(L_PEAK)
    ymin = np.amin(ydat) * 0.1  # NOTE(review): unused (ylim is commented out)
    ymax = np.amax(ydat) * 2.0
    #---------
    Ndiv = NUM_MULTIPLE
    #---------
    xmax = 55.
    #--- cal. tau [ps]
    # Lifetime from linewidth: tau = 1 / (2 * sigma), formatted in ps.
    TAU_LABEL = []
    for i in range(npeak):
        tau = "%.0fps"%(1./2./S_LOR[i])
        TAU_LABEL.append(tau)
    #--- size & aspect ratio
    ratio = 0.2 * float(Ndiv)  # NOTE(review): unused
    xwidth = 40/Ndiv + 2
    ywidth = xwidth * 0.2 * Ndiv
    fig = plt.figure(figsize=(xwidth, ywidth))
    #-- sort ydata for color
    Ncol = 11
    PER_SEC = 100./float(NUM_SECTION) #--- (100/Nsec)% data is in a section.
    Neach = int(ndat / NUM_SECTION)  # NOTE(review): unused here
    #--- ver.1
    #ID_sort = bubble_sort_down_ID(ydat)
    #ID_col = cal_ID_col(ID_sort, Ncol)
    #--- ver.2
    xsort, ysort = bubble_sort_down(xdat, ydat)
    ID_col = cal_ID_col2(len(xsort), Ncol)
    #--- skip data
    nplot = 2000; nskip = int(len(xsort) / nplot)
    ID_plot = cal_ID_plot(len(xsort), nskip)
    '''
    if nskip == 0: nskip = 1
    if nplot > len(xsort):
        nplot = len(xsort)
    ID_plot = [int(0) for i in range(nplot)]
    for i in range(nplot):
        ID_plot[i] = i * nskip
    '''
    plt.rcParams["font.size"] = 10
    #--- multiple plot
    for isec in range(Ndiv):
        plt.subplot(Ndiv,1,isec+1)
        # Panel frequency window, with small 0.2 THz overlap at both edges.
        x0 = xmax * float(isec)/ float(Ndiv) - 0.2
        x1 = xmax * float(isec+1)/ float(Ndiv) + 0.2
        if isec == Ndiv - 1:
            x1 = 52
        ax = fig.add_subplot(Ndiv,1,isec+1)
        ax.grid(color='grey', axis='both', linestyle='dashed', linewidth=0.2)
        plt.gca().xaxis.set_major_locator(tick.MultipleLocator(1))
        plt.gca().xaxis.set_minor_locator(tick.MultipleLocator(0.1))
        plt.tick_params(axis='both', which='both', direction='in')
        plt.xlim([x0, x1])
        #-- ylabel
        #plt.ylim([ymin, ymax])
        plt.yscale("log")
        plt.ylabel('SED (-)')
        #-- 2. raw dat
        cm = plt.cm.get_cmap('rainbow', Ncol)
        #sc = plt.scatter(xdat[ID_sort2], ydat[ID_sort2], c=ID_col2, s=5, cmap=cm)
        sc = plt.scatter(xsort[ID_plot], ysort[ID_plot], c=ID_col[ID_plot], s=2, cmap=cm)
        if isec == Ndiv-1:
            # Colour bar and x label only on the bottom panel.
            cbar = plt.colorbar(sc)
            cbar.ax.set_ylabel('%4.2f%% data \nin a section'%(PER_SEC), fontsize=10)
            plt.xlabel('Frequency (THz)')
        #--- 3. peaks
        pcount = 0
        for ip in range(npeak):
            # Only draw peaks that fall inside this panel's window.
            if W_PEAK[ip] <= x0 or x1 <= W_PEAK[ip]:
                continue
            c = plt.cm.Vega10(float(ip) / float(npeak))
            #--- local Lorentzian curve
            # Evaluate the fit over +/-0.3 THz, clipped to the panel window.
            xlor_min = W_PEAK[ip] - 0.3; xlor_max = W_PEAK[ip] + 0.3
            if xlor_min < x0:
                xlor_min = x0
            if xlor_max > x1:
                xlor_max = x1
            xlor = np.linspace(xlor_min, xlor_max, 100)
            ylor = cal_lorentzian(xlor, W_PEAK[ip], S_LOR[ip], A_LOR[ip])
            # Anchor the tau label 60% of the way along the curve.
            xlabel = xlor[int(len(xlor) * 0.6)]
            ylabel = ylor[int(len(xlor) * 0.6)]
            #--- plot
            plt.plot(xlor, ylor, color=c, label=L_PEAK[ip])
            plt.text(xlabel, ylabel, TAU_LABEL[ip], ha='left', va='bottom', size=5)
            pcount += 1
        #if pcount != 0:
        #    plt.legend(ncol=18, loc='lower left', handlelength=0.5, fontsize=10)
    plt.savefig(FNAME, format = 'png', dpi=200)
    # Busy-wait until the PNG appears on disk before reporting it.
    while os.path.exists(FNAME) == False:
        pass
    print "Output:", FNAME
    return 0
| 8,342 | 0 | 182 |
81ad25428f31193be3b7aecbee80e2115ff5b33f | 68 | py | Python | blitzdb/fields/email.py | marcinguy/blitzdb3 | 8b8bca02b205d7ff33d3902e5abb166e10a7b624 | [
"MIT"
] | 252 | 2015-01-02T13:05:12.000Z | 2021-12-29T13:36:47.000Z | blitzdb/fields/email.py | epatters/blitzdb | 4b459e0bcde9e1f6224dd4e3bea74194586864b0 | [
"MIT"
] | 33 | 2015-01-09T20:05:10.000Z | 2019-11-08T15:48:34.000Z | blitzdb/fields/email.py | epatters/blitzdb | 4b459e0bcde9e1f6224dd4e3bea74194586864b0 | [
"MIT"
] | 39 | 2015-01-20T01:15:04.000Z | 2022-03-26T01:01:15.000Z | from .base import BaseField
| 11.333333 | 28 | 0.75 | from .base import BaseField
class EmailField(BaseField):
    """Schema field for e-mail address values.

    NOTE(review): no e-mail-specific validation is implemented here --
    behaviour is entirely inherited from BaseField.
    """
    pass
| 0 | 16 | 23 |
f488edc9a5fbefdb2ef2b84d17467fdbe625e9e7 | 1,659 | py | Python | tests/test_cookpad.py | dehlen/recipe-scrapers | c3cbe757b0b39b399721c37d488c77125593eb2d | [
"MIT"
] | null | null | null | tests/test_cookpad.py | dehlen/recipe-scrapers | c3cbe757b0b39b399721c37d488c77125593eb2d | [
"MIT"
] | null | null | null | tests/test_cookpad.py | dehlen/recipe-scrapers | c3cbe757b0b39b399721c37d488c77125593eb2d | [
"MIT"
] | null | null | null | from tests import ScraperTest
from recipe_scrapers.cookpad import CookPad
| 33.18 | 299 | 0.582881 | from tests import ScraperTest
from recipe_scrapers.cookpad import CookPad
class TestCookPadScraper(ScraperTest):
    """Fixture-driven tests for the CookPad scraper.

    ScraperTest loads a saved cookpad.com recipe page (butter chicken
    curry, recipe 4610651) into ``self.harvester_class``; each test pins
    one extracted field against the known page content.
    """

    scraper_class = CookPad

    def test_host(self):
        self.assertEqual("cookpad.com", self.harvester_class.host())

    def test_title(self):
        self.assertEqual(self.harvester_class.title(), "30分で簡単♡本格バターチキンカレー♡")

    def test_yields(self):
        self.assertEqual("4人分 serving(s)", self.harvester_class.yields())

    def test_image(self):
        self.assertEqual(
            "https://img.cpcdn.com/recipes/4610651/640x640c/6de3ac788480ce2787e5e39714ef0856?u=6992401&p=1519025894",
            self.harvester_class.image(),
        )

    def test_ingredients(self):
        # Order-insensitive comparison of the full ingredient list.
        self.assertCountEqual(
            [
                "♥鶏モモ肉 500g前後",
                "♥玉ねぎ 2個",
                "♥にんにくチューブ 5cm",
                "♥生姜チューブ 5cm(なくても♡)",
                "♥カレー粉 大さじ1と1/2",
                "♥バター 大さじ2+大さじ3(60g)",
                "*トマト缶 1缶",
                "*コンソメ 小さじ1",
                "*塩 小さじ(1〜)2弱",
                "*砂糖 小さじ2",
                "*水 100ml",
                "*ケチャップ 大さじ1",
                "♥生クリーム 100ml",
            ],
            self.harvester_class.ingredients(),
        )

    def test_instructions(self):
        # NOTE(review): the ``return`` is unnecessary for a unittest method
        # (assertEqual returns None) but harmless.
        return self.assertEqual(
            "鶏モモ肉 は一口大に、 玉ねぎ は薄切り(orみじん切り)にします♪\nフライパンに バター(大さじ2) を熱し、鶏肉 に 塩胡椒 をふり表面をこんがり焼きます♪\nお鍋に バター(大さじ3) にんにくチューブ 生姜チューブ 玉ねぎ を入れてあめ色になるまでじっくり炒めます♪\nカレー粉 を加えて弱火で3分くらい炒めます♪\n* と 鶏肉(油分も) を加えて沸騰したら火が通るまで(10分程)煮ます♪\n仕上げに 生クリーム を加えて混ぜ、温まったらすぐ火を止めます♪ 完成♡♡ 更に仕上げに生クリームをトッピングしました♡\n子供ごはんはこんな感じの盛り付けに♡♥",
            self.harvester_class.instructions(),
        )
| 2,050 | 208 | 23 |
1265adf15844bd9340b23f8ce9cab3e09814f72a | 170 | py | Python | extensions/python/qrgen/setup.py | tungdev1209/iOS_CI_Utils | a0ce44b3fd07010721cec8cad4ee43ebe073fcf4 | [
"MIT"
] | 63 | 2019-01-27T11:00:30.000Z | 2019-02-01T12:55:11.000Z | extensions/python/qrgen/setup.py | tungdev1209/iOS_CI_Utils | a0ce44b3fd07010721cec8cad4ee43ebe073fcf4 | [
"MIT"
] | null | null | null | extensions/python/qrgen/setup.py | tungdev1209/iOS_CI_Utils | a0ce44b3fd07010721cec8cad4ee43ebe073fcf4 | [
"MIT"
] | 27 | 2019-01-27T09:56:33.000Z | 2019-01-30T06:53:43.000Z | from setuptools import setup
setup(
name='qrgen',
version='0.0.1',
entry_points={
'console_scripts': [
'qrgen=qrgen:run'
]
}
) | 17 | 29 | 0.523529 | from setuptools import setup
setup(
name='qrgen',
version='0.0.1',
entry_points={
'console_scripts': [
'qrgen=qrgen:run'
]
}
) | 0 | 0 | 0 |
cdd64b5b3c28dec89269cfa05d6f14eb26232a4a | 1,758 | py | Python | newsroom/models.py | OulipianSummer/newsroom | 7622f9f12649c3d4ebc03c9da5ad184db95d6c49 | [
"MIT"
] | null | null | null | newsroom/models.py | OulipianSummer/newsroom | 7622f9f12649c3d4ebc03c9da5ad184db95d6c49 | [
"MIT"
] | null | null | null | newsroom/models.py | OulipianSummer/newsroom | 7622f9f12649c3d4ebc03c9da5ad184db95d6c49 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
from django.template.defaultfilters import slugify
| 31.963636 | 103 | 0.687144 | from django.db import models
from django.urls import reverse
from django.template.defaultfilters import slugify
class Article(models.Model):
    """A news article; its URL slug is derived from the title on first save."""
    title = models.CharField(max_length=255)
    body = models.TextField()
    image = models.URLField()
    subtitle = models.CharField(max_length=255)
    alt = models.CharField(max_length=100)  # alt text for the image
    slug = models.SlugField(null=True, unique=True)
    # PROTECT: a Section/Author with articles cannot be deleted.
    section = models.ForeignKey(to="Section", related_name='article_section', on_delete=models.PROTECT)
    timestamp = models.DateTimeField(auto_now=True)  # updated on every save
    author = models.ForeignKey(to="Author", related_name='article_author', on_delete=models.PROTECT)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Detail pages are addressed by slug.
        return reverse('article_detail', kwargs={'slug': self.slug})

    def save(self, *args, **kwargs):
        # Generate the slug from the title only when missing, so later
        # title edits keep the original (stable) URL.
        if not self.slug:
            self.slug = slugify(self.title)
        return super().save(*args, **kwargs)
class Section(models.Model):
    """A section grouping articles (see Article.section)."""
    name = models.CharField(max_length=100, blank=True)
    slug = models.SlugField(null=True, unique=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # NOTE(review): still a stub -- returns None. Anything relying on
        # get_absolute_url() (e.g. admin "view on site") will misbehave
        # until this is implemented.
        pass

    def save(self, *args, **kwargs):
        # Slug is derived from the name on first save only.
        if not self.slug:
            self.slug = slugify(self.name)
        return super().save(*args, **kwargs)
class Author(models.Model):
    """A staff writer referenced by Article.author.

    NOTE(review): full_name and slug are stored alongside first/last name
    but nothing here populates them automatically -- presumably filled by
    forms or admin; confirm.
    """
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    full_name = models.CharField(max_length=255)
    slug = models.SlugField()
    title = models.CharField(max_length=50, blank=True)  # job title, optional
    bio = models.TextField(blank=True, max_length=300)
    picture = models.URLField(blank=True)

    def __str__(self):
        return self.full_name
dc0d12496a24a44ef0af787953cc533b35adbd1e | 5,062 | py | Python | exile/migrations/0001_initial.py | exildev/webpage | 0d545bd295f17e97f5c296939bc73e177e83ac55 | [
"MIT"
] | null | null | null | exile/migrations/0001_initial.py | exildev/webpage | 0d545bd295f17e97f5c296939bc73e177e83ac55 | [
"MIT"
] | null | null | null | exile/migrations/0001_initial.py | exildev/webpage | 0d545bd295f17e97f5c296939bc73e177e83ac55 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-14 22:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 39.24031 | 154 | 0.536942 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-14 22:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the "exile" app: a small CMS where a Page is made
    # of Seccion rows, plus a menu tree (Menu -> Item -> SubItem) whose
    # ordering is kept in explicit through-models (OrdenItem/OrdenSubItem),
    # and MenuPrincipal marks which Menu is the site's main one.

    initial = True

    # First migration of the app, so nothing to depend on.
    dependencies = [
    ]

    operations = [
        # Top-level menu entry. Its FKs to Page/SubItem are added below via
        # AddField because those models are created later in this list.
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=400)),
            ],
            options={
                'verbose_name': 'Item',
                'verbose_name_plural': "Item's",
            },
        ),
        # A menu: an ordered collection of Items (M2M added below).
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=400)),
            ],
        ),
        # Singles out one Menu as the site's main menu; OneToOne ensures a
        # menu can be "principal" at most once, and fecha records when it
        # was designated (auto_now_add).
        migrations.CreateModel(
            name='MenuPrincipal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateField(auto_now_add=True)),
                ('menu', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='exile.Menu')),
            ],
            options={
                'verbose_name': 'Menu principal',
                'verbose_name_plural': 'Menu principal',
            },
        ),
        # Through-model giving each Item an explicit position within a Menu.
        migrations.CreateModel(
            name='OrdenItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('posicion', models.IntegerField()),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.Item')),
                ('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.Menu')),
            ],
            options={
                'verbose_name': 'Orden de item',
                'verbose_name_plural': 'Orden de items',
            },
        ),
        # Through-model giving each SubItem an explicit position within an
        # Item; its FK to SubItem is added below (SubItem is created later).
        migrations.CreateModel(
            name='OrdenSubItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('posicion', models.IntegerField()),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.Item')),
            ],
            options={
                'verbose_name': 'Orden de subitem',
                'verbose_name_plural': 'Ordenes de subitem',
            },
        ),
        # A site page; content lives in the related Seccion rows.
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=400)),
            ],
            options={
                'verbose_name': 'Pagina',
                'verbose_name_plural': 'Paginas',
            },
        ),
        # A named block of content belonging to one Page.
        migrations.CreateModel(
            name='Seccion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=400)),
                ('contenido', models.TextField()),
                ('pagina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.Page')),
            ],
            options={
                'verbose_name': 'Secci\xf3n',
                'verbose_name_plural': 'Secciones',
            },
        ),
        # Second-level menu entry; links directly to the Page it opens.
        migrations.CreateModel(
            name='SubItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=400)),
                ('pagina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.Page')),
            ],
            options={
                'verbose_name': 'SubItem',
                'verbose_name_plural': "SubItem's",
            },
        ),
        # Deferred FKs/M2Ms: these reference models created above, so they
        # are added after all CreateModel operations.
        migrations.AddField(
            model_name='ordensubitem',
            name='subitem',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.SubItem'),
        ),
        migrations.AddField(
            model_name='menu',
            name='items',
            field=models.ManyToManyField(through='exile.OrdenItem', to='exile.Item'),
        ),
        migrations.AddField(
            model_name='item',
            name='principal',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='exile.Page', verbose_name='Pagina Principal'),
        ),
        migrations.AddField(
            model_name='item',
            name='subitems',
            field=models.ManyToManyField(through='exile.OrdenSubItem', to='exile.SubItem'),
        ),
    ]
| 0 | 4,850 | 23 |